Example #1
    def RunImpl(self, parameters):
        analysis_urlsafe_key = parameters.analysis_urlsafe_key
        analysis = entity_util.GetEntityFromUrlsafeKey(analysis_urlsafe_key)
        assert analysis, 'Cannot retrieve analysis entry from datastore {}'.format(
            analysis_urlsafe_key)

        # Determine if flakiness is still persistent.
        still_flaky = flake_analysis_util.FlakyAtMostRecentlyAnalyzedCommit(
            analysis)

        # TODO(crbug.com/905754): Call auto actions as an async taskqueue task.
        flake_analysis_actions.OnCulpritIdentified(analysis_urlsafe_key)

        if not still_flaky:  # pragma: no cover.
            analysis.LogInfo(
                'No further actions taken due to latest commit being stable.')
            return

        # Data needed for reverts.
        build_key = BaseBuildModel.CreateBuildKey(
            analysis.original_master_name, analysis.original_builder_name,
            analysis.original_build_number)
        with pipeline.InOrder():
            # Revert culprit if applicable.
            yield CreateAndSubmitRevertPipeline(
                self.CreateInputObjectInstance(
                    CreateAndSubmitRevertInput,
                    analysis_urlsafe_key=analysis.key.urlsafe(),
                    build_key=build_key))

            # Update culprit code review.
            yield NotifyCulpritPipeline(
                self.CreateInputObjectInstance(
                    NotifyCulpritInput,
                    analysis_urlsafe_key=analysis_urlsafe_key))
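
The pipeline.InOrder() block above is what forces the revert to finish before the code-review notification starts; yielded children are otherwise free to run concurrently. A minimal self-contained sketch of the idiom, assuming only the appengine-pipelines library (LogStep and SequencedFlow are hypothetical, not part of the example):

import logging

import pipeline  # appengine-pipelines


class LogStep(pipeline.Pipeline):
    """Hypothetical leaf pipeline that just records a message."""

    def run(self, message):
        logging.info(message)


class SequencedFlow(pipeline.Pipeline):
    """Sketch of the sequencing idiom used in Example #1."""

    def run(self):
        with pipeline.InOrder():
            yield LogStep('revert culprit')  # runs first
            yield LogStep('notify code review')  # waits for the line above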
Example #2
  def RunImpl(self, parameters):
    analysis_urlsafe_key = parameters.analysis_urlsafe_key
    analysis = ndb.Key(urlsafe=analysis_urlsafe_key).get()
    assert analysis, 'Analysis missing unexpectedly!'

    step_metadata = (
        step_util.LegacyGetStepMetadata(
            analysis.master_name, analysis.builder_name, analysis.build_number,
            analysis.step_name) or step_util.LegacyGetStepMetadata(
                analysis.original_master_name, analysis.original_builder_name,
                analysis.original_build_number, analysis.original_step_name))

    step_metadata = StepMetadata.FromSerializable(step_metadata)

    recent_commit_position, recent_revision = (
        build_util.GetLatestCommitPositionAndRevision(
            analysis.master_name, analysis.builder_name,
            step_metadata.isolate_target_name))

    if (not analysis.data_points or
        analysis.data_points[0].commit_position >= recent_commit_position or
        (analysis.flakiness_verification_data_points and
         analysis.flakiness_verification_data_points[-1].commit_position >=
         recent_commit_position)):
      # The analysis already has the most up-to-date info on recent commits.
      return

    with pipeline.InOrder():
      get_sha_output = yield GetIsolateShaForCommitPositionPipeline(
          self.CreateInputObjectInstance(
              GetIsolateShaForCommitPositionParameters,
              analysis_urlsafe_key=analysis_urlsafe_key,
              commit_position=recent_commit_position,
              dimensions=None,  # Not used.
              revision=recent_revision,
              step_metadata=step_metadata,
              upper_bound_build_number=analysis.build_number))

      # Determine approximate pass rate at the commit position/isolate sha.
      recent_flakiness = yield DetermineApproximatePassRatePipeline(
          self.CreateInputObjectInstance(
              DetermineApproximatePassRateInput,
              builder_name=analysis.builder_name,
              commit_position=recent_commit_position,
              flakiness_thus_far=None,
              get_isolate_sha_output=get_sha_output,
              previous_swarming_task_output=None,
              master_name=analysis.master_name,
              reference_build_number=analysis.build_number,
              revision=recent_revision,
              step_name=analysis.step_name,
              test_name=analysis.test_name))

      yield SaveFlakinessVerificationPipeline(
          self.CreateInputObjectInstance(
              SaveFlakinessVerificationInput,
              analysis_urlsafe_key=analysis_urlsafe_key,
              flakiness=recent_flakiness))
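
For context on how such a pipeline is launched: root pipelines in this library are constructed with their input object and started on a task queue rather than called directly. A hedged sketch, assuming the RunImpl above belongs to the AnalyzeRecentFlakinessPipeline referenced in Example #3; the queue name is illustrative:

# Hedged usage sketch, not taken from the source.
analyze_input = AnalyzeRecentFlakinessInput(
    analysis_urlsafe_key=analysis.key.urlsafe())
pipeline_job = AnalyzeRecentFlakinessPipeline(analyze_input)
pipeline_job.start(queue_name='default')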
Example #3
    def RunImpl(self, parameters):
        analysis_urlsafe_key = parameters.analysis_urlsafe_key
        analysis = ndb.Key(urlsafe=analysis_urlsafe_key).get()
        assert analysis, 'Cannot retrieve analysis entry from datastore'
        if analysis.request_time:
            monitoring.pipeline_times.increment_by(
                int((time_util.GetUTCNow() -
                     analysis.request_time).total_seconds()),
                {'type': 'flake'})

        commit_position_parameters = parameters.analyze_commit_position_parameters
        commit_position_to_analyze = (
            commit_position_parameters.next_commit_id.commit_position
            if commit_position_parameters.next_commit_id else None)

        if commit_position_to_analyze is None:
            # No further commit position to analyze. The analysis is completed.
            culprit_commit_position = (
                commit_position_parameters.culprit_commit_id.commit_position
                if commit_position_parameters.culprit_commit_id else None)

            if culprit_commit_position is None:
                analysis.LogInfo('Analysis completed with no findings')
                analysis.Update(
                    result_status=result_status.NOT_FOUND_UNTRIAGED)

                if not parameters.rerun:  # pragma: no branch
                    # Don't double report for reruns.
                    yield ReportAnalysisEventPipeline(
                        self.CreateInputObjectInstance(
                            ReportEventInput,
                            analysis_urlsafe_key=analysis_urlsafe_key))
                return

            # Create a FlakeCulprit.
            culprit_revision = commit_position_parameters.culprit_commit_id.revision
            assert culprit_revision, 'No revision for commit {}'.format(
                culprit_commit_position)
            culprit = flake_analysis_util.UpdateCulprit(
                analysis_urlsafe_key, culprit_revision,
                culprit_commit_position)
            confidence_score = confidence_score_util.CalculateCulpritConfidenceScore(
                analysis, culprit_commit_position)

            # Associate FlakeCulprit with the analysis.
            analysis.Update(confidence_in_culprit=confidence_score,
                            culprit_urlsafe_key=culprit.key.urlsafe(),
                            result_status=result_status.FOUND_UNTRIAGED)

            with pipeline.InOrder():
                if flake_analysis_util.ShouldTakeAutoAction(
                        analysis, parameters.rerun):  # pragma: no branch

                    # Check recent flakiness.
                    yield AnalyzeRecentFlakinessPipeline(
                        self.CreateInputObjectInstance(
                            AnalyzeRecentFlakinessInput,
                            analysis_urlsafe_key=analysis_urlsafe_key))

                    # Perform auto actions after checking recent flakiness.
                    yield _PerformAutoActionsPipeline(
                        self.CreateInputObjectInstance(
                            _PerformAutoActionsInput,
                            analysis_urlsafe_key=analysis_urlsafe_key))

                if not parameters.rerun:  # pragma: no branch
                    # Report events to BQ.
                    yield ReportAnalysisEventPipeline(
                        self.CreateInputObjectInstance(
                            ReportEventInput,
                            analysis_urlsafe_key=analysis_urlsafe_key))
                return

        revision_to_analyze = commit_position_parameters.next_commit_id.revision
        assert revision_to_analyze, 'No revision for commit {}'.format(
            commit_position_to_analyze)

        # Check for bot availability. If this is a user rerun or the maximum retries
        # have been reached, continue regardless of bot availability.
        if flake_analysis_util.CanStartAnalysisImmediately(
                parameters.step_metadata, parameters.retries,
                parameters.manually_triggered):

            # Set analysis status to RUNNING if not already.
            analysis.InitializeRunning()

            analysis.LogInfo('Analyzing commit position {}'.format(
                commit_position_to_analyze))

            with pipeline.InOrder():
                # Determine isolate sha to run swarming tasks on.
                upper_bound_build_number = analysis.GetLowestUpperBoundBuildNumber(
                    commit_position_to_analyze)
                get_sha_output = yield GetIsolateShaForCommitPositionPipeline(
                    self.CreateInputObjectInstance(
                        GetIsolateShaForCommitPositionParameters,
                        analysis_urlsafe_key=analysis_urlsafe_key,
                        commit_position=commit_position_to_analyze,
                        dimensions=parameters.dimensions,
                        step_metadata=parameters.step_metadata,
                        revision=revision_to_analyze,
                        upper_bound_build_number=upper_bound_build_number))

                # Determine approximate pass rate at the commit position/isolate sha.
                flakiness = yield DetermineApproximatePassRatePipeline(
                    self.CreateInputObjectInstance(
                        DetermineApproximatePassRateInput,
                        builder_name=analysis.builder_name,
                        commit_position=commit_position_to_analyze,
                        flakiness_thus_far=None,
                        get_isolate_sha_output=get_sha_output,
                        master_name=analysis.master_name,
                        previous_swarming_task_output=None,
                        reference_build_number=analysis.build_number,
                        revision=revision_to_analyze,
                        step_name=analysis.step_name,
                        test_name=analysis.test_name))

                yield UpdateFlakeAnalysisDataPointsPipeline(
                    self.CreateInputObjectInstance(
                        UpdateFlakeAnalysisDataPointsInput,
                        analysis_urlsafe_key=analysis_urlsafe_key,
                        flakiness=flakiness))

                # Determine the next commit position to analyze.
                next_commit_position_output = yield NextCommitPositionPipeline(
                    self.CreateInputObjectInstance(
                        NextCommitPositionInput,
                        analysis_urlsafe_key=analysis_urlsafe_key,
                        commit_position_range=parameters.commit_position_range,
                        step_metadata=parameters.step_metadata))

                # Recurse on the new commit position.
                yield RecursiveAnalyzeFlakePipeline(
                    self.CreateInputObjectInstance(
                        AnalyzeFlakeInput,
                        analysis_urlsafe_key=analysis_urlsafe_key,
                        analyze_commit_position_parameters=
                        next_commit_position_output,
                        commit_position_range=parameters.commit_position_range,
                        dimensions=parameters.dimensions,
                        manually_triggered=parameters.manually_triggered,
                        rerun=parameters.rerun,
                        retries=0,
                        step_metadata=parameters.step_metadata))
        else:
            # Can't start the analysis just yet, reschedule.
            parameters.retries += 1
            delay_seconds = flake_analysis_util.CalculateDelaySecondsBetweenRetries(
                analysis, parameters.retries, parameters.manually_triggered)
            delay = yield DelayPipeline(delay_seconds)

            with pipeline.After(delay):
                yield RecursiveAnalyzeFlakePipeline(parameters)
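
The else branch above is the library's standard backoff idiom: a delay pipeline yields a future, and pipeline.After() turns that future into a barrier for everything in its body. A self-contained sketch using the library's built-in Delay (which DelayPipeline presumably wraps; Ping and RetryLater are hypothetical):

import pipeline
from pipeline import common as pipeline_common


class Ping(pipeline.Pipeline):
    """Hypothetical leaf pipeline used to illustrate the barrier."""

    def run(self):
        return 'pong'


class RetryLater(pipeline.Pipeline):
    """Sketch of the delay-then-retry idiom from Example #3."""

    def run(self, delay_seconds):
        delay = yield pipeline_common.Delay(seconds=delay_seconds)
        with pipeline.After(delay):
            yield Ping()  # scheduled only after the delay elapses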
Example #4
    def RunImpl(self, parameters):
        """Determines the Isolated sha to run in swarming given a commit position.

    If the requested commit position maps directly to a  build, simply get that
    existing build's isolated sha. Otherwise, trigger a try job to compile and
    isolate at that revision and return the resulting sha.
    """
        analysis = ndb.Key(urlsafe=parameters.analysis_urlsafe_key).get()
        assert analysis

        master_name = analysis.master_name
        builder_name = analysis.builder_name
        commit_position = parameters.commit_position
        step_name = analysis.step_name
        isolate_target_name = parameters.step_metadata.isolate_target_name

        reference_build_info = build_util.GetBuildInfo(master_name,
                                                       builder_name,
                                                       analysis.build_number)
        parent_mastername = (reference_build_info.parent_mastername
                             or master_name)
        parent_buildername = (reference_build_info.parent_buildername
                              or builder_name)

        targets = (IsolatedTarget.FindIsolateAtOrAfterCommitPositionByMaster(
            parent_mastername, parent_buildername, constants.GITILES_HOST,
            constants.GITILES_PROJECT, constants.GITILES_REF,
            isolate_target_name, commit_position))

        # TODO(crbug.com/872992): Remove this entire branch's fallback logic once
        # LUCI migration is complete.
        if not targets:
            analysis.LogInfo((
                'No IsolatedTargets found for {}/{} with minimum commit position '
                '{}. Falling back to searching buildbot').format(
                    master_name, builder_name, commit_position))
            _, earliest_containing_build = step_util.GetValidBoundingBuildsForStep(
                master_name, builder_name, step_name, None,
                parameters.upper_bound_build_number, commit_position)

            assert earliest_containing_build, (
                'Unable to find nearest build cycle with minimum commit position '
                '{}'.format(commit_position))

            build_commit_position = earliest_containing_build.commit_position
            assert build_commit_position >= commit_position, (
                'Upper bound build commit position {} is before {}'.format(
                    build_commit_position, commit_position))

            if build_commit_position == commit_position:  # pragma: no branch
                get_build_sha_parameters = self.CreateInputObjectInstance(
                    GetIsolateShaForBuildParameters,
                    master_name=master_name,
                    builder_name=builder_name,
                    build_number=earliest_containing_build.build_number,
                    step_name=step_name,
                    url=buildbot.CreateBuildUrl(
                        master_name, builder_name,
                        earliest_containing_build.build_number))
                yield GetIsolateShaForBuildPipeline(get_build_sha_parameters)
                return

        if targets:
            upper_bound_target = targets[0]
            if upper_bound_target.commit_position == commit_position:
                # The requested commit position is that of a found IsolatedTarget.
                get_target_input = GetIsolateShaForTargetInput(
                    isolated_target_urlsafe_key=upper_bound_target.key.urlsafe(
                    ))
                yield GetIsolateShaForTargetPipeline(get_target_input)
                return

        # The requested commit position needs to be compiled.
        cache_name = swarmbot_util.GetCacheName(
            parent_mastername,
            parent_buildername,
            suffix=flake_constants.FLAKE_CACHE_SUFFIX)
        test_name = analysis.test_name

        try_job = flake_try_job.GetTryJob(master_name, builder_name, step_name,
                                          test_name, parameters.revision)
        run_flake_try_job_parameters = self.CreateInputObjectInstance(
            RunFlakeTryJobParameters,
            analysis_urlsafe_key=parameters.analysis_urlsafe_key,
            revision=parameters.revision,
            flake_cache_name=cache_name,
            dimensions=parameters.dimensions,
            isolate_target_name=isolate_target_name,
            urlsafe_try_job_key=try_job.key.urlsafe())

        with pipeline.InOrder():
            try_job_result = yield RunFlakeTryJobPipeline(
                run_flake_try_job_parameters)
            get_isolate_sha_from_try_job_input = self.CreateInputObjectInstance(
                GetIsolateShaForTryJobParameters,
                try_job_result=try_job_result,
                step_name=step_name)
            yield GetIsolateShaForTryJobPipeline(
                get_isolate_sha_from_try_job_input)
Example #5
    def RunImpl(self, pipeline_input):

        master_name, builder_name, build_number = (
            pipeline_input.build_key.GetParts())

        build_failure_analysis.ResetAnalysisForANewAnalysis(
            master_name,
            builder_name,
            build_number,
            build_completed=pipeline_input.build_completed,
            pipeline_status_path=self.pipeline_status_path,
            current_version=appengine_util.GetCurrentVersion())

        # TODO(crbug/869684): Use a gauge metric to track intermittent statuses.

        # The yield statements below return PipelineFutures, which allow subsequent
        # pipelines to refer to previous output values.
        # https://github.com/GoogleCloudPlatform/appengine-pipelines/wiki/Python
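        # For example, heuristic_result below is such a future: it is yielded
        # from HeuristicAnalysisForTestPipeline and passed as an input to
        # later pipelines, which are only scheduled once it has resolved.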

        # Heuristic Approach.
        heuristic_params = TestHeuristicAnalysisParameters(
            failure_info=pipeline_input.current_failure_info,
            build_completed=pipeline_input.build_completed)
        heuristic_result = yield HeuristicAnalysisForTestPipeline(
            heuristic_params)

        # Try job approach.
        with pipeline.InOrder():
            run_tasks_inputs = self.CreateInputObjectInstance(
                RunSwarmingTasksInput,
                build_key=pipeline_input.build_key,
                heuristic_result=heuristic_result,
                force=pipeline_input.force)
            # Swarming rerun:
            # Triggers swarming tasks the first time a test failure happens.
            # This pipeline will run before the build completes.
            yield RunSwarmingTasksPipeline(run_tasks_inputs)

            collect_task_results_inputs = self.CreateInputObjectInstance(
                CollectSwarmingTaskResultsInputs,
                build_key=pipeline_input.build_key,
                build_completed=pipeline_input.build_completed)
            # An async pipeline that queries swarming tasks periodically until
            # they all complete, then returns the consistent failures.
            consistent_failures = yield CollectSwarmingTaskResultsPipeline(
                collect_task_results_inputs)

            start_waterfall_try_job_inputs = self.CreateInputObjectInstance(
                StartTestTryJobInputs,
                build_key=pipeline_input.build_key,
                build_completed=pipeline_input.build_completed,
                force=pipeline_input.force,
                heuristic_result=heuristic_result,
                consistent_failures=consistent_failures)
            yield StartTestTryJobPipeline(start_waterfall_try_job_inputs)

            if not pipeline_input.force:
                # Report event to BQ.
                report_event_input = self.CreateInputObjectInstance(
                    report_event_pipeline.ReportEventInput,
                    analysis_urlsafe_key=WfAnalysis.Get(
                        master_name, builder_name, build_number).key.urlsafe())
                yield report_event_pipeline.ReportAnalysisEventPipeline(
                    report_event_input)

            # Trigger flake analysis on flaky tests, if any.
            yield TriggerFlakeAnalysesPipeline(pipeline_input.build_key)
Example #6
    def RunImpl(self, parameters):
        """Pipeline to find the true pass rate of a test at a commit position."""
        master_name = parameters.master_name
        builder_name = parameters.builder_name
        reference_build_number = parameters.reference_build_number
        step_name = parameters.step_name
        test_name = parameters.test_name
        commit_position = parameters.commit_position
        get_isolate_sha_output = parameters.get_isolate_sha_output
        build_url = get_isolate_sha_output.build_url
        try_job_url = get_isolate_sha_output.try_job_url
        flakiness_thus_far = parameters.flakiness_thus_far
        previous_swarming_task_output = parameters.previous_swarming_task_output

        # Extract pass rate and iterations already-completed up to this point.
        if previous_swarming_task_output:
            assert flakiness_thus_far, (
                'Previous swarming task output not captured properly')
            error = previous_swarming_task_output.error
            pass_rate_at_commit_position = flakiness_thus_far.pass_rate
            previous_pass_count = previous_swarming_task_output.pass_count
            previous_iterations = previous_swarming_task_output.iterations
            # Cast before dividing so Python 2 integer division does not
            # truncate the pass rate.
            previous_pass_rate = (
                float(previous_pass_count) / previous_iterations
                if previous_iterations else None)
        else:
            error = None
            pass_rate_at_commit_position = None
            previous_iterations = 0
            previous_pass_count = 0
            previous_pass_rate = None

            # Create a fresh Flakiness instance to aggregate swarming rerun data.
            flakiness_thus_far = Flakiness(
                build_number=get_isolate_sha_output.build_number,
                build_url=build_url,
                commit_position=commit_position,
                total_test_run_seconds=0,
                error=None,
                failed_swarming_task_attempts=0,
                iterations=0,
                pass_rate=None,
                revision=parameters.revision,
                task_ids=ListOfBasestring.FromSerializable([]),
                try_job_url=try_job_url)

        # Bail out if there were too many errors.
        if (error and flakiness_util.MaximumSwarmingTaskRetriesReached(
                flakiness_thus_far)):
            logging.error(
                'Swarming task ended in error after %d failed attempts. Giving '
                'up' % flakiness_thus_far.failed_swarming_task_attempts)
            flakiness_thus_far.error = error
            yield AggregateFlakinessPipeline(
                self.CreateInputObjectInstance(
                    AggregateFlakinessInput,
                    flakiness_thus_far=flakiness_thus_far,
                    incoming_swarming_task_output=None))
            return

        # Move on if the maximum number of iterations has been reached or exceeded.
        if flakiness_util.MaximumIterationsReached(flakiness_thus_far):
            logging.info('Max iterations reached for commit_position %d' %
                         commit_position)
            yield AggregateFlakinessPipeline(
                self.CreateInputObjectInstance(
                    AggregateFlakinessInput,
                    flakiness_thus_far=flakiness_thus_far,
                    incoming_swarming_task_output=None))
            return

        # Move on if the test doesn't exist.
        if pass_rate_util.TestDoesNotExist(pass_rate_at_commit_position):
            logging.info('No test found at commit position %d' %
                         commit_position)
            yield AggregateFlakinessPipeline(
                self.CreateInputObjectInstance(
                    AggregateFlakinessInput,
                    flakiness_thus_far=flakiness_thus_far,
                    incoming_swarming_task_output=None))
            return

        # Move on if there is sufficient information about the pass rate.
        if pass_rate_util.HasSufficientInformation(
                pass_rate_at_commit_position, flakiness_thus_far.iterations,
                previous_pass_rate, previous_iterations):
            logging.info(
                'There is sufficient information for commit position %d with pass '
                'rate %s after %d iterations' %
                (commit_position, pass_rate_at_commit_position,
                 flakiness_thus_far.iterations))
            yield AggregateFlakinessPipeline(
                self.CreateInputObjectInstance(
                    AggregateFlakinessInput,
                    flakiness_thus_far=flakiness_thus_far,
                    incoming_swarming_task_output=None))
            return

        # Another swarming task is needed. Determine parameters for it to run.
        iterations_for_task, time_for_task_seconds = (
            run_swarming_util.CalculateRunParametersForSwarmingTask(
                flakiness_thus_far, error))

        # Run swarming task, update data points with results, and recurse.
        with pipeline.InOrder():
            swarming_task_output = yield RunFlakeSwarmingTaskPipeline(
                self.CreateInputObjectInstance(
                    RunFlakeSwarmingTaskInput,
                    master_name=master_name,
                    builder_name=builder_name,
                    reference_build_number=reference_build_number,
                    step_name=step_name,
                    test_name=test_name,
                    commit_position=commit_position,
                    isolate_sha=get_isolate_sha_output.isolate_sha,
                    iterations=iterations_for_task,
                    timeout_seconds=time_for_task_seconds))

            aggregated_flakiness = yield AggregateFlakinessPipeline(
                self.CreateInputObjectInstance(
                    AggregateFlakinessInput,
                    flakiness_thus_far=flakiness_thus_far,
                    incoming_swarming_task_output=swarming_task_output))

            yield DetermineApproximatePassRatePipelineWrapper(
                self.CreateInputObjectInstance(
                    DetermineApproximatePassRateInput,
                    builder_name=parameters.builder_name,
                    commit_position=commit_position,
                    flakiness_thus_far=aggregated_flakiness,
                    get_isolate_sha_output=get_isolate_sha_output,
                    master_name=parameters.master_name,
                    previous_swarming_task_output=swarming_task_output,
                    reference_build_number=parameters.reference_build_number,
                    revision=parameters.revision,
                    step_name=parameters.step_name,
                    test_name=parameters.test_name))
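
Note that the recursion above ends by yielding DetermineApproximatePassRatePipelineWrapper rather than the pipeline class itself. A hedged reconstruction of what such a wrapper typically looks like in this codebase (the input_type attribute and the testability rationale are assumptions, not taken from the source):

class DetermineApproximatePassRatePipelineWrapper(GeneratorPipeline):
    """Assumed thin pass-through wrapper.

    Keeping the recursive call behind a separate class lets unit tests mock
    only the recursion while leaving the real pipeline untouched.
    """
    input_type = DetermineApproximatePassRateInput  # assumed

    def RunImpl(self, parameters):
        yield DetermineApproximatePassRatePipeline(parameters)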