Code Example #1
def testGetStepMetadataCached(self, mock_fn, *_):
    mock_fn.side_effect = [None, {'canonical_step_name': 'step_name'}]
    # Returns the invalid step_metadata but does not cache it.
    self.assertEqual(
        None, step_util.GetStepMetadata(123, 'step_name on a platform'))
    self.assertEqual(1, mock_fn.call_count)
    # Returns the valid step_metadata and caches it.
    self.assertEqual({'canonical_step_name': 'step_name'},
                     step_util.GetStepMetadata(123, 'step_name on a platform'))
    self.assertEqual(2, mock_fn.call_count)
    # A repeated call hits the cache; the underlying fetch is not called again.
    self.assertEqual({'canonical_step_name': 'step_name'},
                     step_util.GetStepMetadata(123, 'step_name on a platform'))
    self.assertEqual(2, mock_fn.call_count)
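
The test above exercises result caching in step_util.GetStepMetadata: a None
(invalid) result is fetched again on the next call, while a valid result is
cached and reused. A minimal sketch of that behavior, assuming a hypothetical
module-level cache and fetch helper (the real step_util implementation may
differ):

_METADATA_CACHE = {}  # (build_id, step_name) -> step_metadata dict

def GetStepMetadata(build_id, step_name):
    key = (build_id, step_name)
    if key in _METADATA_CACHE:
        return _METADATA_CACHE[key]
    metadata = _FetchStepMetadata(build_id, step_name)  # hypothetical fetch
    if metadata is not None:
        # Only valid (non-None) results are cached, matching the test above.
        _METADATA_CACHE[key] = metadata
    return metadata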
Code Example #2
def GetTestLocation(build_id, step_name, test_name, normalized_test_name):
    """Returns a TestLocation for the given test.

  Currently only supports webkit_layout_tests and Gtests.

  Args:
    build_id (int): Build id of the build.
    step_name (str): The name of the step.
    test_name (str): The name of the test.
    normalized_test_name (str): The normalized version of the test name.
  """
    if 'webkit_layout_tests' in step_name:
        # For WebKit layout tests, the normalized test name is the test file
        # path relative to third_party/blink/web_tests.
        return TestLocation(
            file_path=_NormalizePath('third_party/blink/web_tests/%s' %
                                     normalized_test_name))
    if not test_name_util.GTEST_REGEX.match(normalized_test_name):
        return None

    # For Gtests, read the test location from the step's output.json.
    step_metadata = step_util.GetStepMetadata(
        build_id, step_name, partial_match=True)
    # Guard against GetStepMetadata returning None or incomplete metadata.
    task_ids = (step_metadata or {}).get('swarm_task_ids') or []
    for task_id in task_ids:
        test_path = swarmed_test_util.GetTestLocation(task_id, test_name)
        if test_path:
            return TestLocation(file_path=_NormalizePath(test_path.file),
                                line_number=test_path.line)
    return None
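
This example and Example #4 rely on the helpers _NormalizePath and
TestLocation, which are not shown. A minimal sketch of plausible shapes for
them, offered only as an assumption so the examples read standalone (not the
actual Findit definitions):

import collections
import posixpath

# Hypothetical stand-ins for the helpers used above.
TestLocation = collections.namedtuple('TestLocation',
                                      ['file_path', 'line_number'])
TestLocation.__new__.__defaults__ = (None,)  # line_number is optional

def _NormalizePath(path):
    # Force forward slashes and collapse '.' / '..' segments.
    return posixpath.normpath(path.replace('\\', '/'))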
Code Example #3
    def RunImpl(self, build_key):
        """Triggers flake analyses for flaky tests found by CI failure analysis."""
        master_name, builder_name, build_number = build_key.GetParts()
        flake_settings = waterfall_config.GetCheckFlakeSettings()
        throttled = flake_settings.get('throttle_flake_analyses', True)

        analysis = WfAnalysis.Get(master_name, builder_name, build_number)

        if not analysis or not analysis.flaky_tests:
            return

        analysis_counts = defaultdict(lambda: defaultdict(int))
        for step, flaky_tests in analysis.flaky_tests.iteritems():
            logging.info('%s/%s/%s/%s has %s flaky tests.', master_name,
                         builder_name, build_number, step, len(flaky_tests))

            for test_name in flaky_tests:
                # TODO(crbug.com/904050): Deprecate FlakeAnalysisRequest in favor of
                # Flake.
                flake = flake_util.GetFlake(_LUCI_PROJECT, step, test_name,
                                            master_name, builder_name,
                                            build_number)
                request = FlakeAnalysisRequest.Create(test_name, False, None)
                request.AddBuildStep(master_name, builder_name, build_number,
                                     step, time_util.GetUTCNow())
                request.flake_key = flake.key
                scheduled = flake_analysis_service.ScheduleAnalysisForFlake(
                    request, '*****@*****.**',
                    False, triggering_sources.FINDIT_PIPELINE)
                if scheduled:  # pragma: no branch
                    analysis_counts[step]['analyzed'] += 1
                    logging.info(
                        'A flake analysis has been triggered for %s/%s', step,
                        test_name)
                    if throttled and len(flaky_tests) > 1:
                        logging.info(
                            'Throttling is enabled, skipping %d tests.',
                            len(flaky_tests) - 1)
                        analysis_counts[step]['throttled'] = len(
                            flaky_tests) - 1
                        break  # If throttled, stop after the first analysis.
                else:
                    analysis_counts[step]['error'] += 1

        for step, step_counts in analysis_counts.iteritems():
            # Collects metrics.
            step_metadata = step_util.GetStepMetadata(
                master_name, builder_name, build_number, step) or {}
            canonical_step_name = step_metadata.get(
                'canonical_step_name') or 'Unknown'
            isolate_target_name = step_metadata.get(
                'isolate_target_name') or 'Unknown'

            for operation, count in step_counts.iteritems():
                monitoring.OnFlakeIdentified(canonical_step_name,
                                             isolate_target_name, operation,
                                             count)
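
The analysis_counts structure above is a two-level counter mapping step name
to operation name to count. A quick standalone illustration of the pattern:

from collections import defaultdict

counts = defaultdict(lambda: defaultdict(int))
counts['browser_tests']['analyzed'] += 1
counts['browser_tests']['throttled'] = 4
# counts now behaves like {'browser_tests': {'analyzed': 1, 'throttled': 4}}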
Code Example #4
def _GetTestLocation(flake_occurrence):
    """Returns a TestLocation for the given FlakeOccurrence instance."""
    step_metadata = step_util.GetStepMetadata(
        flake_occurrence.build_configuration.legacy_master_name,
        flake_occurrence.build_configuration.luci_builder,
        flake_occurrence.build_configuration.legacy_build_number,
        flake_occurrence.step_ui_name)
    # Guard against GetStepMetadata returning None or incomplete metadata.
    task_ids = (step_metadata or {}).get('swarm_task_ids') or []
    for task_id in task_ids:
        test_path = swarmed_test_util.GetTestLocation(
            task_id, flake_occurrence.test_name)
        if test_path:
            return TestLocation(file_path=_NormalizePath(test_path.file),
                                line_number=test_path.line)
    return None
Code Example #5
def testGetStepMetadataPartialMatch(self, mock_step_log):
    # The third positional argument is the partial_match flag; verify it is
    # forwarded to the underlying step-log call.
    step_util.GetStepMetadata(123, 'step', True)
    self.assertIn(True, mock_step_log.call_args[0])
    step_util.GetStepMetadata(123, 'step', False)
    self.assertIn(False, mock_step_log.call_args[0])
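
For readers unfamiliar with mock, call_args[0] is the tuple of positional
arguments from the most recent call, which is what the assertions above
inspect. A minimal standalone illustration:

from unittest import mock  # on Python 2 this was the standalone 'mock' package

m = mock.Mock()
m(123, 'step', True)
# call_args[0] is the positional-args tuple of the latest call.
assert m.call_args[0] == (123, 'step', True)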
Code Example #6
def testGetStepMetadata(self, cases, mock_step_log):
    mock_step_log.return_value = cases['step_log_return']
    step_metadata = step_util.GetStepMetadata(123, 'step')
    self.assertEqual(cases['expected_step_metadata'], step_metadata)
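
The cases argument comes from the test's parameterization, which is not
shown. An illustrative payload with made-up values:

cases = {
    'step_log_return': {'canonical_step_name': 'step'},
    'expected_step_metadata': {'canonical_step_name': 'step'},
}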
Code Example #7
def FindMatchingWaterfallStep(build_step, test_name):
    """Finds the matching Waterfall step and checks whether it is supported.

  Only Swarmed and gtest-based steps are supported at the moment.

  Args:
    build_step (BuildStep): A build step on Waterfall or Commit Queue. It
        will be updated with the matching Waterfall step and whether it is
        Swarmed and supported.
    test_name (str): The name of the test.
  """

    build_step.swarmed = False
    build_step.supported = False

    http_client = FinditHttpClient()

    if build_step.on_cq:
        wf_master_name, wf_builder_name, wf_build_number, wf_step_name, metadata = (
            _GetMatchingWaterfallBuildStep(build_step, http_client))

        build_step.wf_master_name = wf_master_name
        build_step.wf_builder_name = wf_builder_name
        build_step.wf_build_number = wf_build_number
        build_step.wf_step_name = wf_step_name

        if not build_step.has_matching_waterfall_step:
            return
    else:
        build_step.wf_master_name = build_step.master_name
        build_step.wf_builder_name = build_step.builder_name
        build_step.wf_build_number = build_step.build_number
        build_step.wf_step_name = build_step.step_name
        metadata = build_step.step_metadata or step_util.GetStepMetadata(
            build_step.master_name, build_step.builder_name,
            build_step.build_number, build_step.step_name)
        if not metadata:
            logging.error('Couldn\'t get step_metadata')
            return

    build_step.step_metadata = metadata
    # The step ran on Swarming if the metadata lists Swarming task ids.
    build_step.swarmed = bool(metadata.get('swarm_task_ids'))

    if build_step.swarmed:
        need_to_continue = False
        for task_id in metadata['swarm_task_ids']:
            output = swarmed_test_util.GetTestResultForSwarmingTask(
                task_id, http_client)
            if output:
                # Infer the test result format from the output.
                test_result_object = test_results_util.GetTestResultObject(
                    output, partial_result=True)
                if not test_result_object:
                    build_step.supported = False
                elif not step_util.IsStepSupportedByFindit(
                        test_result_object,
                        metadata.get('canonical_step_name')
                        or build_step.step_name, build_step.wf_master_name):
                    build_step.supported = False
                else:
                    test_exists = test_result_object.DoesTestExist(test_name)
                    if test_exists:
                        build_step.supported = True
                    elif test_result_object.contains_all_tests:
                        # There is no such test for sure.
                        build_step.supported = False
                    else:
                        # The test is not in this task, but it may be in
                        # another one, so keep checking.
                        need_to_continue = True
            if not need_to_continue:
                # Keep looping only if an earlier task was inconclusive.
                break
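
The nested branch above resolves, per task, to one of three outcomes for
build_step.supported plus a decision on whether to keep checking the
remaining tasks. A compact restatement of the same decision as a pure
function (a sketch with the same semantics, not code from the module):

def _SupportOutcome(result_obj, step_supported, test_exists, has_all_tests):
    """Returns (supported, keep_checking_other_tasks)."""
    if not result_obj or not step_supported:
        return False, False
    if test_exists:
        return True, False
    if has_all_tests:
        return False, False  # The test definitely does not exist.
    return False, True  # Inconclusive; look at the remaining tasks.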
Code Example #8
def _GetMatchingWaterfallBuildStep(cq_build_step, http_client):
    """Returns the matching Waterfall build step of the given CQ one.

  Args:
    cq_build_step (BuildStep): A build step on Commit Queue.
    http_client (RetryHttpClient): An HTTP client to send HTTP requests.

  Returns:
      (master_name, builder_name, build_number, step_name, step_metadata)
    or
      None
  """
    no_matching_result = (None, None, None, None, None)

    # 0. Get step_metadata.
    step_metadata = cq_build_step.step_metadata or step_util.GetStepMetadata(
        cq_build_step.master_name, cq_build_step.builder_name,
        cq_build_step.build_number, cq_build_step.step_name)
    if not step_metadata:
        logging.error('Couldn\'t get step_metadata')
        return no_matching_result

    # 1. Map a cq trybot to the matching waterfall buildbot:
    # get master_name and builder_name.
    wf_master_name = step_metadata.get('waterfall_mastername')
    wf_builder_name = step_metadata.get('waterfall_buildername')
    if not wf_master_name or not wf_builder_name:
        # Either waterfall_mastername or waterfall_buildername doesn't exist.
        logging.info('%s/%s has no matching Waterfall buildbot',
                     cq_build_step.master_name, cq_build_step.builder_name)
        return no_matching_result  # No matching Waterfall buildbot.

    # 2. Get "name" of the CQ trybot step.

    # Name of the step in the tags of a Swarming task.
    # Can't use the raw step name: the CQ step has a "(with patch)" suffix
    # while the Waterfall step does not.
    name = step_metadata.get('canonical_step_name')
    # The OS the test runs on. The same test binary might run on two
    # different OS platforms.
    os_name = step_metadata.get('dimensions', {}).get('os')
    if not name or not os_name:
        logging.error('Couldn\'t find name/os')
        return no_matching_result  # Missing step name or OS dimension.

    # TODO: cache and throttle QPS to the same master.
    # 3. Retrieve latest completed build cycle on the buildbot.
    builds = buildbot.GetRecentCompletedBuilds(wf_master_name,
                                               wf_builder_name,
                                               page_size=1)
    if not builds:
        logging.error('Couldn\'t find latest builds.')
        return no_matching_result  # No recent completed builds.

    # 4. Check whether there is matching step.
    tasks = swarming.ListSwarmingTasksDataByTags(http_client,
                                                 wf_master_name,
                                                 wf_builder_name,
                                                 builds[0],
                                                 additional_tag_filters={
                                                     'name': name,
                                                     'os': os_name
                                                 })
    if tasks:  # One matching buildbot is found.
        task_tags = tasks[0].tags
        wf_step_name = (task_tags['stepname'][0]
                        if 'stepname' in task_tags else '')
        logging.info('%s/%s/%s is mapped to %s/%s/%s',
                     cq_build_step.master_name, cq_build_step.builder_name,
                     cq_build_step.step_name, wf_master_name, wf_builder_name,
                     wf_step_name)
        return (wf_master_name, wf_builder_name, builds[0], wf_step_name,
                step_metadata)

    return no_matching_result
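
Step 4 above matches the CQ step to its Waterfall counterpart by filtering
Swarming tasks on the 'name' and 'os' tags. A minimal standalone sketch of
that filtering idea, assuming each task exposes a dict of tag lists (a
hypothetical data shape, not the actual swarming module API):

def _FilterTasksByTags(tasks, canonical_name, os_name):
    # Keep only tasks whose 'name' and 'os' tags match the CQ trybot step.
    return [
        task for task in tasks
        if canonical_name in task['tags'].get('name', [])
        and os_name in task['tags'].get('os', [])
    ]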
Code Example #9
def ScheduleAnalysisIfNeeded(
        normalized_test,
        original_test,
        flake_key,
        bug_id=None,
        allow_new_analysis=False,
        force=False,
        manually_triggered=False,
        user_email=None,
        triggering_source=triggering_sources.FINDIT_PIPELINE,
        queue_name=constants.DEFAULT_QUEUE):
    """Schedules an analysis if needed and returns the MasterFlakeAnalysis.

  When the flake was already analyzed and a new analysis is scheduled, the
  returned MasterFlakeAnalysis will still hold the result of the last
  completed analysis.

  Args:
    normalized_test (TestInfo): Info of the normalized flaky test after mapping
      a CQ trybot step to a Waterfall buildbot step, stripping the "PRE_"
      prefix from a gtest, etc.
    original_test (TestInfo): Info of the original flaky test.
    flake_key (ndb.Key): The key to the Flake responsible for triggering this
      analysis.
    bug_id (int): The monorail bug id to update when analysis is done.
    allow_new_analysis (bool): Indicate whether a new analysis is allowed.
    force (bool): Indicate whether to force a rerun of current analysis.
    manually_triggered (bool): True if the analysis was requested manually,
      such as by a Chromium sheriff.
    user_email (str): The email of the user requesting the analysis.
    triggering_source (int): From where this analysis was triggered, such as
      through Findit pipeline, UI, or through Findit API.
    queue_name (str): The App Engine queue to run the analysis.

  Returns:
    A MasterFlakeAnalysis instance.
    None if no analysis was scheduled and the user has no permission to
    schedule one.
  """
    need_new_analysis, analysis = _NeedANewAnalysis(
        normalized_test,
        original_test,
        flake_key,
        bug_id=bug_id,
        allow_new_analysis=allow_new_analysis,
        force=force,
        user_email=user_email,
        triggering_source=triggering_source)

    if need_new_analysis:
        # _NeedANewAnalysis just created master_flake_analysis. Use the latest
        # version number and pass that along to the other pipelines for updating
        # results and data.
        logging.info(
            'A new master flake analysis was successfully saved for %s (%s) and '
            'will be captured in version %s', repr(normalized_test),
            repr(original_test), analysis.version_number)

        # Prefer step metadata from the normalized test's step; fall back to
        # the original test's step.
        step_metadata = (step_util.GetStepMetadata(
            normalized_test.master_name, normalized_test.builder_name,
            normalized_test.build_number,
            normalized_test.step_name) or step_util.GetStepMetadata(
                original_test.master_name, original_test.builder_name,
                original_test.build_number, original_test.step_name))

        logging.info('Initializing flake analysis pipeline for key: %s',
                     analysis.key)

        starting_build_info = build_util.GetBuildInfo(
            normalized_test.master_name, normalized_test.builder_name,
            normalized_test.build_number)

        original_build_info = build_util.GetBuildInfo(
            original_test.master_name, original_test.builder_name,
            original_test.build_number)

        assert starting_build_info, (
            'Failed to get starting build for flake analysis')
        starting_commit_position = starting_build_info.commit_position

        assert starting_commit_position is not None, (
            'Cannot analyze flake without a starting commit position')

        assert original_build_info, 'Failed to get original build info'

        # Get the dimensions of the bot for when try jobs are needed to compile.
        dimensions = try_job_service.GetDimensionsFromBuildInfo(
            starting_build_info)

        analyze_flake_input = AnalyzeFlakeInput(
            analysis_urlsafe_key=analysis.key.urlsafe(),
            analyze_commit_position_parameters=NextCommitPositionOutput(
                culprit_commit_id=None,
                next_commit_id=CommitID(
                    commit_position=starting_commit_position,
                    revision=starting_build_info.chromium_revision)),
            commit_position_range=IntRange(lower=None,
                                           upper=starting_commit_position),
            dimensions=ListOfBasestring.FromSerializable(dimensions),
            manually_triggered=manually_triggered,
            retries=0,
            rerun=force,
            step_metadata=StepMetadata.FromSerializable(step_metadata))

        pipeline_job = AnalyzeFlakePipeline(analyze_flake_input)

        pipeline_job.target = appengine_util.GetTargetNameForModule(
            constants.WATERFALL_BACKEND)
        pipeline_job.start(queue_name=queue_name)
        analysis.pipeline_status_path = pipeline_job.pipeline_status_path
        analysis.root_pipeline_id = pipeline_job.root_pipeline_id
        analysis.build_id = starting_build_info.buildbucket_id
        analysis.original_build_id = original_build_info.buildbucket_id
        analysis.put()
        analysis.LogInfo((
            'A flake analysis was scheduled using commit-based pipelines with '
            'path {}').format(pipeline_job.pipeline_status_path))
    else:
        logging.info('A flake analysis not necessary for build %s, %s, %s, %s',
                     normalized_test.master_name, normalized_test.builder_name,
                     normalized_test.build_number, normalized_test.step_name)

    return analysis
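
A hedged usage sketch of the entry point above. The TestInfo constructor
arguments are assumed from the fields the function reads (master_name,
builder_name, build_number, step_name) plus a test name; the actual
signature may differ:

analysis = ScheduleAnalysisIfNeeded(
    # Hypothetical TestInfo values for illustration only.
    normalized_test=TestInfo('chromium.linux', 'Linux Tests', 123,
                             'browser_tests', 'SuiteA.Test1'),
    original_test=TestInfo('tryserver.chromium.linux',
                           'linux_chromium_rel_ng', 456,
                           'browser_tests (with patch)', 'PRE_SuiteA.Test1'),
    flake_key=None,
    allow_new_analysis=True,
    user_email='user@example.com',
    triggering_source=triggering_sources.FINDIT_PIPELINE)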