Example #1
def StepIsSupportedForMaster(master_name, builder_name, build_number,
                             step_name):
    if step_name == 'compile':
        canonical_step_name = step_name
    else:
        canonical_step_name = GetCanonicalStepName(master_name, builder_name,
                                                   build_number, step_name)
    return waterfall_config.StepIsSupportedForMaster(canonical_step_name,
                                                     master_name)
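
A minimal, self-contained sketch of the same decision flow with stubbed helpers, so it can be run in isolation. The suffix-stripping rule, the master/step names and the _SUPPORTED set below are illustrative stand-ins, not Findit's real GetCanonicalStepName or configuration.

# Hypothetical stand-ins; Findit's real helpers consult build metadata and
# project configuration instead.
_SUPPORTED = {('compile', 'master1'), ('browser_tests', 'master1')}


def _GetCanonicalStepName(master_name, builder_name, build_number, step_name):
    # Stub: treat "step (with patch)" as having the canonical name "step".
    return step_name.replace(' (with patch)', '')


def _StepIsSupportedForMaster(
        master_name, builder_name, build_number, step_name):
    # Same control flow as the example above: 'compile' is its own canonical
    # name; every other step is resolved before the config lookup.
    if step_name == 'compile':
        canonical_step_name = step_name
    else:
        canonical_step_name = _GetCanonicalStepName(
            master_name, builder_name, build_number, step_name)
    return (canonical_step_name, master_name) in _SUPPORTED


print(_StepIsSupportedForMaster(
    'master1', 'builder1', 123, 'browser_tests (with patch)'))  # True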
Example #2
def IsStepSupportedByFindit(test_result_object, step_name, master_name):
    """Checks if a test step is currently supported by Findit.

  Currently Findit supports all gtest test steps;
  for isolated-script-tests, Findit only supports webkit_layout_tests.

  * If there isn't a parser for the test_result of the step, it's not supported.
  * If the step is an isolated-script-test step but not webkit_layout_tests,
    it's not supported.
  * If the step is set to unsupported in config, it's not supported.
  """
    if not test_result_object:
        return False

    if not waterfall_config.StepIsSupportedForMaster(step_name, master_name):
        return False

    # TODO(crbug/836317): remove the special check for step_name when Findit
    # supports all isolated_script_tests.
    if (isinstance(test_result_object, WebkitLayoutTestResults)
            and step_name not in SUPPORTED_ISOLATED_SCRIPT_TESTS):
        return False
    return True
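
The excerpt references SUPPORTED_ISOLATED_SCRIPT_TESTS and WebkitLayoutTestResults without defining them. Below is a hedged set of stand-ins that makes the isinstance check above exercisable in isolation; the real constant and result classes live elsewhere in Findit and may differ.

# Stand-in definitions (assumptions, not Findit's real ones). Per the
# docstring, webkit_layout_tests is the only supported isolated-script test.
SUPPORTED_ISOLATED_SCRIPT_TESTS = ['webkit_layout_tests']


class BaseTestResults(object):
    """Minimal stand-in for Findit's parsed test-result objects."""


class GtestResults(BaseTestResults):
    """Stand-in for parsed gtest results; supported regardless of step name."""


class WebkitLayoutTestResults(BaseTestResults):
    """Stand-in for isolated-script-test results; only webkit_layout_tests
    passes the step-name check above."""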
Example #3
    def testStepIsSupportedForMaster(self):
        self.assertFalse(
            waterfall_config.StepIsSupportedForMaster(
                'unsupported_step1', 'master1'))
        self.assertTrue(
            waterfall_config.StepIsSupportedForMaster('step4', 'master1'))
        self.assertTrue(
            waterfall_config.StepIsSupportedForMaster('step4', 'master2'))
        self.assertFalse(
            waterfall_config.StepIsSupportedForMaster('blabla', 'blabla'))
        self.assertTrue(
            waterfall_config.StepIsSupportedForMaster('step4', 'master2'))
        self.assertTrue(
            waterfall_config.StepIsSupportedForMaster('blabla', 'master3'))
        self.assertTrue(
            waterfall_config.StepIsSupportedForMaster('step5', 'master1'))
        self.assertTrue(
            waterfall_config.StepIsSupportedForMaster('step5', 'master2'))
        self.assertFalse(
            waterfall_config.StepIsSupportedForMaster(
                'unsupported_step7', 'master2'))
        self.assertTrue(
            waterfall_config.StepIsSupportedForMaster(
                'unsupported_step6', 'master1'))
        self.assertFalse(
            waterfall_config.StepIsSupportedForMaster(
                'unsupported_step6', 'master2'))
        self.assertFalse(
            waterfall_config.StepIsSupportedForMaster(
                'unsupported_step6', 'master3'))
        self.assertFalse(
            waterfall_config.StepIsSupportedForMaster(
                'unsupported_step7', 'master3'))
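
The assertions above imply a layered lookup: per-master supported/unsupported step lists plus a global blacklist that a master can opt out of checking. One configuration and lookup that would satisfy them is sketched below; the key names (supported_masters, supported_steps, unsupported_steps, check_global) are assumptions modeled on Findit's config, not a verbatim copy.

# Hypothetical configuration consistent with the assertions above.
STEPS_FOR_MASTERS_RULES = {
    'supported_masters': {
        'master1': {
            # Listed supported steps win over the global blacklist.
            'supported_steps': ['unsupported_step6'],
            'unsupported_steps': ['unsupported_step1'],
            'check_global': True,
        },
        'master2': {
            # Only explicitly listed steps are supported.
            'supported_steps': ['step4', 'step5'],
            'check_global': False,
        },
        'master3': {
            # Everything is supported except globally blacklisted steps.
            'check_global': True,
        },
    },
    'global': {
        'unsupported_steps': ['unsupported_step6', 'unsupported_step7'],
    },
}


def _step_is_supported(step_name, master_name, rules=STEPS_FOR_MASTERS_RULES):
    master = rules['supported_masters'].get(master_name)
    if master is None:
        return False
    if step_name in master.get('supported_steps', []):
        return True
    if step_name in master.get('unsupported_steps', []):
        return False
    if not master.get('check_global', True):
        return False
    return step_name not in rules['global']['unsupported_steps']


assert not _step_is_supported('unsupported_step1', 'master1')
assert _step_is_supported('unsupported_step6', 'master1')
assert _step_is_supported('blabla', 'master3')
assert not _step_is_supported('unsupported_step6', 'master3')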
Example #4
    def run(self, failure_info):
        """Extracts failure signals from failed steps.

    Args:
      failure_info (dict): Output of pipeline DetectFirstFailurePipeline.run().

    Returns:
      A dict like the one below:
      {
        'step_name1': waterfall.failure_signal.FailureSignal.ToDict(),
        ...
      }
    """
        signals = {}
        if not failure_info['failed'] or not failure_info['chromium_revision']:
            # Bail out if no failed step or no chromium revision.
            return signals

        # Bail out on infra failure
        if failure_info.get('failure_type') == failure_type.INFRA:
            return signals

        master_name = failure_info['master_name']
        builder_name = failure_info['builder_name']
        build_number = failure_info['build_number']

        for step_name in failure_info.get('failed_steps', []):
            if not waterfall_config.StepIsSupportedForMaster(
                    step_name, master_name):
                # Bail out if the step is not supported.
                continue

            step = WfStep.Get(master_name, builder_name, build_number,
                              step_name)
            if step and step.log_data:
                failure_log = step.log_data
            else:
                # TODO: do test-level analysis instead of step-level.
                # TODO: Use swarming test result instead of archived gtest results
                gtest_result = buildbot.GetGtestResultLog(
                    master_name, builder_name, build_number, step_name)
                if gtest_result:
                    failure_log = _GetReliableTestFailureLog(gtest_result)

                if gtest_result is None or failure_log == 'invalid':
                    if not lock_util.WaitUntilDownloadAllowed(
                            master_name):  # pragma: no cover
                        raise pipeline.Retry(
                            'Failed to pull log of step %s of master %s' %
                            (step_name, master_name))
                    try:
                        failure_log = buildbot.GetStepLog(
                            master_name, builder_name, build_number, step_name,
                            self.HTTP_CLIENT)
                    except ResponseTooLargeError:  # pragma: no cover.
                        logging.exception(
                            'Log of step "%s" is too large for urlfetch.',
                            step_name)
                        # If the stdio log of a step is too large, we don't want to pull it
                        # again in the next run, because that might lead to a DDoS on the master.
                        # TODO: Use archived stdio logs in Google Storage instead.
                        failure_log = 'Stdio log is too large for urlfetch.'

                    if not failure_log:  # pragma: no cover
                        raise pipeline.Retry(
                            'Failed to pull stdio of step %s of master %s' %
                            (step_name, master_name))

                # Save step log in datastore and avoid downloading again during retry.
                if not step:  # pragma: no cover
                    step = WfStep.Create(master_name, builder_name,
                                         build_number, step_name)

                step.log_data = _ExtractStorablePortionOfLog(failure_log)

                try:
                    step.put()
                except Exception as e:  # pragma: no cover
                    # Sometimes, the step log is too large to save in datastore.
                    logging.exception(e)

            # TODO: save result in datastore?
            if step.isolated:
                try:
                    json_failure_log = (json.loads(failure_log)
                                        if failure_log != 'flaky' else {})
                except ValueError:  # pragma: no cover
                    json_failure_log = {}
                    logging.warning('failure_log %s is not valid JSON.' %
                                    failure_log)

                signals[step_name] = {'tests': {}}
                step_signal = FailureSignal()

                for test_name, test_failure_log in json_failure_log.iteritems(
                ):
                    signals[step_name]['tests'][
                        test_name] = extractors.ExtractSignal(
                            master_name, builder_name, step_name, test_name,
                            base64.b64decode(test_failure_log)).ToDict()

                    # Save signals in test failure log to step level.
                    step_signal.MergeFrom(
                        signals[step_name]['tests'][test_name])

                signals[step_name]['files'] = step_signal.files
                signals[step_name]['keywords'] = step_signal.keywords
            else:
                signals[step_name] = extractors.ExtractSignal(
                    master_name, builder_name, step_name, None,
                    failure_log).ToDict()

        return signals
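
A small consumer-side sketch of the dict run() returns, using only the keys visible above ('tests', 'files', 'keywords'); FailureSignal.ToDict() may carry additional fields in practice.

def SummarizeSignals(signals):
    # `signals` is shaped like the return value documented above, e.g.
    # {'compile': {'files': {'a/b/x.cc': [123]}, 'keywords': {}}, ...}.
    for step_name, signal in sorted(signals.items()):
        files = signal.get('files', {})
        print('%s: %d suspicious file(s)' % (step_name, len(files)))
        for test_name, test_signal in sorted(signal.get('tests', {}).items()):
            test_files = sorted(test_signal.get('files', {}))
            print('  %s -> %s' % (test_name, test_files))


SummarizeSignals({
    'compile': {'files': {'a/b/x.cc': [123]}, 'keywords': {}},
})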
Example #5
def AnalyzeBuildFailure(failure_info, change_logs, deps_info, failure_signals):
    """Analyzes the given failure signals, and figure out culprit CLs.

  Args:
    failure_info (dict): Output of pipeline DetectFirstFailurePipeline.
    change_logs (dict): Output of pipeline PullChangelogPipeline.
    deps_info (dict): Output of pipeline ExtractDEPSInfoPipeline.
    failure_signals (dict): Output of pipeline ExtractSignalPipeline.

  Returns:
    A dict with the following form:
    {
      'failures': [
        {
          'step_name': 'compile',
          'supported': True,
          'first_failure': 230,
          'last_pass': 229,
          'suspected_cls': [
            {
              'build_number': 230,
              'repo_name': 'chromium',
              'revision': 'a_git_hash',
              'commit_position': 56789,
              'score': 11,
              'hints': {
                'add a/b/x.cc': 5,
                'delete a/b/y.cc': 5,
                'modify e/f/z.cc': 1,
                ...
              }
            },
            ...
          ],
        },
        ...
      ]
    }

    And a list of suspected_cls in the format below:
    [
        {
            'repo_name': 'chromium',
            'revision': 'r98_1',
            'commit_position': None,
            'url': None,
            'failures': {
                'b': ['Unittest2.Subtest1', 'Unittest3.Subtest2']
            },
            'top_score': 4
        },
        ...
    ]
  """
    analysis_result = {'failures': []}

    if not failure_info['failed'] or not failure_info['chromium_revision']:
        # Bail out if no failed step or no chromium revision.
        return analysis_result, []

    # Bail out on infra failure
    if failure_info.get('failure_type') == failure_type.INFRA:
        return analysis_result, []

    def CreateCLInfoDict(justification_dict, build_number, change_log):
        # TODO(stgao): remove hard-coded 'chromium' when DEPS file parsing is
        # supported.
        cl_info = {
            'build_number': build_number,
            'repo_name': 'chromium',
            'revision': change_log['revision'],
            'commit_position': change_log.get('commit_position'),
            'url': (change_log.get('code_review_url')
                    or change_log.get('commit_url')),
        }

        cl_info.update(justification_dict)
        return cl_info

    failed_steps = failure_info['failed_steps']
    builds = failure_info['builds']
    master_name = failure_info['master_name']

    cl_failure_map = defaultdict(_CLInfo)

    for step_name, step_failure_info in failed_steps.iteritems():
        is_test_level = step_failure_info.get('tests') is not None

        failed_build_number = step_failure_info['current_failure']
        if step_failure_info.get('last_pass') is not None:
            start_build_number = step_failure_info.get('last_pass') + 1
        else:
            start_build_number = step_failure_info['first_failure']
        step_analysis_result = {
            'step_name': step_name,
            'first_failure': step_failure_info['first_failure'],
            'last_pass': step_failure_info.get('last_pass'),
            'suspected_cls': [],
            'supported': waterfall_config.StepIsSupportedForMaster(
                step_name, master_name),
        }

        if is_test_level:
            step_analysis_result['tests'] = []
            for test_name, test_failure in step_failure_info[
                    'tests'].iteritems():
                test_analysis_result = {
                    'test_name': test_name,
                    'first_failure': test_failure['first_failure'],
                    'last_pass': test_failure.get('last_pass'),
                    'suspected_cls': [],
                }
                step_analysis_result['tests'].append(test_analysis_result)

        if step_analysis_result['supported']:
            for build_number in range(start_build_number,
                                      failed_build_number + 1):
                for revision in builds[str(build_number)]['blame_list']:
                    if is_test_level:
                        # Checks files at test level.
                        for test_analysis_result in step_analysis_result[
                                'tests']:
                            test_name = test_analysis_result['test_name']
                            test_signal = FailureSignal.FromDict(
                                failure_signals[step_name]['tests'].get(
                                    test_name, {}))

                            justification_dict = _CheckFiles(
                                test_signal, change_logs[revision], deps_info)

                            if not justification_dict:
                                continue

                            new_suspected_cl_dict = CreateCLInfoDict(
                                justification_dict, build_number,
                                change_logs[revision])
                            test_analysis_result['suspected_cls'].append(
                                new_suspected_cl_dict)

                            _SaveFailureToMap(
                                cl_failure_map, new_suspected_cl_dict,
                                step_name, test_name,
                                max(justification_dict['hints'].values()))

                    # Check files at the step level using step-level signals,
                    # regardless of test-level signals, to make sure no duplicate
                    # justifications are added to the step result.
                    failure_signal = FailureSignal.FromDict(
                        failure_signals[step_name])
                    justification_dict = _CheckFiles(failure_signal,
                                                     change_logs[revision],
                                                     deps_info)

                    if not justification_dict:
                        continue

                    new_suspected_cl_dict = CreateCLInfoDict(
                        justification_dict, build_number,
                        change_logs[revision])
                    step_analysis_result['suspected_cls'].append(
                        new_suspected_cl_dict)

                    if not is_test_level:
                        _SaveFailureToMap(
                            cl_failure_map, new_suspected_cl_dict, step_name,
                            None, max(justification_dict['hints'].values()))

        # TODO(stgao): sort CLs by score.
        analysis_result['failures'].append(step_analysis_result)

    suspected_cls = _ConvertCLFailureMapToList(cl_failure_map)

    return analysis_result, suspected_cls
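
A short consumer-side sketch of how the returned pair might be used, picking the highest-scoring suspected CL per failed step. It relies only on keys documented in the docstring above and is an illustration, not Findit code.

def TopSuspectPerStep(analysis_result):
    # Maps each failed step name to the revision of its highest-scoring
    # suspected CL, using only documented keys.
    top = {}
    for failure in analysis_result['failures']:
        suspected_cls = failure.get('suspected_cls') or []
        if suspected_cls:
            best = max(suspected_cls, key=lambda cl: cl.get('score', 0))
            top[failure['step_name']] = best['revision']
    return top


example_result = {
    'failures': [{
        'step_name': 'compile',
        'suspected_cls': [
            {'revision': 'a_git_hash', 'score': 11},
            {'revision': 'another_git_hash', 'score': 3},
        ],
    }],
}
print(TopSuspectPerStep(example_result))  # {'compile': 'a_git_hash'}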