def testBuildFailurePipelineFlow(self):
    master_name = 'm'
    builder_name = 'b'
    build_number = 124
    current_failure_info = {}

    self._SetupAnalysis(master_name, builder_name, build_number)

    heuristic_params = CompileHeuristicAnalysisParameters.FromSerializable({
        'failure_info': current_failure_info,
        'build_completed': False
    })
    heuristic_output = CompileHeuristicAnalysisOutput.FromSerializable({
        'failure_info': None,
        'signals': None,
        'heuristic_result': {}
    })
    self.MockSynchronousPipeline(
        analyze_compile_failure_pipeline.HeuristicAnalysisForCompilePipeline,
        heuristic_params, heuristic_output)

    start_try_job_params = StartCompileTryJobInput(
        build_key=BuildKey(
            master_name=master_name,
            builder_name=builder_name,
            build_number=build_number),
        heuristic_result=heuristic_output,
        build_completed=False,
        force=False)
    self.MockGeneratorPipeline(
        analyze_compile_failure_pipeline.StartCompileTryJobPipeline,
        start_try_job_params, False)

    report_event_input = report_event_pipeline.ReportEventInput(
        analysis_urlsafe_key=WfAnalysis.Get(master_name, builder_name,
                                            build_number).key.urlsafe())
    self.MockGeneratorPipeline(
        report_event_pipeline.ReportAnalysisEventPipeline, report_event_input,
        None)

    pipeline_input = AnalyzeCompileFailureInput(
        build_key=BuildKey(
            master_name=master_name,
            builder_name=builder_name,
            build_number=build_number),
        current_failure_info=CompileFailureInfo.FromSerializable(
            current_failure_info),
        build_completed=False,
        force=False)
    root_pipeline = AnalyzeCompileFailurePipeline(pipeline_input)
    root_pipeline.start(queue_name=constants.DEFAULT_QUEUE)
    self.execute_queued_tasks()
    analysis = WfAnalysis.Get(master_name, builder_name, build_number)
    self.assertEqual(analysis_status.RUNNING, analysis.status)
Example #2
def _GetResultAndFailureResultMap(master_name, builder_name, build_number):
    analysis = WfAnalysis.Get(master_name, builder_name, build_number)

    # If this analysis is part of a group, get the build analysis that opened the
    # group.
    if analysis and analysis.failure_group_key:
        analysis = WfAnalysis.Get(*analysis.failure_group_key)

    if not analysis:
        return None, None

    return analysis.result, analysis.failure_result_map
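
For context, here is a minimal, hypothetical sketch of how a caller might consume the (result, failure_result_map) pair returned above; the handler name and response shape are assumptions, not part of the Findit source.

def _BuildAnalysisResponse(master_name, builder_name, build_number):
    # Hypothetical consumer: package the analysis result for a status endpoint.
    result, failure_result_map = _GetResultAndFailureResultMap(
        master_name, builder_name, build_number)
    if result is None and failure_result_map is None:
        # No analysis (or group representative) exists for this build yet.
        return {'analysis_available': False}
    return {
        'analysis_available': True,
        'result': result,
        'failure_result_map': failure_result_map,
    }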
Example #3
  def testAnalyzeCompileFailurePipelineAbortedIfWithError(self, mock_mon):
    master_name = 'm'
    builder_name = 'b'
    build_number = 124

    self._SetupAnalysis(
        master_name, builder_name, build_number, status=analysis_status.RUNNING)

    pipeline_input = AnalyzeCompileFailureInput(
        build_key=BuildKey(
            master_name=master_name,
            builder_name=builder_name,
            build_number=build_number),
        current_failure_info=CompileFailureInfo.FromSerializable({}),
        build_completed=False,
        force=True)
    root_pipeline = AnalyzeCompileFailurePipeline(pipeline_input)
    root_pipeline.OnAbort(pipeline_input)

    analysis = WfAnalysis.Get(master_name, builder_name, build_number)
    self.assertIsNotNone(analysis)
    self.assertEqual(analysis_status.ERROR, analysis.status)
    self.assertIsNone(analysis.result_status)
    self.assertTrue(analysis.aborted)
    mock_mon.assert_called_once_with(master_name, builder_name,
                                     analysis_status.ERROR,
                                     analysis_approach_type.HEURISTIC)
Example #4
  def run(self, failure_info, change_logs, deps_info, signals):
    """
    Args:
      failure_info (dict): Output of pipeline DetectFirstFailurePipeline.
      change_logs (dict): Output of pipeline PullChangelogPipeline.
      deps_info (dict): Output of the pipeline that extracts DEPS info.
      signals (dict): Output of pipeline ExtractSignalPipeline.

    Returns:
      The same dict as the returned value of function
      build_failure_analysis.AnalyzeBuildFailure.
    """
    master_name = failure_info['master_name']
    builder_name = failure_info['builder_name']
    build_number = failure_info['build_number']

    analysis_result = build_failure_analysis.AnalyzeBuildFailure(
        failure_info, change_logs, deps_info, signals)
    analysis = WfAnalysis.Get(master_name, builder_name, build_number)
    analysis.result = analysis_result
    analysis.status = wf_analysis_status.ANALYZED
    analysis.result_status = _GetResultAnalysisStatus(analysis_result)
    analysis.suspected_cls = _GetSuspectedCLs(analysis_result)
    analysis.end_time = datetime.utcnow()
    analysis.put()

    return analysis_result
Example #5
  def testAppendTriageHistoryRecordWithHistory(self):
    analysis = WfAnalysis.Create(
        self.master_name, self.builder_name, self.build_number_1)
    analysis.version = 'version'
    analysis.triage_history = [{'some_info': True}]
    analysis.put()
    cl_info = '%s/%s' % (self.repo_name, self.revision_1)

    mocked_now = datetime(2017, 5, 1, 10, 10, 10)
    mocked_timestamp = calendar.timegm(mocked_now.timetuple())
    self.MockUTCNow(mocked_now)

    triage_suspected_cl._AppendTriageHistoryRecord(
        self.master_name, self.builder_name, self.build_number_1,
        cl_info, suspected_cl_status.CORRECT, 'test')
    analysis = WfAnalysis.Get(
        self.master_name, self.builder_name, self.build_number_1)

    expected_history = [
        {'some_info': True},
        {
          'triage_timestamp': mocked_timestamp,
          'user_name': 'test',
          'cl_status': suspected_cl_status.CORRECT,
          'version': 'version',
          'triaged_cl': cl_info
        }
    ]
    self.assertEqual(analysis.triage_history, expected_history)
    self.assertFalse(analysis.triage_email_obscured)
    self.assertEqual(mocked_now, analysis.triage_record_last_add)
Example #6
    def _ExtractBuildInfo(self, master_name, builder_name, build_number):
        """Returns a BuildInfo instance for the specified build."""
        build = build_util.DownloadBuildData(master_name, builder_name,
                                             build_number)

        if build is None:  # pragma: no cover
            raise pipeline.Retry('Too many downloads from %s' % master_name)
        if not build.data:  # pragma: no cover
            return None

        build_info = buildbot.ExtractBuildInfo(master_name, builder_name,
                                               build_number, build.data)

        if not build.completed:
            build.start_time = build_info.build_start_time
            build.completed = build_info.completed
            build.result = build_info.result
            build.put()

        analysis = WfAnalysis.Get(master_name, builder_name, build_number)
        if analysis and not analysis.build_start_time:
            analysis.build_start_time = build_info.build_start_time
            analysis.put()

        return build_info
Example #7
    def testModifyStatusIfDuplicateSingleAnalysisResult(self):
        analyses = self._CreateAnalyses('m', 'b', 1)

        check_duplicate_failures._ModifyStatusIfDuplicate(analyses[0])

        analysis = WfAnalysis.Get('m', 'b', 0)
        self.assertEqual(result_status.FOUND_UNTRIAGED, analysis.result_status)
Example #8
def _UpdateAnalysisResultStatus(
    master_name, builder_name, build_number, correct, user_name=None):
  analysis = WfAnalysis.Get(master_name, builder_name, build_number)
  if not analysis or not analysis.completed:
    return False

  if correct:
    if analysis.suspected_cls:
      analysis.result_status = wf_analysis_result_status.FOUND_CORRECT
      analysis.culprit_cls = analysis.suspected_cls
    else:
      analysis.result_status = wf_analysis_result_status.NOT_FOUND_CORRECT
      analysis.culprit_cls = None
  else:
    analysis.culprit_cls = None
    if analysis.suspected_cls:
      analysis.result_status = wf_analysis_result_status.FOUND_INCORRECT
    else:
      analysis.result_status = wf_analysis_result_status.NOT_FOUND_INCORRECT

  triage_record = {
      'triage_timestamp': calendar.timegm(datetime.utcnow().timetuple()),
      'user_name': user_name,
      'result_status': analysis.result_status,
      'version': analysis.version,
  }
  if not analysis.triage_history:
    analysis.triage_history = []
  analysis.triage_history.append(triage_record)

  analysis.put()
  return True
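
As a rough usage sketch (assuming the helper above is in scope), a triage handler might forward a human verdict like this; the wrapper function and its response dict are illustrative only.

def _HandleTriageVerdict(master_name, builder_name, build_number, correct,
                         user_name):
    # Hypothetical triage endpoint: record whether the analysis result was
    # correct and report whether the update could be applied.
    updated = _UpdateAnalysisResultStatus(
        master_name, builder_name, build_number, correct, user_name=user_name)
    # False means the analysis is missing or has not completed yet.
    return {'success': updated}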
Example #9
    def run(self, failure_info, change_logs, deps_info, signals,
            build_completed):
        """Identifies culprit CL.

    Args:
      failure_info (dict): Output of pipeline DetectFirstFailurePipeline.
      change_logs (dict): Output of pipeline PullChangelogPipeline.
      signals (dict): Output of pipeline ExtractSignalPipeline.

    Returns:
      analysis_result returned by build_failure_analysis.AnalyzeBuildFailure.
    """
        master_name = failure_info['master_name']
        builder_name = failure_info['builder_name']
        build_number = failure_info['build_number']

        analysis_result, suspected_cls = build_failure_analysis.AnalyzeBuildFailure(
            failure_info, change_logs, deps_info, signals)
        analysis = WfAnalysis.Get(master_name, builder_name, build_number)
        analysis.build_completed = build_completed
        analysis.result = analysis_result
        analysis.status = analysis_status.COMPLETED
        analysis.result_status = _GetResultAnalysisStatus(analysis_result)
        analysis.suspected_cls = _GetSuspectedCLsWithOnlyCLInfo(suspected_cls)
        analysis.end_time = time_util.GetUTCNow()
        analysis.put()

        # Save suspected_cls to data_store.
        _SaveSuspectedCLs(suspected_cls, failure_info['master_name'],
                          failure_info['builder_name'],
                          failure_info['build_number'],
                          failure_info['failure_type'])
        return analysis_result
Example #10
    def testModifyStatusIfDuplicateFirstResultUntriaged(self):
        analyses = self._CreateAnalyses('m', 'b', 3)
        check_duplicate_failures._ModifyStatusIfDuplicate(analyses[1])

        analysis_one = WfAnalysis.Get('m', 'b', 1)
        self.assertEqual(result_status.FOUND_UNTRIAGED,
                         analysis_one.result_status)
Example #11
def UpdateAbortedAnalysis(parameters):
    """Updates analysis and checks if there is enough information to run a try job
   even if analysis aborts.

  Args:
    parameters(AnalyzeCompileFailureInput): Inputs to analyze a compile failure.

  Returns:
    (WfAnalysis, bool): WfAnalysis object and a bool value indicates if can
      resume the try job or not.
  """
    master_name, builder_name, build_number = parameters.build_key.GetParts()
    analysis = WfAnalysis.Get(master_name, builder_name, build_number)
    assert analysis, ('WfAnalysis Object for {}/{}/{} was missing'.format(
        master_name, builder_name, build_number))

    # Heuristic analysis could have already completed, while triggering the
    # try job kept failing and led to the abort.
    run_try_job = False
    heuristic_aborted = False
    if not analysis.completed:
        # Heuristic analysis is aborted.
        analysis.status = analysis_status.ERROR
        analysis.result_status = None
        heuristic_aborted = True

        if analysis.failure_info:
            # We need failure_info to run try jobs,
            # while signals is optional for compile try jobs.
            run_try_job = True
    analysis.aborted = True
    analysis.put()
    return analysis, run_try_job, heuristic_aborted
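
A hedged sketch of how an abort handler could use UpdateAbortedAnalysis, loosely mirroring the OnAbort tests elsewhere in this listing; the monitoring and try-job helpers are placeholders, not the actual Findit APIs.

def _OnAnalysisAbort(parameters):
    # Hypothetical abort flow: mark the analysis, report a heuristic error if
    # the heuristic phase was the part that aborted, and resume the compile
    # try job when enough information (failure_info) is available.
    analysis, run_try_job, heuristic_aborted = UpdateAbortedAnalysis(parameters)
    if heuristic_aborted:
        _ReportAbortedHeuristicAnalysis(analysis)  # placeholder helper
    if run_try_job:
        _ResumeCompileTryJob(parameters, analysis)  # placeholder helper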
Example #12
  def testAnalyzeCompileFailurePipelineNotAbortedIfWithoutError(self, mock_mon):
    master_name = 'm'
    builder_name = 'b'
    build_number = 124

    self._SetupAnalysis(
        master_name,
        builder_name,
        build_number,
        status=analysis_status.COMPLETED)

    pipeline_input = AnalyzeCompileFailureInput(
        build_key=BuildKey(
            master_name=master_name,
            builder_name=builder_name,
            build_number=build_number),
        current_failure_info=CompileFailureInfo.FromSerializable({}),
        build_completed=False,
        force=True)
    root_pipeline = AnalyzeCompileFailurePipeline(pipeline_input)
    root_pipeline.OnAbort(pipeline_input)

    analysis = WfAnalysis.Get(master_name, builder_name, build_number)
    self.assertIsNotNone(analysis)
    self.assertNotEqual(analysis_status.ERROR, analysis.status)
    self.assertFalse(mock_mon.called)
Example #13
  def testUpdateAnalysisPartiallyTriaged(self):
    analysis = WfAnalysis.Create(
        self.master_name, self.builder_name, self.build_number_1)

    analysis.suspected_cls = [self.suspected_cl_1, self.suspected_cl_2]
    analysis.result_status = result_status.FOUND_UNTRIAGED
    analysis.put()

    success = triage_suspected_cl._UpdateAnalysis(
      self.master_name, self.builder_name, self.build_number_1,
      self.repo_name, self.revision_1, suspected_cl_status.CORRECT)

    expected_suspected_cls = [
      {
        'repo_name': self.repo_name,
        'revision': self.revision_1,
        'commit_position': self.commit_position,
        'url': 'https://codereview.chromium.org/123',
        'status': suspected_cl_status.CORRECT
      },
      self.suspected_cl_2
    ]

    analysis = WfAnalysis.Get(
        self.master_name, self.builder_name, self.build_number_1)
    self.assertTrue(success)
    self.assertEqual(analysis.result_status, result_status.FOUND_UNTRIAGED)
    self.assertEqual(analysis.suspected_cls, expected_suspected_cls)
Example #14
    def testSecondAnalysisFailureGroupKeySet(self):
        master_name = 'm1'
        builder_name = 'b'
        build_number = 1
        master_name_2 = 'm2'

        blame_list = ['a']

        signals = {'compile': {'failed_output_nodes': ['abc.obj']}}

        WfAnalysis.Create(master_name, builder_name, build_number).put()
        # Run pipeline with signals that have certain failed output nodes.
        # Observe new group creation.
        self.assertTrue(
            try_job_util._IsBuildFailureUniqueAcrossPlatforms(
                master_name, builder_name, build_number, failure_type.COMPILE,
                blame_list, None, signals, None))

        WfAnalysis.Create(master_name_2, builder_name, build_number).put()
        # Run pipeline with signals that have the same failed output nodes.
        # Observe no new group creation.
        self.assertFalse(
            try_job_util._IsBuildFailureUniqueAcrossPlatforms(
                master_name_2, builder_name, build_number,
                failure_type.COMPILE, blame_list, None, signals, None))

        analysis_2 = WfAnalysis.Get(master_name_2, builder_name, build_number)
        self.assertEqual([master_name, builder_name, build_number],
                         analysis_2.failure_group_key)
Example #15
def _UpdateAnalysis(master_name, builder_name, build_number, repo_name,
                    revision, cl_status):
    analysis = WfAnalysis.Get(master_name, builder_name, build_number)
    if not analysis or not analysis.suspected_cls:
        return False

    num_correct = 0
    num_incorrect = 0
    for cl in analysis.suspected_cls:
        if cl['repo_name'] == repo_name and cl['revision'] == revision:
            # Updates this cl's status.
            cl['status'] = cl_status

        # Checks if all the cls have been triaged and checks the status of each cl
        # on the build.
        if cl.get('status') == suspected_cl_status.CORRECT:
            num_correct += 1
        elif cl.get('status') == suspected_cl_status.INCORRECT:
            num_incorrect += 1

    if num_correct + num_incorrect == len(
            analysis.suspected_cls):  # All triaged.
        if num_correct == 0:
            analysis.result_status = result_status.FOUND_INCORRECT
        elif num_incorrect == 0:
            analysis.result_status = result_status.FOUND_CORRECT
        else:
            analysis.result_status = result_status.PARTIALLY_CORRECT_FOUND

    analysis.put()
    return True
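
Below is a small assumed example of driving _UpdateAnalysis from a per-CL triage request; the wrapper is hypothetical, while the status constants are the ones referenced above.

def _TriageSuspectedCl(master_name, builder_name, build_number, repo_name,
                       revision, correct):
    # Hypothetical caller: translate a boolean verdict into a CL status and
    # let _UpdateAnalysis roll it up into the analysis result_status.
    cl_status = (suspected_cl_status.CORRECT
                 if correct else suspected_cl_status.INCORRECT)
    return _UpdateAnalysis(master_name, builder_name, build_number, repo_name,
                           revision, cl_status)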
Example #16
def UpdateAnalysisWithFlakesFoundBySwarmingReruns(master_name, builder_name,
                                                  build_number, flaky_tests):
  """Updates WfAnalysis about flaky tests found by swarming reruns.

  Args:
    master_name(str): Name of the master.
    builder_name(str): Name of the builder.
    build_number(int): Number of the build.
    flaky_tests(dict): A dict of flaky tests.
  """

  if not flaky_tests:
    return

  analysis = WfAnalysis.Get(master_name, builder_name, build_number)
  assert analysis
  if not analysis.result or analysis.flaky_tests == flaky_tests:
    return

  updated_result, all_flaked = UpdateAnalysisResultWithFlakeInfo(
      analysis.result, flaky_tests)
  updated_result_status = result_status.FLAKY if all_flaked else None
  analysis.UpdateWithNewFindings(
      updated_result_status=updated_result_status,
      updated_result=updated_result,
      flaky_tests=flaky_tests)
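
A sketch, under assumptions, of how swarming rerun results might be folded into the helper above; the rerun_results shape and the step-name to test-list layout of flaky_tests are inferred from the surrounding snippets, not confirmed by the source.

def _RecordFlakesFromReruns(master_name, builder_name, build_number,
                            rerun_results):
    # Hypothetical wiring: rerun_results is assumed to map step name -> dict of
    # test name -> bool (True if the test passed on rerun, i.e. looked flaky).
    flaky_tests = {}
    for step_name, tests in rerun_results.items():
        flaky = [name for name, passed_on_rerun in tests.items()
                 if passed_on_rerun]
        if flaky:
            flaky_tests[step_name] = flaky
    UpdateAnalysisWithFlakesFoundBySwarmingReruns(
        master_name, builder_name, build_number, flaky_tests)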
Example #17
def GetsFirstFailureAtTestLevel(master_name, builder_name, build_number,
                                failure_info, force):
  """Gets first time failed steps and tests in the build that has not been
    analyzed.

  This function will also update analysis.failure_result_map for new failures
  that has not been analyzed.

  But if force is True, this function will return all first time failures in the
  build and not update analysis.failure_result_map.

  Args:
    master_name(str): Name of the master.
    builder_name(str): Name of the builder.
    build_number(int): Number of the build.
    failure_info(TestFailureInfo): Information about the build failure.
    force(bool): If the analysis is a forced rerun.
  """
  analysis = WfAnalysis.Get(master_name, builder_name, build_number)

  if not analysis:
    return {}

  # A dict to store all the first-time failed steps and/or tests which
  # have not triggered a swarming task yet.
  result_steps = defaultdict(list)
  failure_result_map = analysis.failure_result_map

  for failed_step_name, step_failure_details in (
      failure_info.failed_steps.iteritems()):
    if not step_failure_details.tests or not step_failure_details.supported:
      # Not a test type Findit currently handles.
      continue

    if not force:
      if failure_result_map.get(failed_step_name):
        # The step has been processed.
        continue
      else:
        failure_result_map[failed_step_name] = {}

    for failed_test_name, test_failure_details in (
        step_failure_details.tests.iteritems()):
      if not force:
        # Updates analysis.failure_result_map only when the analysis runs for
        # the first time.
        task_key = '%s/%s/%s' % (master_name, builder_name,
                                 test_failure_details.first_failure)
        failure_result_map[failed_step_name][failed_test_name] = task_key

      if (test_failure_details.first_failure ==
          test_failure_details.current_failure):
        # First time failure, add to result_steps.
        result_steps[failed_step_name].append(
            test_failure_details.base_test_name)

  if not force:
    analysis.put()
  return result_steps
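
One possible consumer of the step to tests mapping returned above, sketched with a placeholder trigger helper rather than the real swarming task API.

def _TriggerRerunsForFirstFailures(master_name, builder_name, build_number,
                                   failure_info, force):
    # Hypothetical consumer: kick off one rerun task per step that has
    # first-time test failures which still need analysis.
    result_steps = GetsFirstFailureAtTestLevel(
        master_name, builder_name, build_number, failure_info, force)
    for step_name, test_names in result_steps.items():
        _TriggerSwarmingRerun(  # placeholder for the actual task trigger
            master_name, builder_name, build_number, step_name, test_names)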
Example #18
 def testUpdateAnalysisResultStatusWhenAnalysisIsIncomplete(self):
     success = triage_analysis._UpdateAnalysisResultStatus(
         self.master_name, self.builder_name, self.build_number_incomplete,
         True)
     self.assertFalse(success)
     analysis = WfAnalysis.Get(self.master_name, self.builder_name,
                               self.build_number_found)
     self.assertIsNone(analysis.result_status)
Example #19
  def testObscureTriageRecordsInWfAnalysis(self):
    mocked_utcnow = datetime(2017, 5, 5, 22, 50, 10)
    self.MockUTCNow(mocked_utcnow)
    valid_record_time = obscure_emails._TimeBeforeNow(
        days=obscure_emails._TRIAGE_RECORD_RENTENSION_DAYS - 10)
    invalid_record_time = obscure_emails._TimeBeforeNow(
        days=obscure_emails._TRIAGE_RECORD_RENTENSION_DAYS + 10)

    old_analysis = WfAnalysis.Create('m', 'b', 1)
    old_analysis.triage_history = [{
        'user_name': '*****@*****.**',
    }]
    old_analysis.triage_email_obscured = False
    old_analysis.triage_record_last_add = invalid_record_time
    old_analysis.put()

    recent_analysis = WfAnalysis.Create('m', 'b', 100000)
    recent_analysis.triage_history = [{
        'user_name': '*****@*****.**',
    }]
    recent_analysis.triage_email_obscured = False
    recent_analysis.triage_record_last_add = valid_record_time
    recent_analysis.put()

    response = self.test_app.get(
        '/obscure-emails',
        params={'format': 'json'},
        headers={'X-AppEngine-Cron': 'true'},
    )
    expected_response = {
        'failure_triage_count': 1,
        'flake_triage_count': 0,
        'flake_request_aggregated_count': 0,
        'flake_request_count': 0,
    }
    self.assertEqual(expected_response, response.json_body)

    old_analysis = WfAnalysis.Get('m', 'b', 1)
    self.assertEqual('*****@*****.**',
                     old_analysis.triage_history[0]['user_name'])
    self.assertTrue(old_analysis.triage_email_obscured)

    recent_analysis = WfAnalysis.Get('m', 'b', 100000)
    self.assertEqual('*****@*****.**',
                     recent_analysis.triage_history[0]['user_name'])
    self.assertFalse(recent_analysis.triage_email_obscured)
Example #20
    def RunImpl(self, build_key):
        """Triggers flake analyses for flaky tests found by CI failure analysis."""
        master_name, builder_name, build_number = build_key.GetParts()
        flake_settings = waterfall_config.GetCheckFlakeSettings()
        throttled = flake_settings.get('throttle_flake_analyses', True)

        analysis = WfAnalysis.Get(master_name, builder_name, build_number)

        if not analysis or not analysis.flaky_tests:
            return

        analysis_counts = defaultdict(lambda: defaultdict(int))
        for step, flaky_tests in analysis.flaky_tests.iteritems():
            logging.info('%s/%s/%s/%s has %s flaky tests.', master_name,
                         builder_name, build_number, step, len(flaky_tests))

            for test_name in flaky_tests:
                # TODO(crbug.com/904050): Deprecate FlakeAnalysisRequest in favor of
                # Flake.
                flake = flake_util.GetFlake(_LUCI_PROJECT, step, test_name,
                                            master_name, builder_name,
                                            build_number)
                request = FlakeAnalysisRequest.Create(test_name, False, None)
                request.AddBuildStep(master_name, builder_name, build_number,
                                     step, time_util.GetUTCNow())
                request.flake_key = flake.key
                scheduled = flake_analysis_service.ScheduleAnalysisForFlake(
                    request, '*****@*****.**',
                    False, triggering_sources.FINDIT_PIPELINE)
                if scheduled:  # pragma: no branch
                    analysis_counts[step]['analyzed'] += 1
                    logging.info(
                        'A flake analysis has been triggered for %s/%s', step,
                        test_name)
                    if throttled and len(flaky_tests) > 1:
                        logging.info(
                            'Throttling is enabled, skipping %d tests.',
                            len(flaky_tests) - 1)
                        analysis_counts[step]['throttled'] = len(
                            flaky_tests) - 1
                        break  # If we're throttled, stop after the first.
                else:
                    # The flake analysis was not scheduled for this test.
                    analysis_counts[step]['error'] += 1

        for step, step_counts in analysis_counts.iteritems():
            # Collects metrics.
            step_metadata = step_util.GetStepMetadata(master_name,
                                                      builder_name,
                                                      build_number, step)
            canonical_step_name = step_metadata.get(
                'canonical_step_name') or 'Unknown'
            isolate_target_name = step_metadata.get(
                'isolate_target_name') or 'Unknown'

            for operation, count in step_counts.iteritems():
                monitoring.OnFlakeIdentified(canonical_step_name,
                                             isolate_target_name, operation,
                                             count)
Example #21
  def testIdentifyCulpritForFlakyCompile(self):
    master_name = 'm'
    builder_name = 'b'
    build_number = 1
    try_job_id = '1'

    compile_result = {
        'report': {
            'result': {
                'rev1': 'failed',
                'rev2': 'failed'
            },
            'metadata': {
                'sub_ranges': [
                  [
                      None,
                      'rev2'
                  ]
                ]
            }
        },
        'url': 'url',
        'try_job_id': try_job_id,
    }

    self._CreateEntities(master_name, builder_name, build_number, try_job_id)

    analysis = WfAnalysis.Create(master_name, builder_name, build_number)
    analysis.result = {
        'failures': [
            {
                'step_name': 'compile',
                'suspected_cls': []
            }
        ]
    }
    analysis.put()

    self.MockPipeline(RevertAndNotifyCulpritPipeline,
                      None,
                      expected_args=[master_name, builder_name, build_number,
                                     {}, [], failure_type.COMPILE])
    pipeline = IdentifyTryJobCulpritPipeline(
        master_name, builder_name, build_number,
        failure_type.COMPILE, '1', compile_result)
    pipeline.start()
    self.execute_queued_tasks()

    try_job = WfTryJob.Get(master_name, builder_name, build_number)

    self.assertEqual(analysis_status.COMPLETED, try_job.status)

    try_job_data = WfTryJobData.Get(try_job_id)
    self.assertIsNone(try_job_data.culprits)

    analysis = WfAnalysis.Get(master_name, builder_name, build_number)
    self.assertEqual(result_status.FLAKY, analysis.result_status)
    self.assertEqual([], analysis.suspected_cls)
Example #22
def NeedANewAnalysis(master_name, builder_name, build_number, failed_steps,
                     build_completed, force):
    """Checks status of analysis for the build and decides if a new one is needed.

  A WfAnalysis entity for the given build will be created if none exists.
  When a new analysis is needed, this function will create and save a WfAnalysis
  entity to the datastore, or it will reset the existing one but still keep the
  result of last analysis.

  Returns:
    True if an analysis is needed, otherwise False.
  """
    analysis = WfAnalysis.Get(master_name, builder_name, build_number)

    if not analysis:
        # The build failure is not analyzed yet.
        analysis = WfAnalysis.Create(master_name, builder_name, build_number)
        analysis.status = analysis_status.PENDING
        analysis.request_time = time_util.GetUTCNow()
        analysis.put()
        return True
    elif force:
        # A new analysis could be forced if last analysis was completed.
        if not analysis.completed:
            # TODO: start a new analysis if the last one has started running but it
            # has no update for a considerable amount of time, eg. 10 minutes.
            logging.info(
                'Existing analysis is not completed yet. No new analysis.')
            return False

        analysis.Reset()
        analysis.request_time = time_util.GetUTCNow()
        analysis.put()
        return True
    elif failed_steps and analysis.completed:
        # If there is any new failed step, a new analysis is needed.
        for step in failed_steps:
            analyzed = any(step == s for s in analysis.not_passed_steps)
            if analyzed:
                continue

            logging.info('At least one new failed step is detected: %s', step)
            analysis.Reset()
            analysis.request_time = time_util.GetUTCNow()
            analysis.put()
            return True

    # Start a new analysis if the build cycle wasn't completed during the last
    # analysis but is completed now. This will potentially trigger a try-job run.
    if analysis.completed and not analysis.build_completed and build_completed:
        return True

    # TODO: support following cases
    # * Automatically retry if last analysis failed with errors.
    # * Analysis is not complete and no update in the last 5 minutes.
    logging.info('Not match any cases. No new analysis.')
    return False
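
Finally, a hedged example of gating pipeline scheduling on NeedANewAnalysis; the scheduling helper is a placeholder for whatever actually starts the analysis pipeline.

def _MaybeScheduleAnalysis(master_name, builder_name, build_number,
                           failed_steps, build_completed, force):
    # Hypothetical gate: only start a (re)analysis when the checks above say
    # one is actually needed, and report back whether anything was scheduled.
    if NeedANewAnalysis(master_name, builder_name, build_number, failed_steps,
                        build_completed, force):
        _ScheduleAnalysisPipeline(  # placeholder helper
            master_name, builder_name, build_number, build_completed, force)
        return True
    return False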
Example #23
 def testUpdateAnalysisResultStatusWhenFoundAndCorrect(self):
     success = triage_analysis._UpdateAnalysisResultStatus(
         self.master_name, self.builder_name, self.build_number_found, True)
     self.assertTrue(success)
     analysis = WfAnalysis.Get(self.master_name, self.builder_name,
                               self.build_number_found)
     self.assertEquals(wf_analysis_result_status.FOUND_CORRECT,
                       analysis.result_status)
     self.assertEquals(self.suspected_cls, analysis.culprit_cls)
Example #24
    def testModifyStatusIfDuplicateCheckForTriagedResult(self):
        analyses = self._CreateAnalyses('m', 'b', 1)

        analyses[0].result_status = result_status.NOT_FOUND_UNTRIAGED
        check_duplicate_failures._ModifyStatusIfDuplicate(analyses[0])

        analysis = WfAnalysis.Get('m', 'b', 0)
        self.assertEqual(result_status.NOT_FOUND_UNTRIAGED,
                         analysis.result_status)
Example #25
 def _ResetAnalysis(self, master_name, builder_name, build_number):
     analysis = WfAnalysis.Get(master_name, builder_name, build_number)
     analysis.pipeline_status_path = self.pipeline_status_path()
     analysis.status = analysis_status.RUNNING
     analysis.result_status = None
     analysis.start_time = time_util.GetUTCNow()
     analysis.version = appengine_util.GetCurrentVersion()
     analysis.end_time = None
     analysis.put()
Example #26
 def _ResetAnalysis(self, master_name, builder_name, build_number):
     analysis = WfAnalysis.Get(master_name, builder_name, build_number)
     analysis.pipeline_status_path = self.pipeline_status_path()
     analysis.status = wf_analysis_status.ANALYZING
     analysis.result_status = None
     analysis.start_time = datetime.utcnow()
     analysis.version = modules.get_current_version_name()
     analysis.end_time = None
     analysis.put()
Example #27
    def testModifyStatusIfDuplicateDuplicateStatusInBetween(self):
        analyses = self._CreateAnalyses('m', 'b', 5)

        analyses[0].result_status = result_status.FOUND_CORRECT
        analyses[0].put()
        analyses[4].result_status = result_status.FOUND_CORRECT
        analyses[4].put()

        analyses[2].result_status = (result_status.FOUND_CORRECT_DUPLICATE)
        analyses[2].put()

        check_duplicate_failures._ModifyStatusIfDuplicate(analyses[1])

        analysis_one = WfAnalysis.Get('m', 'b', 1)
        analysis_three = WfAnalysis.Get('m', 'b', 3)
        self.assertEqual(result_status.FOUND_CORRECT_DUPLICATE,
                         analysis_one.result_status)
        self.assertEqual(result_status.FOUND_CORRECT_DUPLICATE,
                         analysis_three.result_status)
Example #28
 def testUpdateAnalysisResultStatusWhenNotFoundButIncorrect(self):
     success = triage_analysis._UpdateAnalysisResultStatus(
         self.master_name, self.builder_name, self.build_number_not_found,
         False)
     self.assertTrue(success)
     analysis = WfAnalysis.Get(self.master_name, self.builder_name,
                               self.build_number_not_found)
     self.assertEquals(wf_analysis_result_status.NOT_FOUND_INCORRECT,
                       analysis.result_status)
     self.assertIsNone(analysis.culprit_cls)
Example #29
 def testReset(self):
     master_name = 'm1'
     builder_name = 'b'
     build_number = 1
     analysis = WfAnalysis.Create(master_name, builder_name, build_number)
     analysis.status = analysis_status.COMPLETED
     analysis.put()
     analysis.Reset()
     analysis = WfAnalysis.Get(master_name, builder_name, build_number)
     self.assertEqual(analysis_status.PENDING, analysis.status)
Example #30
    def testModifyStatusIfDuplicateOnlyOneTriagedEnd(self):
        analyses = self._CreateAnalyses('m', 'b', 4)

        analyses[0].result_status = result_status.FOUND_CORRECT
        analyses[0].put()

        check_duplicate_failures._ModifyStatusIfDuplicate(analyses[1])
        for i in range(1, 3):
            analysis = WfAnalysis.Get('m', 'b', i)
            self.assertEqual(result_status.FOUND_UNTRIAGED,
                             analysis.result_status)