def testDoNotGroupTestsWithDisjointBlameLists(self):
        """Test failures with disjoint blame lists are not grouped.

        Each of the two builds shares no suspect CLs with the other, so each
        must open its own failure group.
        """
        master_name = 'm1'
        builder_name = 'b'
        build_number = 1
        master_name_2 = 'm2'

        # Disjoint blame lists: no CL appears in both builds.
        blame_list_1 = ['a']
        blame_list_2 = ['b']
        failed_steps = {
            'step_a': {
                'current_failure': 3,
                'first_failure': 2,
                'last_pass': 1
            }
        }

        WfAnalysis.Create(master_name, builder_name, build_number).put()
        # Run pipeline with signals that have certain failed steps.
        # Observe new group creation.
        self.assertTrue(
            try_job_util._IsBuildFailureUniqueAcrossPlatforms(
                master_name, builder_name, build_number, failure_type.TEST,
                blame_list_1, failed_steps, None, None))
        self.assertIsNotNone(
            WfFailureGroup.Get(master_name, builder_name, build_number))

        WfAnalysis.Create(master_name_2, builder_name, build_number).put()
        # Run pipeline with signals that have different failed steps.
        # Observe new group creation.
        self.assertTrue(
            try_job_util._IsBuildFailureUniqueAcrossPlatforms(
                master_name_2, builder_name, build_number, failure_type.TEST,
                blame_list_2, failed_steps, None, None))
        # assertIsNotNone (not assertTrue) for consistency with the first
        # existence check above.
        self.assertIsNotNone(
            WfFailureGroup.Get(master_name_2, builder_name, build_number))
    def testDoNotGroupCompilesWithDifferentOutputNodes(self):
        """Compile failures with different failed output nodes are not
        grouped; each opens its own failure group."""
        master_name = 'm1'
        builder_name = 'b'
        build_number = 1
        master_name_2 = 'm2'

        blame_list = ['a']

        # The two builds fail on different output nodes.
        signals_1 = {'compile': {'failed_output_nodes': ['abc.obj']}}

        signals_2 = {'compile': {'failed_output_nodes': ['def.obj']}}

        WfAnalysis.Create(master_name, builder_name, build_number).put()
        # Run pipeline with signals that have certain failed output nodes.
        # Observe new group creation.
        self.assertTrue(
            try_job_util._IsBuildFailureUniqueAcrossPlatforms(
                master_name, builder_name, build_number, failure_type.COMPILE,
                blame_list, None, signals_1, None))
        self.assertIsNotNone(
            WfFailureGroup.Get(master_name, builder_name, build_number))

        WfAnalysis.Create(master_name_2, builder_name, build_number).put()
        # Run pipeline with signals that have different failed output nodes.
        # Observe new group creation.
        self.assertTrue(
            try_job_util._IsBuildFailureUniqueAcrossPlatforms(
                master_name_2, builder_name, build_number,
                failure_type.COMPILE, blame_list, None, signals_2, None))
        # assertIsNotNone (not assertTrue) for consistency with the first
        # existence check above.
        self.assertIsNotNone(
            WfFailureGroup.Get(master_name_2, builder_name, build_number))
    def testGroupTestsWithRelatedStepsWithoutHeuristicResult(self):
        """Test failures with identical blame lists and failed steps (and no
        heuristic result) are grouped into the first build's failure group."""
        master_name = 'm1'
        builder_name = 'b'
        build_number = 1
        master_name_2 = 'm2'

        blame_list = ['a']

        failed_steps = {
            'step_a': {
                'current_failure': 3,
                'first_failure': 2,
                'last_pass': 1
            }
        }

        WfAnalysis.Create(master_name, builder_name, build_number).put()
        # Run pipeline with signals that have certain failed steps.
        # Observe new group creation.
        self.assertTrue(
            try_job_util._IsBuildFailureUniqueAcrossPlatforms(
                master_name, builder_name, build_number, failure_type.TEST,
                blame_list, failed_steps, None, None))
        self.assertIsNotNone(
            WfFailureGroup.Get(master_name, builder_name, build_number))

        WfAnalysis.Create(master_name_2, builder_name, build_number).put()
        # Run pipeline with signals that have the same failed steps.
        # Observe no new group creation.
        self.assertFalse(
            try_job_util._IsBuildFailureUniqueAcrossPlatforms(
                master_name_2, builder_name, build_number, failure_type.TEST,
                blame_list, failed_steps, None, None))
        self.assertIsNone(
            WfFailureGroup.Get(master_name_2, builder_name, build_number))
    def testGroupCompilesWithRelatedFailuresWithoutHeuristicResult(self):
        """Compile failures with the same blame list and failed output nodes
        (and no heuristic result) are grouped: no second group is created."""
        master_name = 'm1'
        builder_name = 'b'
        build_number = 1
        master_name_2 = 'm2'

        blame_list = ['a']

        signals = {'compile': {'failed_output_nodes': ['abc.obj']}}

        WfAnalysis.Create(master_name, builder_name, build_number).put()
        # Run pipeline with signals that have certain failed output nodes.
        # Observe new group creation.
        self.assertTrue(
            try_job_util._IsBuildFailureUniqueAcrossPlatforms(
                master_name, builder_name, build_number, failure_type.COMPILE,
                blame_list, None, signals, None))
        self.assertIsNotNone(
            WfFailureGroup.Get(master_name, builder_name, build_number))

        WfAnalysis.Create(master_name_2, builder_name, build_number).put()
        # Run pipeline with signals that have the same failed output nodes.
        # Observe no new group creation.
        self.assertFalse(
            try_job_util._IsBuildFailureUniqueAcrossPlatforms(
                master_name_2, builder_name, build_number,
                failure_type.COMPILE, blame_list, None, signals, None))
        self.assertIsNone(
            WfFailureGroup.Get(master_name_2, builder_name, build_number))
  def testUpdateAnalysisPartiallyTriaged(self):
    """Triaging one of two suspected CLs updates that CL only.

    The second CL is still untriaged, so the analysis-level result_status
    must remain FOUND_UNTRIAGED.
    """
    analysis = WfAnalysis.Create(
        self.master_name, self.builder_name, self.build_number_1)

    analysis.suspected_cls = [self.suspected_cl_1, self.suspected_cl_2]
    analysis.result_status = result_status.FOUND_UNTRIAGED
    analysis.put()

    # Triage only the first CL as CORRECT.
    success = triage_suspected_cl._UpdateAnalysis(
      self.master_name, self.builder_name, self.build_number_1,
      self.repo_name, self.revision_1, suspected_cl_status.CORRECT)

    # First CL carries the new status; second CL is untouched.
    expected_suspected_cls = [
      {
        'repo_name': self.repo_name,
        'revision': self.revision_1,
        'commit_position': self.commit_position,
        'url': 'https://codereview.chromium.org/123',
        'status': suspected_cl_status.CORRECT
      },
      self.suspected_cl_2
    ]

    analysis = WfAnalysis.Get(
        self.master_name, self.builder_name, self.build_number_1)
    self.assertTrue(success)
    self.assertEqual(analysis.result_status, result_status.FOUND_UNTRIAGED)
    self.assertEqual(analysis.suspected_cls, expected_suspected_cls)
    def testSecondAnalysisFailureGroupKeySet(self):
        """A grouped (duplicate) analysis records the group leader's key.

        The second build's analysis must point back at the first build via
        failure_group_key.
        """
        master_name = 'm1'
        builder_name = 'b'
        build_number = 1
        master_name_2 = 'm2'

        blame_list = ['a']

        signals = {'compile': {'failed_output_nodes': ['abc.obj']}}

        WfAnalysis.Create(master_name, builder_name, build_number).put()
        # Run pipeline with signals that have certain failed output nodes.
        # Observe new group creation.
        self.assertTrue(
            try_job_util._IsBuildFailureUniqueAcrossPlatforms(
                master_name, builder_name, build_number, failure_type.COMPILE,
                blame_list, None, signals, None))

        WfAnalysis.Create(master_name_2, builder_name, build_number).put()
        # Run pipeline with signals that have the same failed output nodes.
        # Observe no new group creation.
        self.assertFalse(
            try_job_util._IsBuildFailureUniqueAcrossPlatforms(
                master_name_2, builder_name, build_number,
                failure_type.COMPILE, blame_list, None, signals, None))

        # The duplicate analysis links to the first build's group.
        analysis_2 = WfAnalysis.Get(master_name_2, builder_name, build_number)
        self.assertEqual([master_name, builder_name, build_number],
                         analysis_2.failure_group_key)
  def testAppendTriageHistoryRecordWithHistory(self):
    """Appending a triage record preserves pre-existing history entries."""
    analysis = WfAnalysis.Create(
        self.master_name, self.builder_name, self.build_number_1)
    analysis.version = 'version'
    analysis.triage_history = [{'some_info': True}]
    analysis.put()
    cl_info = '%s/%s' % (self.repo_name, self.revision_1)

    # Plain decimal literals here: leading-zero integers (05, 01) are octal
    # literals in Python 2 and a SyntaxError in Python 3; the values are
    # unchanged.
    mocked_now = datetime(2017, 5, 1, 10, 10, 10)
    mocked_timestamp = calendar.timegm(mocked_now.timetuple())
    self.MockUTCNow(mocked_now)

    triage_suspected_cl._AppendTriageHistoryRecord(
        self.master_name, self.builder_name, self.build_number_1,
        cl_info, suspected_cl_status.CORRECT, 'test')
    analysis = WfAnalysis.Get(
        self.master_name, self.builder_name, self.build_number_1)

    # New record is appended after the pre-existing entry.
    expected_history = [
        {'some_info': True},
        {
          'triage_timestamp': mocked_timestamp,
          'user_name': 'test',
          'cl_status': suspected_cl_status.CORRECT,
          'version': 'version',
          'triaged_cl': cl_info
        }
    ]
    self.assertEqual(analysis.triage_history, expected_history)
    self.assertFalse(analysis.triage_email_obscured)
    self.assertEqual(mocked_now, analysis.triage_record_last_add)
Example #8
0
    def setUp(self):
        """Creates three analyses: one still running, one with suspected CLs
        found, and one completed without suspected CLs; logs in as admin."""
        super(TriageAnalysisTest, self).setUp()
        self.master_name = 'm'
        self.builder_name = 'b'
        self.build_number_incomplete = 120  # Analysis is not completed yet.
        self.build_number_found = 122  # Suspected CLs are found for this build.
        self.build_number_not_found = 123  # No suspected CLs found.
        self.suspected_cls = [{
            'repo_name': 'chromium',
            'revision': 'r1',
            'commit_position': 123,
            'url': 'https://codereview.chromium.org/123',
        }]

        # Analysis still in progress.
        analysis = WfAnalysis.Create(self.master_name, self.builder_name,
                                     self.build_number_incomplete)
        analysis.status = wf_analysis_status.ANALYZING
        analysis.put()

        # Completed analysis with suspected CLs.
        analysis = WfAnalysis.Create(self.master_name, self.builder_name,
                                     self.build_number_found)
        analysis.status = wf_analysis_status.ANALYZED
        analysis.suspected_cls = self.suspected_cls
        analysis.put()

        # Completed analysis without suspected CLs.
        analysis = WfAnalysis.Create(self.master_name, self.builder_name,
                                     self.build_number_not_found)
        analysis.status = wf_analysis_status.ANALYZED
        analysis.put()

        self.mock_current_user(user_email='*****@*****.**', is_admin=True)
  def testIdentifyCulpritForFlakyCompile(self):
    """A compile try job whose failures are flaky yields no culprit.

    The try job completes, no culprits are recorded, and the analysis is
    marked FLAKY with an empty suspected CL list.
    """
    master_name = 'm'
    builder_name = 'b'
    build_number = 1
    try_job_id = '1'

    compile_result = {
        'report': {
            'result': {
                'rev1': 'failed',
                'rev2': 'failed'
            },
            # NOTE(review): sub_ranges appears to mark flaky revision ranges
            # (a leading None covering rev2) -- confirm against the
            # IdentifyTryJobCulpritPipeline implementation.
            'metadata': {
                'sub_ranges': [
                  [
                      None,
                      'rev2'
                  ]
                ]
            }
        },
        'url': 'url',
        'try_job_id': try_job_id,
    }

    self._CreateEntities(master_name, builder_name, build_number, try_job_id)

    analysis = WfAnalysis.Create(master_name, builder_name, build_number)
    analysis.result = {
        'failures': [
            {
                'step_name': 'compile',
                'suspected_cls': []
            }
        ]
    }
    analysis.put()

    # No culprits expected, so the notify pipeline gets empty culprit args.
    self.MockPipeline(RevertAndNotifyCulpritPipeline,
                      None,
                      expected_args=[master_name, builder_name, build_number,
                                     {}, [], failure_type.COMPILE])
    pipeline = IdentifyTryJobCulpritPipeline(
        master_name, builder_name, build_number,
        failure_type.COMPILE, '1', compile_result)
    pipeline.start()
    self.execute_queued_tasks()

    try_job = WfTryJob.Get(master_name, builder_name, build_number)

    self.assertEqual(analysis_status.COMPLETED, try_job.status)

    try_job_data = WfTryJobData.Get(try_job_id)
    self.assertIsNone(try_job_data.culprits)

    analysis = WfAnalysis.Get(master_name, builder_name, build_number)
    self.assertEqual(result_status.FLAKY, analysis.result_status)
    self.assertEqual([], analysis.suspected_cls)
def NeedANewAnalysis(master_name, builder_name, build_number, failed_steps,
                     build_completed, force):
    """Checks status of analysis for the build and decides if a new one is needed.

  A WfAnalysis entity for the given build will be created if none exists.
  When a new analysis is needed, this function will create and save a WfAnalysis
  entity to the datastore, or it will reset the existing one but still keep the
  result of last analysis.

  Args:
    master_name (str): Name of the master.
    builder_name (str): Name of the builder.
    build_number (int): Number of the build.
    failed_steps: Iterable of failed step names in this build cycle.
    build_completed (bool): Whether the build cycle has completed.
    force (bool): Whether a re-analysis is being forced by the caller.

  Returns:
    True if an analysis is needed, otherwise False.
  """
    analysis = WfAnalysis.Get(master_name, builder_name, build_number)

    if not analysis:
        # The build failure is not analyzed yet.
        analysis = WfAnalysis.Create(master_name, builder_name, build_number)
        analysis.status = analysis_status.PENDING
        analysis.request_time = time_util.GetUTCNow()
        analysis.put()
        return True
    elif force:
        # A new analysis could be forced if last analysis was completed.
        if not analysis.completed:
            # TODO: start a new analysis if the last one has started running but it
            # has no update for a considerable amount of time, eg. 10 minutes.
            logging.info(
                'Existing analysis is not completed yet. No new analysis.')
            return False

        analysis.Reset()
        analysis.request_time = time_util.GetUTCNow()
        analysis.put()
        return True
    elif failed_steps and analysis.completed:
        # If there is any new failed step, a new analysis is needed.
        for step in failed_steps:
            # A step already covered by the last analysis is not "new".
            analyzed = any(step == s for s in analysis.not_passed_steps)
            if analyzed:
                continue

            logging.info('At least one new failed step is detected: %s', step)
            analysis.Reset()
            analysis.request_time = time_util.GetUTCNow()
            analysis.put()
            return True

    # Start a new analysis if the build cycle wasn't completed in last analysis,
    # but now it is completed. This will potentially trigger a try-job run.
    if analysis.completed and not analysis.build_completed and build_completed:
        return True

    # TODO: support following cases
    # * Automatically retry if last analysis failed with errors.
    # * Analysis is not complete and no update in the last 5 minutes.
    logging.info('Not match any cases. No new analysis.')
    return False
Example #11
0
 def testReset(self):
     """Reset() returns a COMPLETED analysis to PENDING status."""
     analysis = WfAnalysis.Create('m1', 'b', 1)
     analysis.status = analysis_status.COMPLETED
     analysis.put()

     analysis.Reset()

     # Re-fetch to confirm the reset was persisted.
     reloaded = WfAnalysis.Get('m1', 'b', 1)
     self.assertEqual(analysis_status.PENDING, reloaded.status)
  def testBuildFailurePipelineFlow(self):
    """End-to-end compile-failure pipeline with all sub-pipelines mocked.

    Heuristic analysis, the try-job starter and the event reporter are
    mocked; the root pipeline is driven to completion and the analysis is
    expected to end up in RUNNING status.
    """
    master_name = 'm'
    builder_name = 'b'
    build_number = 124
    current_failure_info = {}

    self._SetupAnalysis(master_name, builder_name, build_number)

    # Heuristic step: mocked to produce an empty heuristic result.
    heuristic_params = CompileHeuristicAnalysisParameters.FromSerializable({
        'failure_info': current_failure_info,
        'build_completed': False
    })
    heuristic_output = CompileHeuristicAnalysisOutput.FromSerializable({
        'failure_info': None,
        'signals': None,
        'heuristic_result': {}
    })
    self.MockSynchronousPipeline(
        analyze_compile_failure_pipeline.HeuristicAnalysisForCompilePipeline,
        heuristic_params, heuristic_output)

    # Try-job step: mocked, fed the heuristic output above.
    start_try_job_params = StartCompileTryJobInput(
        build_key=BuildKey(
            master_name=master_name,
            builder_name=builder_name,
            build_number=build_number),
        heuristic_result=heuristic_output,
        build_completed=False,
        force=False)
    self.MockGeneratorPipeline(
        analyze_compile_failure_pipeline.StartCompileTryJobPipeline,
        start_try_job_params, False)

    # Reporting step: mocked with the analysis entity's urlsafe key.
    report_event_input = report_event_pipeline.ReportEventInput(
        analysis_urlsafe_key=WfAnalysis.Get(master_name, builder_name,
                                            build_number).key.urlsafe())
    self.MockGeneratorPipeline(
        report_event_pipeline.ReportAnalysisEventPipeline, report_event_input,
        None)

    pipeline_input = AnalyzeCompileFailureInput(
        build_key=BuildKey(
            master_name=master_name,
            builder_name=builder_name,
            build_number=build_number),
        current_failure_info=CompileFailureInfo.FromSerializable(
            current_failure_info),
        build_completed=False,
        force=False)
    root_pipeline = AnalyzeCompileFailurePipeline(pipeline_input)
    root_pipeline.start(queue_name=constants.DEFAULT_QUEUE)
    self.execute_queued_tasks()
    analysis = WfAnalysis.Get(master_name, builder_name, build_number)
    self.assertEqual(analysis_status.RUNNING, analysis.status)
Example #13
0
def _GetResultAndFailureResultMap(master_name, builder_name, build_number):
    """Returns (result, failure_result_map) for the build's analysis.

    When the analysis belongs to a failure group, the values come from the
    analysis that opened the group. Returns (None, None) if no analysis
    (or no group-leader analysis) exists.
    """
    analysis = WfAnalysis.Get(master_name, builder_name, build_number)

    # Follow the group link so every member reports the leader's result.
    if analysis and analysis.failure_group_key:
        analysis = WfAnalysis.Get(*analysis.failure_group_key)

    if analysis:
        return analysis.result, analysis.failure_result_map
    return None, None
    def testDoNotGroupTestsWithDifferentHeuristicResults(self):
        """Test failures whose heuristic results differ are not grouped,
        even with identical blame lists and failed steps."""
        master_name = 'm1'
        builder_name = 'b'
        build_number = 1
        master_name_2 = 'm2'

        blame_list = ['a']
        failed_steps = {
            'step_a': {
                'current_failure': 3,
                'first_failure': 2,
                'last_pass': 1
            }
        }

        # Same step, but different suspected revisions.
        heuristic_result_1 = {
            'failures': [{
                'step_name': 'step1',
                'suspected_cls': [{
                    'revision': 'rev1',
                }],
            }]
        }

        heuristic_result_2 = {
            'failures': [{
                'step_name': 'step1',
                'suspected_cls': [{
                    'revision': 'rev2',
                }],
            }]
        }

        WfAnalysis.Create(master_name, builder_name, build_number).put()
        # Run pipeline with signals that have certain failed steps.
        # Observe new group creation.
        self.assertTrue(
            try_job_util._IsBuildFailureUniqueAcrossPlatforms(
                master_name, builder_name, build_number, failure_type.TEST,
                blame_list, failed_steps, None, heuristic_result_1))
        self.assertIsNotNone(
            WfFailureGroup.Get(master_name, builder_name, build_number))

        WfAnalysis.Create(master_name_2, builder_name, build_number).put()
        # Run pipeline with signals that have different failed steps.
        # Observe new group creation.
        self.assertTrue(
            try_job_util._IsBuildFailureUniqueAcrossPlatforms(
                master_name_2, builder_name, build_number, failure_type.TEST,
                blame_list, failed_steps, None, heuristic_result_2))
        # assertIsNotNone (not assertTrue) for consistency with the first
        # existence check above.
        self.assertIsNotNone(
            WfFailureGroup.Get(master_name_2, builder_name, build_number))
Example #15
0
    def testSaveSignalInAnalysis(self):
        """SaveSignalInAnalysis persists the signals onto the analysis."""
        master_name = 'm'
        builder_name = 'b'
        build_number = 123
        signals = 'signals'

        WfAnalysis.Create(master_name, builder_name, build_number).put()

        extract_signal.SaveSignalInAnalysis(master_name, builder_name,
                                            build_number, signals)

        # Re-fetch to verify the signals were stored.
        stored = WfAnalysis.Get(master_name, builder_name, build_number)
        self.assertEqual(signals, stored.signals)
    def testLinkAnalysisToBuildFailureGroup(self):
        """_LinkAnalysisToBuildFailureGroup stores the group key on the
        build's analysis."""
        master_name = 'm1'
        builder_name = 'b'
        build_number = 1

        # Key of the (different) build that opened the failure group.
        failure_group_key = ['m2', 'b2', 2]
        WfAnalysis.Create(master_name, builder_name, build_number).put()
        try_job_util._LinkAnalysisToBuildFailureGroup(master_name,
                                                      builder_name,
                                                      build_number,
                                                      failure_group_key)
        self.assertEqual(
            failure_group_key,
            WfAnalysis.Get(master_name, builder_name,
                           build_number).failure_group_key)
    def testSaveAnalysisAfterHeuristicAnalysisCompletes(self, *_):
        """Saving heuristic results marks the analysis COMPLETED."""
        master_name = 'm'
        builder_name = 'b'
        build_number = 98
        analysis_result = {'result': {}}

        analysis = WfAnalysis.Create(master_name, builder_name, build_number)
        # Plain decimal literal: a leading-zero integer (06) is an octal
        # literal in Python 2 and a SyntaxError in Python 3; value unchanged.
        analysis.start_time = datetime(2016, 6, 26, 23)
        analysis.put()

        build_failure_analysis.SaveAnalysisAfterHeuristicAnalysisCompletes(
            master_name, builder_name, build_number, analysis_result, [])

        analysis = WfAnalysis.Get(master_name, builder_name, build_number)
        self.assertEqual(analysis_status.COMPLETED, analysis.status)
    def testDoNotGroupCompilesWithDifferentHeuristicResults(self):
        """Compile failures whose heuristic results differ are not grouped,
        even with identical blame lists and signals."""
        master_name = 'm1'
        builder_name = 'b'
        build_number = 1
        master_name_2 = 'm2'

        blame_list = ['a']

        signals = {'compile': {'failed_output_nodes': ['abc.obj']}}

        # Same step, but different suspected revisions.
        heuristic_result_1 = {
            'failures': [{
                'step_name': 'step1',
                'suspected_cls': [{
                    'revision': 'rev1',
                }],
            }]
        }

        heuristic_result_2 = {
            'failures': [{
                'step_name': 'step1',
                'suspected_cls': [{
                    'revision': 'rev2',
                }],
            }]
        }

        WfAnalysis.Create(master_name, builder_name, build_number).put()
        # Run pipeline with signals that have certain failed output nodes.
        # Observe new group creation.
        self.assertTrue(
            try_job_util._IsBuildFailureUniqueAcrossPlatforms(
                master_name, builder_name, build_number, failure_type.COMPILE,
                blame_list, None, signals, heuristic_result_1))
        self.assertIsNotNone(
            WfFailureGroup.Get(master_name, builder_name, build_number))

        WfAnalysis.Create(master_name_2, builder_name, build_number).put()
        # Run pipeline with signals that have different failed output nodes.
        # Observe new group creation.
        self.assertTrue(
            try_job_util._IsBuildFailureUniqueAcrossPlatforms(
                master_name_2, builder_name, build_number,
                failure_type.COMPILE, blame_list, None, signals,
                heuristic_result_2))
        # assertIsNotNone (not assertTrue) for consistency with the first
        # existence check above.
        self.assertIsNotNone(
            WfFailureGroup.Get(master_name_2, builder_name, build_number))
Example #19
0
    def testModifyStatusIfDuplicateFirstResultUntriaged(self):
        """Processing a duplicate leaves the original analysis untriaged."""
        analyses = self._CreateAnalyses('m', 'b', 3)
        check_duplicate_failures._ModifyStatusIfDuplicate(analyses[1])

        # The first analysis keeps its FOUND_UNTRIAGED result_status.
        analysis_one = WfAnalysis.Get('m', 'b', 1)
        self.assertEqual(result_status.FOUND_UNTRIAGED,
                         analysis_one.result_status)
Example #20
0
    def testModifyStatusIfDuplicateSingleAnalysisResult(self):
        """With only one analysis there is no duplicate to fold into."""
        analyses = self._CreateAnalyses('m', 'b', 1)

        check_duplicate_failures._ModifyStatusIfDuplicate(analyses[0])

        # The lone analysis remains FOUND_UNTRIAGED.
        analysis = WfAnalysis.Get('m', 'b', 0)
        self.assertEqual(result_status.FOUND_UNTRIAGED, analysis.result_status)
Example #21
0
 def testWfAnalysisCorrectnessIsUnknownIfUntriaged(self):
   """WfAnalysis.correct is None while the result is still untriaged."""
   # Loop variable renamed: the original `result_status` shadowed the
   # module-level result_status import used elsewhere in this file.
   for untriaged_status in (wf_analysis_result_status.FOUND_UNTRIAGED,
                            wf_analysis_result_status.NOT_FOUND_UNTRIAGED):
     analysis = WfAnalysis.Create('m', 'b', 123)
     analysis.status = wf_analysis_status.ANALYZED
     analysis.result_status = untriaged_status
     self.assertIsNone(analysis.correct)
Example #22
0
    def testTriggerFlakeAnalysesPipeline(self, mock_monitoring, *_):
        """TriggerFlakeAnalysesPipeline schedules flake analyses and reports
        monitoring metrics for analyzed and throttled tests."""
        master_name = 'm'
        builder_name = 'b'
        build_number = 2
        step_name = 'a_tests'
        test_name = 'Unittest1.Subtest1'

        analysis = WfAnalysis.Create(master_name, builder_name, build_number)
        analysis.flaky_tests = {step_name: [test_name, 'Unittest1.Subtest2']}
        analysis.put()

        build_key = BuildKey(master_name=master_name,
                             builder_name=builder_name,
                             build_number=build_number)

        with mock.patch.object(
                flake_analysis_service,
                'ScheduleAnalysisForFlake') as mocked_ScheduleAnalysisForFlake:
            pipeline = TriggerFlakeAnalysesPipeline(build_key)
            pipeline.RunImpl(build_key)
            self.assertTrue(mocked_ScheduleAnalysisForFlake.called)
            # One 'analyzed' and one 'throttled' metric for the step.
            mock_monitoring.assert_has_calls([
                mock.call('a_tests', 'a_tests', 'analyzed', 1),
                mock.call('a_tests', 'a_tests', 'throttled', 1)
            ])
    def testGetHeuristicSuspectedCLs(self):
        """GetHeuristicSuspectedCLs returns the urlsafe keys of the culprit
        entities matching the analysis' suspected CLs."""
        repo_name = 'chromium'
        revision = 'r123_2'

        culprit = WfSuspectedCL.Create(repo_name, revision, None)
        culprit.put()

        analysis = WfAnalysis.Create('m', 'b', 123)
        analysis.suspected_cls = [{
            'repo_name': repo_name,
            'revision': revision,
            'commit_position': None,
            'url': None,
            'failures': {
                'b': ['Unittest2.Subtest1', 'Unittest3.Subtest2']
            },
            'top_score': 4
        }]
        analysis.put()

        suspected_cls = [culprit.key.urlsafe()]

        self.assertEqual(
            suspected_cls,
            build_failure_analysis.GetHeuristicSuspectedCLs(
                'm', 'b', 123).ToSerializable())
def UpdateAnalysisWithFlakesFoundBySwarmingReruns(master_name, builder_name,
                                                  build_number, flaky_tests):
  """Records flaky tests found by swarming reruns on the WfAnalysis.

  Args:
    master_name(str): Name of the master.
    builder_name(str): Name of the builder.
    build_number(int): Number of the build.
    flaky_tests(dict): A dict of flaky tests.
  """
  if not flaky_tests:
    # Nothing new was found; leave the analysis untouched.
    return

  analysis = WfAnalysis.Get(master_name, builder_name, build_number)
  assert analysis

  # Skip when there is no result to annotate, or the exact same flaky
  # tests were already recorded.
  no_result = not analysis.result
  already_recorded = analysis.flaky_tests == flaky_tests
  if no_result or already_recorded:
    return

  updated_result, all_flaked = UpdateAnalysisResultWithFlakeInfo(
      analysis.result, flaky_tests)
  new_status = result_status.FLAKY if all_flaked else None
  analysis.UpdateWithNewFindings(
      updated_result_status=new_status,
      updated_result=updated_result,
      flaky_tests=flaky_tests)
 def _FetchAndSortUntriagedAnalyses():
   """Returns all FOUND_UNTRIAGED analyses, sorted by build identity.

   Sort key is (master_name, builder_name, build_number) for a stable,
   human-friendly ordering.
   """
   # PEP 8: proper continuation indent, spaces around '==', no space
   # before the lambda's ':'.
   query = WfAnalysis.query(
       WfAnalysis.result_status == wf_analysis_result_status.FOUND_UNTRIAGED)
   analyses = query.fetch()
   return sorted(
       analyses,
       key=lambda x: (x.master_name, x.builder_name, x.build_number))
Example #26
0
    def testGetFailedStepsForEachCL(self):
        """_GetFailedStepsForEachCL maps 'repo,revision' to the steps the CL
        is suspected in; steps with no suspected CLs contribute nothing."""
        analysis = WfAnalysis.Create('m', 'b', 0)
        analysis.result = {
            'failures': [{
                'step_name':
                'a',
                'first_failure':
                3,
                'last_pass':
                None,
                'suspected_cls': [{
                    'repo_name': 'chromium',
                    'revision': 'r99_1',
                    'commit_position': 123,
                    'url': None,
                    'score': 5,
                    'hints': {
                        'added x/y/f99_1.cc (and it was in log)': 5,
                    }
                }],
            }, {
                # Step 'b' has no suspected CLs, so it must not appear in
                # the mapping.
                'step_name': 'b',
                'first_failure': 2,
                'last_pass': None,
                'suspected_cls': [],
            }]
        }

        expected_failed_steps = {'chromium,r99_1': ['a']}
        failed_steps = check_duplicate_failures._GetFailedStepsForEachCL(
            analysis)
        self.assertEqual(expected_failed_steps, failed_steps)
Example #27
0
  def testNoResultIsReturnedWhenNoAnalysisIsCompleted(self):
    """The API returns an empty result list while analysis is in progress."""
    master_name = 'm'
    builder_name = 'b'
    build_number = 5

    master_url = 'https://build.chromium.org/p/%s' % master_name
    builds = {
        'builds': [
            {
                'master_url': master_url,
                'builder_name': builder_name,
                'build_number': build_number
            }
        ]
    }

    # Analysis exists but is still running and has no result yet.
    analysis = WfAnalysis.Create(master_name, builder_name, build_number)
    analysis.status = wf_analysis_status.ANALYZING
    analysis.result = None
    analysis.put()

    expected_result = []

    self._MockMasterIsSupported(supported=True)

    response = self.call_api('AnalyzeBuildFailures', body=builds)
    self.assertEqual(200, response.status_int)
    self.assertEqual(expected_result, response.json_body.get('results', []))
Example #28
0
def _UpdateAnalysis(master_name, builder_name, build_number, repo_name,
                    revision, cl_status):
    """Updates the triage status of one suspected CL on an analysis.

    Sets the matching CL's status to cl_status, and once every suspected CL
    has been triaged, rolls the per-CL statuses up into the analysis-level
    result_status (FOUND_CORRECT / FOUND_INCORRECT / PARTIALLY_CORRECT_FOUND).

    Returns:
      True if the analysis exists and has suspected CLs, otherwise False.
    """
    analysis = WfAnalysis.Get(master_name, builder_name, build_number)
    if not analysis or not analysis.suspected_cls:
        return False

    num_correct = 0
    num_incorrect = 0
    for cl in analysis.suspected_cls:
        if cl['repo_name'] == repo_name and cl['revision'] == revision:
            # Updates this cl's status.
            cl['status'] = cl_status

        # Checks if all the cls have been triaged and checks the status of each cl
        # on the build.
        if cl.get('status') == suspected_cl_status.CORRECT:
            num_correct += 1
        elif cl.get('status') == suspected_cl_status.INCORRECT:
            num_incorrect += 1

    if num_correct + num_incorrect == len(
            analysis.suspected_cls):  # All triaged.
        if num_correct == 0:
            analysis.result_status = result_status.FOUND_INCORRECT
        elif num_incorrect == 0:
            analysis.result_status = result_status.FOUND_CORRECT
        else:
            analysis.result_status = result_status.PARTIALLY_CORRECT_FOUND

    analysis.put()
    return True
Example #29
0
    def testCorpUserCanViewAnalysisOfFailureOnUnsupportedMaster(
            self, mocked_ValidateAuthToken):
        """A corp (non-admin) user is redirected to the read-only failure
        page and no analysis task is enqueued for an unsupported master."""
        # (valid token, not expired) per ValidateAuthToken's contract.
        mocked_ValidateAuthToken.side_effect = [(True, False)]
        master_name = 'm2'
        builder_name = 'b 1'
        build_number = 123
        build_url = buildbot.CreateBuildUrl(master_name, builder_name,
                                            build_number)

        analysis = WfAnalysis.Create(master_name, builder_name, build_number)
        analysis.status = analysis_status.COMPLETED
        analysis.put()

        self.mock_current_user(user_email='*****@*****.**', is_admin=False)

        response = self.test_app.post('/failure',
                                      params={
                                          'url': build_url,
                                          'xsrf_token': 'ab'
                                      },
                                      status=302)
        redirect_url = '/waterfall/failure?redirect=1&url=%s' % build_url
        self.assertTrue(
            response.headers.get('Location', '').endswith(redirect_url))

        # No new analysis was scheduled.
        self.assertEqual(0, len(self.taskqueue_stub.get_filtered_tasks()))
    def testTriggerFlakeAnalysesPipeline(self):
        """A test that succeeded in the swarming rerun is scheduled for
        exactly one flake analysis."""
        master_name = 'm'
        builder_name = 'b'
        build_number = 2
        step_name = 'a_tests'
        test_name = 'Unittest1.Subtest1'

        analysis = WfAnalysis.Create(master_name, builder_name, build_number)
        analysis.failure_result_map = {
            step_name: {
                test_name:
                '%s/%s/%s' % (master_name, builder_name, build_number)
            }
        }
        analysis.put()

        # Swarming rerun shows the test passing, i.e. it is flaky.
        swarming_task = WfSwarmingTask.Create(master_name, builder_name,
                                              build_number, step_name)
        swarming_task.tests_statuses = {test_name: {'SUCCESS': 1}}
        swarming_task.put()

        with mock.patch.object(flake_analysis_service,
                               'ScheduleAnalysisForFlake') as (
                                   mocked_ScheduleAnalysisForFlake):
            pipeline = TriggerFlakeAnalysesPipeline()
            pipeline.run(master_name, builder_name, build_number)
            mocked_ScheduleAnalysisForFlake.assert_called_once()
Example #31
0
def UpdateAbortedAnalysis(parameters):
    """Updates an aborted analysis and checks if a try job can still run.

    Marks the heuristic analysis as errored if it never completed, and
    determines whether there is enough information (failure_info) to resume
    with a try job even though the analysis aborted.

    Args:
      parameters (AnalyzeCompileFailureInput): Inputs to analyze a compile
        failure.

    Returns:
      (WfAnalysis, bool, bool): The WfAnalysis object, whether the try job
        can be resumed, and whether the heuristic analysis was aborted.
    """
    master_name, builder_name, build_number = parameters.build_key.GetParts()
    analysis = WfAnalysis.Get(master_name, builder_name, build_number)
    assert analysis, ('WfAnalysis Object for {}/{}/{} was missing'.format(
        master_name, builder_name, build_number))

    # Heuristic analysis could have already completed, while triggering the
    # try job kept failing and lead to the abort.
    run_try_job = False
    heuristic_aborted = False
    if not analysis.completed:
        # Heuristic analysis is aborted.
        analysis.status = analysis_status.ERROR
        analysis.result_status = None
        heuristic_aborted = True

        if analysis.failure_info:
            # We need failure_info to run try jobs,
            # while signals is optional for compile try jobs.
            run_try_job = True
    analysis.aborted = True
    analysis.put()
    return analysis, run_try_job, heuristic_aborted
Example #32
0
  def HandleGet(self):
    """Shows a list of Findit analysis results in HTML page.

    By default the page will display all the results under status FOUND_CORRECT,
    FOUND_INCORRECT and NOT_FOUND_INCORRECT.

    Available parameters:
      count: Parameter for number of analysis result to be displayed.
      result_status: Parameter to specify the result_status of the results.
      triage: Parameter for internal use. The page will display analysis results
        under status FOUND_INCORRECT, NOT_FOUND_INCORRECT, FOUND_UNTRIAGED and
        NOT_FOUND_UNTRIAGED.
      days: Parameter to decide only display results within a fixed amount of
        days. This parameter will turn off triage parameter and display all the
        results regardless of result_status.

    Returns:
      A dict with the template name ('list_analyses.html') and the data (the
      flattened analyses plus the echoed query parameters) used to render it.
    """
    # -1 means no explicit result_status filter was requested.
    status_code = int(self.request.get('result_status', '-1'))
    if status_code >= 0:
      # An explicit result_status was requested: match exactly that status.
      analysis_query = WfAnalysis.query(WfAnalysis.result_status == status_code)
    elif self.request.get('triage') == '1':
      # Triage mode: statuses strictly between FOUND_CORRECT and
      # NOT_FOUND_CORRECT (per the docstring, the *_INCORRECT and *_UNTRIAGED
      # statuses — confirm against the result_status definitions).
      analysis_query = WfAnalysis.query(ndb.AND(
          WfAnalysis.result_status > result_status.FOUND_CORRECT,
          WfAnalysis.result_status <
          result_status.NOT_FOUND_CORRECT))
    else:
      # Default view: FOUND_CORRECT up to (but excluding) FOUND_UNTRIAGED.
      analysis_query = WfAnalysis.query(ndb.AND(
          WfAnalysis.result_status >= result_status.FOUND_CORRECT,
          WfAnalysis.result_status < result_status.FOUND_UNTRIAGED))

    if self.request.get('count'):
      count = int(self.request.get('count'))
    else:
      count = _DEFAULT_DISPLAY_COUNT

    if self.request.get('days'):  # pragma: no cover
      # Truncate to midnight UTC so the window covers whole days.
      start_date = datetime.datetime.utcnow() - datetime.timedelta(
          int(self.request.get('days')))
      start_date = start_date.replace(
          hour=0, minute=0, second=0, microsecond=0)

      if status_code >= 0:
        analysis_results = analysis_query.filter(
            WfAnalysis.build_start_time >= start_date).order(
            -WfAnalysis.build_start_time).fetch(count)
      else:
        # 'days' overrides the triage/default status filters: query afresh on
        # build_start_time only, regardless of result_status (documented
        # behavior above).
        analysis_results = WfAnalysis.query(
            WfAnalysis.build_start_time >= start_date).order(
            -WfAnalysis.build_start_time).fetch(count)
    else:
      # ndb requires the inequality-filtered property (result_status) to be
      # ordered first; newest builds come first within each status.
      analysis_results = analysis_query.order(
          WfAnalysis.result_status, -WfAnalysis.build_start_time).fetch(count)

    analyses = []

    def FormatDatetime(start_time):
      # Missing timestamps render as None rather than raising.
      if not start_time:
        return None
      else:
        return start_time.strftime('%Y-%m-%d %H:%M:%S UTC')

    # Flatten each entity into the plain dict consumed by the template.
    for analysis_result in analysis_results:
      analysis = {
          'master_name': analysis_result.master_name,
          'builder_name': analysis_result.builder_name,
          'build_number': analysis_result.build_number,
          'build_start_time': FormatDatetime(analysis_result.build_start_time),
          'status': analysis_result.status,
          'status_description': analysis_result.status_description,
          'suspected_cls': analysis_result.suspected_cls,
          'result_status': analysis_result.result_status_description
      }
      analyses.append(analysis)

    data = {
        'analyses': analyses,
        'triage': self.request.get('triage', '-1'),
        'days': self.request.get('days', '-1'),
        'count': self.request.get('count', '-1'),
        'result_status': self.request.get('result_status', '-1')
    }
    return {'template': 'list_analyses.html', 'data': data}