Example no. 1
    def _GetAllTryJobs(self, failure_result_map):
        """Returns all try jobs related to one build.

    Args:
      A dict to map each step/test with the key to the build when it failed the
      first time.
      {
          'step1': 'm/b/1',
          'step2': {
              'test1': 'm/b/1',
              'test2': 'm/b/2'
          }
      }

    Returns:
      A dict of try jobs like below:
      {
          'm/b/1': WfTryJob(
              key=Key('WfBuild', 'm/b/1'),...)
          ...
      }
    """
        if not failure_result_map:
            return {}

        try_jobs = {}
        for step_map in failure_result_map.values():
            if isinstance(step_map, basestring):
                try_jobs[step_map] = WfTryJob.Get(*step_map.split('/'))
            else:
                for task_key in step_map.values():
                    if not try_jobs.get(task_key):
                        try_jobs[task_key] = WfTryJob.Get(*task_key.split('/'))

        return try_jobs
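
# A minimal usage sketch (not part of the original source), assuming an ndb
# context, that `handler` is the instance defining _GetAllTryJobs above, and
# that WfTryJob entities exist for build keys 'm/b/1' and 'm/b/2'.
failure_result_map = {
    'step1': 'm/b/1',
    'step2': {
        'test1': 'm/b/1',
        'test2': 'm/b/2'
    }
}
try_jobs = handler._GetAllTryJobs(failure_result_map)
assert set(try_jobs) == {'m/b/1', 'm/b/2'}  # One entry per distinct build key.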
Example no. 2
  def testIdentifyCulpritForCompileTryJobNoCulprit(self):
    master_name = 'm'
    builder_name = 'b'
    build_number = 1
    try_job_id = '1'

    self._CreateEntities(master_name, builder_name, build_number, try_job_id)

    analysis = WfAnalysis.Create(master_name, builder_name, build_number)
    analysis.put()

    self.MockPipeline(RevertAndNotifyCulpritPipeline,
                      None,
                      expected_args=[master_name, builder_name, build_number,
                                     None, [], failure_type.COMPILE])
    pipeline = IdentifyTryJobCulpritPipeline(
        master_name, builder_name, build_number,
        failure_type.COMPILE, '1', None)
    pipeline.start()
    self.execute_queued_tasks()

    try_job = WfTryJob.Get(master_name, builder_name, build_number)
    try_job_data = WfTryJobData.Get(try_job_id)

    self.assertEqual(analysis_status.COMPLETED, try_job.status)
    self.assertEqual([], try_job.compile_results)
    self.assertIsNone(try_job_data.culprits)
    self.assertIsNone(analysis.result_status)
    self.assertIsNone(analysis.suspected_cls)
Example no. 3
def ScheduleCompileTryJob(parameters, runner_id):
    master_name, builder_name, build_number = (parameters.build_key.GetParts())
    properties = GetBuildProperties(parameters)
    tryserver_mastername, tryserver_buildername = try_job_service.GetTrybot()

    build_id, error = try_job_service.TriggerTryJob(
        master_name, builder_name, tryserver_mastername, tryserver_buildername,
        properties,
        failure_type.GetDescriptionForFailureType(failure_type.COMPILE),
        parameters.cache_name, parameters.dimensions, runner_id)

    if error:
        raise exceptions.RetryException(error.reason, error.message)

    try_job = try_job_service.UpdateTryJob(master_name, builder_name,
                                           build_number, build_id,
                                           failure_type.COMPILE)

    # Create a corresponding WfTryJobData entity to capture as much metadata as
    # early as possible.
    try_job_service.CreateTryJobData(build_id,
                                     try_job.key,
                                     bool(parameters.compile_targets),
                                     bool(parameters.suspected_revisions),
                                     failure_type.COMPILE,
                                     runner_id=runner_id)

    return build_id
Example no. 4
def CreateTestFailureAnalysisCompletionEvent(analysis):
    """Transforms a test failure analysis into an event proto.

  Args:
    analysis (WfAnalysis): The analysis to be transformed.

  Returns:
    ([TestAnalysisCompletionEvent]) Proto used to report to BQ table.
  """
    events = []

    for step in analysis.failure_info.get('failed_steps', {}):
        for test in analysis.failure_info['failed_steps'][step].get(
                'tests') or {}:
            if analysis.flaky_tests and test in analysis.flaky_tests.get(
                    step, []):
                # The test is flaky, should report it in flake analysis.
                continue

            # If the failure result map entry is missing, bail out, since it
            # contains required information.
            if (not analysis.failure_result_map
                    or not analysis.failure_result_map.get(step)
                    or not analysis.failure_result_map[step].get(test)):
                continue
            event = TestAnalysisCompletionEvent()
            event.flake = False
            _ExtractGeneralAnalysisInfo(analysis, event)

            event.analysis_info.step_name = step
            event.test_name = test

            # Extract master/builder/build_number from failure_result_map.
            master, builder, build_number = (
                analysis.failure_result_map[step][test].split('/'))
            event.analysis_info.culprit_build_number = int(build_number)

            # Culprit.
            try_job = WfTryJob.Get(master, builder, build_number)
            if (try_job and try_job.test_results
                    and try_job.test_results[-1].get('culprit')
                    and try_job.test_results[-1]['culprit'].get(step)
                    and try_job.test_results[-1]['culprit'][step].get('tests')
                    and try_job.test_results[-1]['culprit'][step]['tests'].get(
                        test)):
                _ExtractCulpritForDict(
                    try_job.test_results[-1]['culprit'][step]['tests'][test],
                    event)

            event = _ExtractSuspectsForWfAnalysis(analysis, event)

            # Outcomes.
            _SetOutcomesForEvent(event)

            # Actions.
            _SetActionsForEvent(event)

            events.append(event)

    return events
Example no. 5
def ReviveOrCreateTryJobEntity(master_name, builder_name, build_number,
                               force_try_job):
    """Checks try job entity to further determine if need a new try job.

  * If there is an entity for a running or completed try job, no need for new
    job.
  * If there is an entity for a failed try job, revive the entity and start a
    new job.
  * If there is no entity, create one.

  Returns:
    A bool to indicate if a try job entity is revived or created.
    The try job entities' key.
  """
    try_job_entity_revived_or_created = True
    try_job = WfTryJob.Get(master_name, builder_name, build_number)

    if try_job:
        if try_job.failed or force_try_job:
            try_job.status = analysis_status.PENDING
            try_job.put()
        else:
            try_job_entity_revived_or_created = False
    else:
        try_job = WfTryJob.Create(master_name, builder_name, build_number)
        try_job.put()

    return try_job_entity_revived_or_created, try_job.key.urlsafe()
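
# A short usage sketch (not part of the original source), assuming an ndb
# context; the master/builder/build_number values are illustrative.
created, key_urlsafe = ReviveOrCreateTryJobEntity('m', 'b', 123, False)
assert created  # No entity existed, so one was created.
# A later call for the same build with force_try_job=False returns False
# unless the stored try job has failed, in which case it is revived.
revived, same_key = ReviveOrCreateTryJobEntity('m', 'b', 123, False)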
Example no. 6
    def testReturnNoneIfNoTryJob(self):
        master_name = 'm'
        builder_name = 'b'
        build_number = 8

        WfTryJob.Create(master_name, builder_name, build_number).put()

        self.MockGeneratorPipeline(
            pipeline_class=RevertAndNotifyTestCulpritPipeline,
            expected_input=CulpritActionParameters(
                build_key=BuildKey(master_name=master_name,
                                   builder_name=builder_name,
                                   build_number=build_number),
                culprits=DictOfBasestring(),
                heuristic_cls=ListOfBasestring(),
                failure_to_culprit_map=None),
            mocked_output=False)
        parameters = IdentifyTestTryJobCulpritParameters(
            build_key=BuildKey(master_name=master_name,
                               builder_name=builder_name,
                               build_number=build_number),
            result=None)
        pipeline = IdentifyTestTryJobCulpritPipeline(parameters)
        pipeline.start()
        self.execute_queued_tasks()

        try_job = WfTryJob.Get(master_name, builder_name, build_number)
        self.assertEqual(try_job.test_results, [])
        self.assertEqual(try_job.status, analysis_status.COMPLETED)
Example no. 7
  def testIdentifyCulpritForFlakyCompile(self):
    master_name = 'm'
    builder_name = 'b'
    build_number = 1
    try_job_id = '1'

    compile_result = {
        'report': {
            'result': {
                'rev1': 'failed',
                'rev2': 'failed'
            },
            'metadata': {
                'sub_ranges': [[None, 'rev2']]
            }
        },
        'url': 'url',
        'try_job_id': try_job_id,
    }

    self._CreateEntities(master_name, builder_name, build_number, try_job_id)

    analysis = WfAnalysis.Create(master_name, builder_name, build_number)
    analysis.result = {
        'failures': [
            {
                'step_name': 'compile',
                'suspected_cls': []
            }
        ]
    }
    analysis.put()

    self.MockPipeline(RevertAndNotifyCulpritPipeline,
                      None,
                      expected_args=[master_name, builder_name, build_number,
                                     {}, [], failure_type.COMPILE])
    pipeline = IdentifyTryJobCulpritPipeline(
        master_name, builder_name, build_number,
        failure_type.COMPILE, '1', compile_result)
    pipeline.start()
    self.execute_queued_tasks()

    try_job = WfTryJob.Get(master_name, builder_name, build_number)

    self.assertEqual(analysis_status.COMPLETED, try_job.status)

    try_job_data = WfTryJobData.Get(try_job_id)
    self.assertIsNone(try_job_data.culprits)

    analysis = WfAnalysis.Get(master_name, builder_name, build_number)
    self.assertEqual(result_status.FLAKY, analysis.result_status)
    self.assertEqual([], analysis.suspected_cls)
Example no. 8
def UpdateTryJobResult(parameters, culprits):
    master_name, builder_name, build_number = parameters.build_key.GetParts()
    try_job = WfTryJob.Get(master_name, builder_name, build_number)
    new_result = (
        parameters.result.ToSerializable() if parameters.result else {})
    try_job_id = parameters.result.try_job_id if parameters.result else None
    if culprits:
        try_job_service.UpdateTryJobResult(try_job.compile_results, new_result,
                                           try_job_id)
    try_job.status = analysis_status.COMPLETED
    try_job.put()
Example no. 9
def UpdateTryJob(master_name, builder_name, build_number, build_id,
                 try_job_type):
    try_job = WfTryJob.Get(master_name, builder_name, build_number)

    if try_job_type == failure_type.COMPILE:
        try_job.compile_results.append({'try_job_id': build_id})
    else:
        try_job.test_results.append({'try_job_id': build_id})
    try_job.try_job_ids.append(build_id)
    try_job.put()
    return try_job
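
# A minimal sketch (not part of the original source): a compile failure appends
# the id to compile_results, any other type to test_results. Assumes an ndb
# context and that WfTryJob.Create('m', 'b', 1).put() ran earlier.
try_job = UpdateTryJob('m', 'b', 1, 'build_id_1', failure_type.COMPILE)
assert try_job.compile_results[-1] == {'try_job_id': 'build_id_1'}
assert 'build_id_1' in try_job.try_job_ids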
Example no. 10
    def testSuccessfullyScheduleNewTryJobForCompileWithSuspectedRevisions(
            self, mock_module):
        master_name = 'm'
        builder_name = 'b'
        build_number = 223
        good_revision = 'rev1'
        bad_revision = 'rev2'
        build_id = '1'
        url = 'url'
        build = WfBuild.Create(master_name, builder_name, build_number)
        build.data = {
            'properties': {
                'parent_mastername': 'pm',
                'parent_buildername': 'pb'
            }
        }
        build.put()

        response = {
            'build': {
                'id': build_id,
                'url': url,
                'status': 'SCHEDULED',
            }
        }
        results = [(None,
                    buildbucket_client.BuildbucketBuild(response['build']))]
        mock_module.TriggerTryJobs.return_value = results

        WfTryJob.Create(master_name, builder_name, build_number).put()

        try_job_pipeline = ScheduleCompileTryJobPipeline()
        try_job_id = try_job_pipeline.run(master_name, builder_name,
                                          build_number, good_revision,
                                          bad_revision, failure_type.COMPILE,
                                          None, ['r5'], None, None)

        try_job = WfTryJob.Get(master_name, builder_name, build_number)
        try_job_data = WfTryJobData.Get(build_id)

        expected_try_job_id = '1'
        self.assertEqual(expected_try_job_id, try_job_id)
        self.assertEqual(expected_try_job_id,
                         try_job.compile_results[-1]['try_job_id'])
        self.assertTrue(expected_try_job_id in try_job.try_job_ids)
        self.assertIsNotNone(try_job_data)
        self.assertEqual(try_job_data.master_name, master_name)
        self.assertEqual(try_job_data.builder_name, builder_name)
        self.assertEqual(try_job_data.build_number, build_number)
        self.assertEqual(
            try_job_data.try_job_type,
            failure_type.GetDescriptionForFailureType(failure_type.COMPILE))
        self.assertFalse(try_job_data.has_compile_targets)
        self.assertTrue(try_job_data.has_heuristic_results)
Example no. 11
    def testGet(self):
        master_name = 'm'
        builder_name = 'b'
        build_number = 123
        try_job_id = 'try_job_id'

        try_job_before = WfTryJob.Create(master_name, builder_name,
                                         build_number)
        try_job_before.try_job_ids = [try_job_id]
        try_job_before.put()

        try_job_after = WfTryJob.Get(master_name, builder_name, build_number)
        self.assertEqual([try_job_id], try_job_after.try_job_ids)
Example no. 12
    def run(self, master_name, builder_name, build_number, good_revision,
            bad_revision, try_job_type, suspected_revisions, cache_name,
            dimensions, *task_results):
        """
    Args:
      master_name (str): the master name of a build.
      builder_name (str): the builder name of a build.
      build_number (int): the build number of a build.
      good_revision (str): the revision of the last passed build.
      bad__revision (str): the revision of the first failed build.
      try_job_type (int): type of the try job: TEST in this case.
      suspected_revisions (list): a list of suspected revisions from heuristic.
      cache_name (str): A string to identify separate directories for different
          waterfall bots on the trybots.
      dimensions (list): A list of strings in the format
          ["key1:value1", "key2:value2"].
      task_results (list): a list of reliable failed tests.

    Returns:
      build_id (str): id of the triggered try job.
    """

        properties = self._GetBuildProperties(master_name, builder_name,
                                              build_number, good_revision,
                                              bad_revision, try_job_type,
                                              suspected_revisions)

        targeted_tests = _GetTargetedTests(dict(task_results))
        if not targeted_tests:  # pragma: no cover
            logging.info('All tests are flaky, no try job will be triggered.')
            return

        additional_parameters = {'tests': targeted_tests}

        build_id = self._TriggerTryJob(
            master_name, builder_name, properties, additional_parameters,
            failure_type.GetDescriptionForFailureType(failure_type.TEST),
            cache_name, dimensions)

        try_job = WfTryJob.Get(master_name, builder_name, build_number)
        try_job.test_results.append({'try_job_id': build_id})
        try_job.try_job_ids.append(build_id)
        try_job.put()

        # Create a corresponding WfTryJobData entity to capture as much metadata as
        # early as possible.
        self._CreateTryJobData(build_id, try_job.key,
                               bool(suspected_revisions))

        return build_id
Example no. 13
def main():
    start = datetime(2017, 12, 1, 0, 0, 0)
    cursor = None
    more = True

    groups_with_different_results = defaultdict(list)
    groups = defaultdict(list)

    while more:
        analyses, cursor, more = WfAnalysis.query(
            WfAnalysis.build_start_time >= start).fetch_page(
                100, start_cursor=cursor)

        for analysis in analyses:
            if (analysis.status != analysis_status.COMPLETED
                    or not analysis.failure_group_key
                    or analysis.failure_type != failure_type.COMPILE):
                continue

            group_key = '/'.join(str(x) for x in analysis.failure_group_key)
            culprit = None
            try_job = WfTryJob.Get(*analysis.key.pairs()[0][1].split('/'))
            if try_job and try_job.compile_results:
                culprit = try_job.compile_results[-1].get('culprit')

            same_result = False
            for item in groups[group_key]:
                if (item['culprit'] != culprit
                        or item['suspects'] != analysis.suspected_cls):
                    continue
                same_result = True
                item['builds'].append(analysis.key.pairs()[0][1])
                break

            if same_result:
                continue

            new_result = {
                'suspects': analysis.suspected_cls,
                'culprit': culprit,
                'builds': [analysis.key.pairs()[0][1]]
            }
            groups[group_key].append(new_result)

    for key, item in groups.iteritems():
        if len(item) > 1:
            groups_with_different_results[key] = item

    _DisplayResults(groups_with_different_results, groups)
Example no. 14
    def testUpdateTryJobResultAnalyzing(self):
        master_name = 'm'
        builder_name = 'b'
        build_number = 1
        try_job_id = '3'

        try_job = WfTryJob.Create(master_name, builder_name, build_number)
        try_job.put()

        pipeline = MonitorTryJobPipeline()
        pipeline._UpdateTryJobResult(
            try_job.key.urlsafe(), failure_type.TEST, try_job_id, 'url',
            buildbucket_client.BuildbucketBuild.STARTED)
        try_job = WfTryJob.Get(master_name, builder_name, build_number)
        self.assertEqual(analysis_status.RUNNING, try_job.status)
Example no. 15
def _ReviveOrCreateTryJobEntity(master_name, builder_name, build_number,
                                force_try_job):
    try_job_entity_revived_or_created = True
    try_job = WfTryJob.Get(master_name, builder_name, build_number)

    if try_job:
        if try_job.failed or force_try_job:
            try_job.status = analysis_status.PENDING
            try_job.put()
        else:
            try_job_entity_revived_or_created = False
    else:
        try_job = WfTryJob.Create(master_name, builder_name, build_number)
        try_job.put()

    return try_job_entity_revived_or_created, try_job.key
Example no. 16
def CreateCompileFailureAnalysisCompletionEvent(analysis):
    """Transforms a compile failure analysis to an event proto.

    Args:
      analysis (WfAnalysis): The analysis to be transformed.

    Returns:
      (CompileAnalysisCompletionEvent) Proto used to report to BQ table.
    """
    event = CompileAnalysisCompletionEvent()
    _ExtractGeneralAnalysisInfo(analysis, event)
    event.analysis_info.step_name = 'compile'

    if (analysis.failure_info and analysis.failure_info.get('failed_steps')
            and analysis.failure_info['failed_steps'].get('compile')
            and analysis.failure_info['failed_steps']['compile'].get(
                'first_failure')):
        event.analysis_info.culprit_build_number = (
            analysis.failure_info['failed_steps']['compile']['first_failure'])

    try_job = WfTryJob.Get(analysis.master_name, analysis.builder_name,
                           event.analysis_info.culprit_build_number)
    # Culprit.
    if (try_job and try_job.compile_results
            and try_job.compile_results[-1].get('culprit')
            and try_job.compile_results[-1]['culprit'].get('compile')):
        _ExtractCulpritForDict(
            try_job.compile_results[-1]['culprit']['compile'], event)

    event = _ExtractSuspectsForWfAnalysis(analysis, event)

    if (analysis.signals and analysis.signals.get('compile')
            and analysis.signals['compile'].get('failed_edges')):
        # Use a set to avoid adding duplicates here.
        rules = set()
        for edge in analysis.signals['compile']['failed_edges']:
            rules.add(edge['rule'])
        event.failed_build_rules.extend(rules)

    # Outcomes.
    _SetOutcomesForEvent(event)

    # Actions.
    _SetActionsForEvent(event)

    return event
Example no. 17
  def testSuccessfullyScheduleNewTryJobForTest(self, mock_module):
    master_name = 'm'
    builder_name = 'b'
    build_number = 223
    good_revision = 'rev1'
    bad_revision = 'rev2'
    targeted_tests = ['a on platform', ['a', ['test1', 'test2']]]
    build_id = '1'
    build = WfBuild.Create(master_name, builder_name, build_number)
    build.data = {'properties': {'parent_mastername': 'pm',
                                 'parent_buildername': 'pb'}}
    build.put()

    response = {
        'build': {
            'id': build_id,
            'url': 'url',
            'status': 'SCHEDULED',
        }
    }
    results = [(None, buildbucket_client.BuildbucketBuild(response['build']))]
    mock_module.TriggerTryJobs.return_value = results

    WfTryJob.Create(master_name, builder_name, build_number).put()

    try_job_pipeline = ScheduleTestTryJobPipeline()
    try_job_id = try_job_pipeline.run(
        master_name, builder_name, build_number, good_revision, bad_revision,
        failure_type.TEST, None, None, None, targeted_tests)

    try_job = WfTryJob.Get(master_name, builder_name, build_number)
    self.assertEqual(try_job_id, build_id)
    self.assertEqual(try_job.test_results[-1]['try_job_id'], build_id)

    try_job_data = WfTryJobData.Get(try_job_id)
    self.assertIsNotNone(try_job_data)
    self.assertEqual(try_job_data.master_name, master_name)
    self.assertEqual(try_job_data.builder_name, builder_name)
    self.assertEqual(try_job_data.build_number, build_number)
    self.assertEqual(
        try_job_data.try_job_type,
        failure_type.GetDescriptionForFailureType(failure_type.TEST))
    self.assertFalse(try_job_data.has_compile_targets)
    self.assertFalse(try_job_data.has_heuristic_results)
Example no. 18
    def run(self, master_name, builder_name, build_number, good_revision,
            bad_revision, try_job_type, compile_targets, suspected_revisions,
            cache_name, dimensions):
        """
    Args:
      master_name (str): the master name of a build.
      builder_name (str): the builder name of a build.
      build_number (int): the build number of a build.
      good_revision (str): the revision of the last passed build.
      bad__revision (str): the revision of the first failed build.
      try_job_type (int): type of the try job: COMPILE in this case.
      compile_targets (list): a list of failed output nodes.
      suspected_revisions (list): a list of suspected revisions from heuristic.
      cache_name (str): A string to identify separate directories for different
          waterfall bots on the trybots.
      dimensions (list): A list of strings in the format
          ["key1:value1", "key2:value2"].

    Returns:
      build_id (str): id of the triggered try job.
    """

        properties = self._GetBuildProperties(master_name, builder_name,
                                              build_number, good_revision,
                                              bad_revision, try_job_type,
                                              suspected_revisions)
        additional_parameters = {'compile_targets': compile_targets}

        build_id = self._TriggerTryJob(
            master_name, builder_name, properties, additional_parameters,
            failure_type.GetDescriptionForFailureType(failure_type.COMPILE),
            cache_name, dimensions)

        try_job = WfTryJob.Get(master_name, builder_name, build_number)
        try_job.compile_results.append({'try_job_id': build_id})
        try_job.try_job_ids.append(build_id)
        try_job.put()

        # Create a corresponding WfTryJobData entity to capture as much metadata as
        # early as possible.
        self._CreateTryJobData(build_id, try_job.key, bool(compile_targets),
                               bool(suspected_revisions))

        return build_id
Example no. 19
def UpdateTryJobResult(parameters, culprits):
    """ Updates try job result with culprit info.
  Args:
    parameters (IdentifyTestTryJobCulpritParameters): Parameters to identify
      culprit from try job result.
    culprits (dict): A dict of culprits info: revision, repo_name,
      commit_position and url.

  """
    master_name, builder_name, build_number = (parameters.build_key.GetParts())
    try_job = WfTryJob.Get(master_name, builder_name, build_number)
    new_result = (
        parameters.result.ToSerializable() if parameters.result else {})
    try_job_id = parameters.result.try_job_id if parameters.result else None
    if culprits:
        try_job_service.UpdateTryJobResult(try_job.test_results, new_result,
                                           try_job_id)
    try_job.status = analysis_status.COMPLETED
    try_job.put()
Example no. 20
  def testReturnNoneIfNoTryJob(self):
    master_name = 'm'
    builder_name = 'b'
    build_number = 8

    WfTryJob.Create(master_name, builder_name, build_number).put()

    self.MockPipeline(RevertAndNotifyCulpritPipeline,
                      None,
                      expected_args=[master_name, builder_name, build_number,
                                     None, [], failure_type.TEST])
    pipeline = IdentifyTryJobCulpritPipeline(
        master_name, builder_name, build_number,
        failure_type.TEST, None, None)
    pipeline.start()
    self.execute_queued_tasks()

    try_job = WfTryJob.Get(master_name, builder_name, build_number)
    self.assertEqual(try_job.test_results, [])
    self.assertEqual(try_job.status, analysis_status.COMPLETED)
Example no. 21
  def testIdentifyCulpritForCompileReturnNoneIfAllPassed(self):
    master_name = 'm'
    builder_name = 'b'
    build_number = 1
    try_job_id = '1'

    compile_result = {
        'report': {
            'result': {
                'rev1': 'passed',
                'rev2': 'passed'
            }
        },
        'url': 'url',
        'try_job_id': try_job_id,
    }

    self._CreateEntities(master_name, builder_name, build_number, try_job_id)

    analysis = WfAnalysis.Create(master_name, builder_name, build_number)
    analysis.put()

    self.MockPipeline(RevertAndNotifyCulpritPipeline,
                      None,
                      expected_args=[master_name, builder_name, build_number,
                                     {}, [], failure_type.COMPILE])
    pipeline = IdentifyTryJobCulpritPipeline(
        master_name, builder_name, build_number,
        failure_type.COMPILE, '1', compile_result)
    pipeline.start()
    self.execute_queued_tasks()

    try_job = WfTryJob.Get(master_name, builder_name, build_number)

    self.assertEqual(analysis_status.COMPLETED, try_job.status)

    try_job_data = WfTryJobData.Get(try_job_id)
    self.assertIsNone(try_job_data.culprits)

    self.assertIsNone(analysis.result_status)
    self.assertIsNone(analysis.suspected_cls)
Example no. 22
def _PrepareTryJobDataForCompileFailure(analysis):
    try_job_data = {}
    if not (analysis.failure_result_map and  # pragma: no branch.
            constants.COMPILE_STEP_NAME in analysis.failure_result_map):
        return try_job_data  # pragma: no cover.

    referred_build_keys = build_util.GetBuildInfoFromId(
        analysis.failure_result_map[constants.COMPILE_STEP_NAME])
    try_job = WfTryJob.Get(*referred_build_keys)
    if not try_job or not try_job.compile_results:
        return try_job_data  # pragma: no cover.
    result = try_job.compile_results[-1]

    try_job_data['status'] = analysis_status.STATUS_TO_DESCRIPTION.get(
        try_job.status, 'unknown').lower()
    try_job_data['url'] = result.get('url')
    try_job_data['completed'] = try_job.completed
    try_job_data['failed'] = try_job.failed
    try_job_data['culprit'] = result.get('culprit',
                                         {}).get(constants.COMPILE_STEP_NAME)

    return try_job_data
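
# An illustrative sketch (not part of the original source) of the dict returned
# above for a completed compile try job; all values are made up, and the
# culprit dict mirrors the revision/commit_position style used in these
# examples.
example_try_job_data = {
    'status': 'completed',
    'url': 'url',
    'completed': True,
    'failed': False,
    'culprit': {
        'revision': 'rev2',
        'commit_position': 2,
        'url': 'url_2',
        'repo_name': 'chromium'
    }
}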
Example no. 23
def _GetTryJobResultForCompile(failure_result_map):
  try_job_key = failure_result_map['compile']
  referred_build_keys = BaseBuildModel.GetBuildInfoFromBuildKey(try_job_key)
  culprit_info = defaultdict(lambda: defaultdict(list))

  try_job = WfTryJob.Get(*referred_build_keys)
  if not try_job or try_job.test_results:
    return culprit_info

  try_job_result = (
      try_job.compile_results[-1] if try_job.compile_results else None)

  compile_try_job = {'try_job_key': try_job_key, 'status': try_job.status}

  if try_job_result:
    if try_job_result.get('url'):
      compile_try_job['try_job_url'] = try_job_result['url']
      compile_try_job['try_job_build_number'] = (
          _GetTryJobBuildNumber(try_job_result))
    if try_job_result.get('culprit', {}).get('compile'):
      compile_try_job['culprit'] = try_job_result['culprit']['compile']

  culprit_info['compile']['try_jobs'].append(compile_try_job)
  return culprit_info
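
# An illustrative sketch (not part of the original source) of the structure
# _GetTryJobResultForCompile returns for a completed try job with a culprit;
# all values are made up.
example_culprit_info = {
    'compile': {
        'try_jobs': [{
            'try_job_key': 'm/b/121',
            'status': analysis_status.COMPLETED,  # Taken from the try job entity.
            'try_job_url': 'url',
            'try_job_build_number': 1234,
            'culprit': {
                'revision': 'rev2',
                'commit_position': 2,
                'url': 'url_2',
                'repo_name': 'chromium'
            }
        }]
    }
}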
Example no. 24
  while more:
    analyses, cursor, more = WfAnalysis.query(
        ndb.AND(WfAnalysis.build_start_time >= start,
                WfAnalysis.build_start_time < end)).fetch_page(
                    100, start_cursor=cursor)

    for analysis in analyses:
      if not analysis.completed or not analysis.result:
        continue

      build_key = analysis.key.pairs()[0][1]
      master, builder_name, build_number = (
          BaseBuildModel.GetBuildInfoFromBuildKey(build_key))
      build_number = int(build_number)

      try_job = WfTryJob.Get(master, builder_name, build_number)

      for failure in analysis.result.get('failures', {}):

        result = None
        step_name = failure['step_name']
        culprits = _GetTestTryJobCulprit(try_job, step_name)
        if failure.get('tests'):
          for test in failure['tests']:
            if test['first_failure'] != build_number:
              # Not first time failure.
              continue

            test_name = test['test_name']

            result = Result(step_name, test_name)
Example no. 25
def _GetCulpritInfoForTryJobResultForTest(try_job_key, culprits_info):
  referred_build_keys = BaseBuildModel.GetBuildInfoFromBuildKey(try_job_key)
  try_job = WfTryJob.Get(*referred_build_keys)

  if not try_job or try_job.compile_results:
    return

  try_job_result = try_job.test_results[-1] if try_job.test_results else None

  for step_try_jobs in culprits_info.values():
    # If try job found different culprits for each test, split tests by culprit.
    additional_tests_culprit_info = []

    for try_job_info in step_try_jobs['try_jobs']:
      if (try_job_key != try_job_info['try_job_key'] or
          try_job_info.get('status')):
        # Conditions that try_job_info has status are:
        # If there is no swarming task, there won't be try job;
        # If the swarming task is not completed yet, there won't be try job yet;
        # If there are flaky tests found, those tests will be marked as flaky,
        # and no try job for them will be triggered.
        continue

      try_job_info['status'] = try_job.status
      if try_job_result:
        # Needs to use ref_name to match step_name in try job.
        ref_name = try_job_info['ref_name']
        # Saves try job information.
        if try_job_result.get('url'):  # pragma: no cover
          try_job_info['try_job_url'] = try_job_result['url']
          try_job_info['try_job_build_number'] = (
              _GetTryJobBuildNumber(try_job_result))

        if (try_job_result.get('culprit') and
            try_job_result['culprit'].get(ref_name)):
          # Saves try job culprits information.

          # Uses culprits to group tests.
          culprit_tests_map = _OrganizeTryJobResultByCulprits(
              try_job_result['culprit'][ref_name])

          ungrouped_tests = try_job_info.get('tests', [])
          list_of_culprits = []
          for culprit_info in culprit_tests_map.values():
            failed_tests = culprit_info['failed_tests']
            list_of_culprits.append(culprit_info)
            # Gets tests that haven't been grouped.
            ungrouped_tests = list(set(ungrouped_tests) ^ set(failed_tests))
            if not ungrouped_tests:
              # All tests have been grouped.
              break

          index_start = 1
          if ungrouped_tests:
            # There are tests that don't have try job culprits.
            # Group these tests together.
            # Save them in current try_job_info.
            try_job_info['tests'] = ungrouped_tests
            try_job_info['culprit'] = {}
            # Saves all the tests that have culprits later.
            index_start = 0
          else:
            # Saves the first culprit in current try_job_info.
            # Saves all the other culprits later.
            try_job_info['culprit'] = {
                'revision':
                    list_of_culprits[0]['revision'],
                'commit_position':
                    list_of_culprits[0]['commit_position'],
                'review_url':
                    list_of_culprits[0].get(
                        'url', list_of_culprits[0].get('review_url', None))
            }
            try_job_info['tests'] = list_of_culprits[0]['failed_tests']

          for n in xrange(index_start, len(list_of_culprits)):
            # Appends the rest of test groups to step_try_jobs['try_jobs'].
            iterate_culprit = list_of_culprits[n]
            tmp_try_job_info = copy.deepcopy(try_job_info)
            tmp_try_job_info['culprit'] = {
                'revision':
                    iterate_culprit['revision'],
                'commit_position':
                    iterate_culprit['commit_position'],
                'review_url':
                    iterate_culprit.get('url',
                                        iterate_culprit.get('review_url', None))
            }
            tmp_try_job_info['tests'] = iterate_culprit['failed_tests']
            additional_tests_culprit_info.append(tmp_try_job_info)

    if additional_tests_culprit_info:
      step_try_jobs['try_jobs'].extend(additional_tests_culprit_info)
Example no. 26
    def testGetTryJobsForTestSuccess(self, mock_buildbucket, mock_report):
        master_name = 'm'
        builder_name = 'b'
        build_number = 1
        try_job_id = '3'

        try_job = WfTryJob.Create(master_name, builder_name, build_number)
        try_job.test_results = [{
            'report': None,
            'url': 'https://build.chromium.org/p/m/builders/b/builds/1234',
            'try_job_id': try_job_id,
        }]
        try_job.status = analysis_status.RUNNING
        try_job.put()

        try_job_data = WfTryJobData.Create(try_job_id)
        try_job_data.try_job_key = try_job.key
        try_job_data.try_job_url = (
            'https://build.chromium.org/p/m/builders/b/builds/1234')
        try_job_data.put()

        data = [{
            'build': {
                'id': '3',
                'url': 'https://build.chromium.org/p/m/builders/b/builds/1234',
                'status': 'STARTED'
            }
        }, {
            'error': {
                'reason': 'BUILD_NOT_FOUND',
                'message': 'message',
            }
        }, {
            'build': {
                'id': '3',
                'url': 'https://build.chromium.org/p/m/builders/b/builds/1234',
                'status': 'STARTED'
            }
        }, {
            'error': {
                'reason': 'BUILD_NOT_FOUND',
                'message': 'message',
            }
        }, {
            'build': {
                'id': '3',
                'url': 'https://build.chromium.org/p/m/builders/b/builds/1234',
                'status': 'COMPLETED',
            }
        }]

        report = {
            'result': {
                'rev1': {
                    'a_test': {
                        'status': 'passed',
                        'valid': True
                    }
                },
                'rev2': {
                    'a_test': {
                        'status': 'failed',
                        'valid': True,
                        'failures': ['test1', 'test2']
                    }
                }
            }
        }

        get_tryjobs_responses = [
            [(None, buildbucket_client.BuildbucketBuild(data[0]['build']))],
            [(buildbucket_client.BuildbucketError(data[1]['error']), None)],
            [(None, buildbucket_client.BuildbucketBuild(data[2]['build']))],
            [(buildbucket_client.BuildbucketError(data[3]['error']), None)],
            [(None, buildbucket_client.BuildbucketBuild(data[4]['build']))],
        ]
        mock_buildbucket.GetTryJobs.side_effect = get_tryjobs_responses
        mock_report.return_value = json.dumps(report)

        pipeline = MonitorTryJobPipeline()
        pipeline.start_test()
        pipeline.run(try_job.key.urlsafe(), failure_type.TEST, try_job_id)
        pipeline.run(try_job.key.urlsafe(), failure_type.TEST, try_job_id)
        # Since run() calls callback() immediately, we use -1.
        for _ in range(len(get_tryjobs_responses) - 1):
            pipeline.callback(callback_params=pipeline.last_params)

        # Reload from ID to get all internal properties in sync.
        pipeline = MonitorTryJobPipeline.from_id(pipeline.pipeline_id)
        pipeline.finalized()
        test_result = pipeline.outputs.default.value

        expected_test_result = {
            'report': {
                'result': {
                    'rev1': {
                        'a_test': {
                            'status': 'passed',
                            'valid': True
                        }
                    },
                    'rev2': {
                        'a_test': {
                            'status': 'failed',
                            'valid': True,
                            'failures': ['test1', 'test2']
                        }
                    }
                }
            },
            'url': 'https://build.chromium.org/p/m/builders/b/builds/1234',
            'try_job_id': '3',
        }
        self.assertEqual(expected_test_result, test_result)

        try_job = WfTryJob.Get(master_name, builder_name, build_number)
        self.assertEqual(expected_test_result, try_job.test_results[-1])
        self.assertEqual(analysis_status.RUNNING, try_job.status)
Example no. 27
  def testIdentifyCulpritForCompileTryJobSuccess(self):
    master_name = 'm'
    builder_name = 'b'
    build_number = 1
    try_job_id = '1'

    compile_result = {
        'report': {
            'result': {
                'rev1': 'passed',
                'rev2': 'failed'
            },
            'culprit': 'rev2'
        },
        'try_job_id': try_job_id,
    }

    self._CreateEntities(master_name, builder_name, build_number, try_job_id,
                         try_job_status=analysis_status.RUNNING,
                         compile_results=[compile_result])
    analysis = WfAnalysis.Create(master_name, builder_name, build_number)
    analysis.put()

    expected_culprit = 'rev2'
    expected_suspected_cl = {
        'revision': 'rev2',
        'commit_position': 2,
        'url': 'url_2',
        'repo_name': 'chromium'
    }
    expected_compile_result = {
        'report': {
            'result': {
                'rev1': 'passed',
                'rev2': 'failed'
            },
            'culprit': 'rev2'
        },
        'try_job_id': try_job_id,
        'culprit': {
            'compile': expected_suspected_cl
        }
    }
    expected_analysis_suspected_cls = [{
        'revision': 'rev2',
        'commit_position': 2,
        'url': 'url_2',
        'repo_name': 'chromium',
        'failures': {'compile': []},
        'top_score': None
    }]

    self.MockPipeline(RevertAndNotifyCulpritPipeline,
                      None,
                      expected_args=[master_name, builder_name, build_number,
                                     {expected_culprit: expected_suspected_cl},
                                     [], failure_type.COMPILE])
    pipeline = IdentifyTryJobCulpritPipeline(
        master_name, builder_name, build_number,
        failure_type.COMPILE, '1', compile_result)
    pipeline.start()
    self.execute_queued_tasks()

    try_job = WfTryJob.Get(master_name, builder_name, build_number)
    self.assertEqual(expected_compile_result, try_job.compile_results[-1])
    self.assertEqual(analysis_status.COMPLETED, try_job.status)

    try_job_data = WfTryJobData.Get(try_job_id)
    analysis = WfAnalysis.Get(master_name, builder_name, build_number)
    self.assertEqual({'compile': expected_culprit}, try_job_data.culprits)
    self.assertEqual(analysis.result_status,
                     result_status.FOUND_UNTRIAGED)
    self.assertEqual(analysis.suspected_cls, expected_analysis_suspected_cls)
Example no. 28
    def testGetTryJobsForCompileSuccessBackwardCompatibleCallback(
            self, mock_buildbucket, mock_report):
        master_name = 'm'
        builder_name = 'b'
        build_number = 1
        try_job_id = '1'
        regression_range_size = 2

        try_job = WfTryJob.Create(master_name, builder_name, build_number)
        try_job_data = WfTryJobData.Create(try_job_id)
        try_job_data.try_job_key = try_job.key
        try_job_data.try_job_url = (
            'https://build.chromium.org/p/m/builders/b/builds/1234')
        try_job_data.put()
        try_job.compile_results = [{
            'report': None,
            'url': 'https://build.chromium.org/p/m/builders/b/builds/1234',
            'try_job_id': '1',
        }]
        try_job.status = analysis_status.RUNNING
        try_job.put()

        report = {
            'result': {
                'rev1': 'passed',
                'rev2': 'failed'
            },
            'metadata': {
                'regression_range_size': 2
            }
        }

        build_response = {
            'id': '1',
            'url': 'https://build.chromium.org/p/m/builders/b/builds/1234',
            'status': 'COMPLETED',
            'completed_ts': '1454367574000000',
            'created_ts': '1454367570000000',
            'updated_ts': '1454367574000000',
        }
        mock_buildbucket.GetTryJobs.return_value = [
            (None, buildbucket_client.BuildbucketBuild(build_response))
        ]
        mock_report.return_value = json.dumps(report)

        pipeline = MonitorTryJobPipeline(try_job.key.urlsafe(),
                                         failure_type.COMPILE, try_job_id)
        pipeline.start_test()
        pipeline.run(try_job.key.urlsafe(), failure_type.COMPILE, try_job_id)
        pipeline.callback(**pipeline.last_params)

        # Reload from ID to get all internal properties in sync.
        pipeline = MonitorTryJobPipeline.from_id(pipeline.pipeline_id)
        pipeline.finalized()
        compile_result = pipeline.outputs.default.value

        expected_compile_result = {
            'report': {
                'result': {
                    'rev1': 'passed',
                    'rev2': 'failed'
                },
                'metadata': {
                    'regression_range_size': regression_range_size
                }
            },
            'url': 'https://build.chromium.org/p/m/builders/b/builds/1234',
            'try_job_id': '1',
        }

        self.assertEqual(expected_compile_result, compile_result)

        try_job = WfTryJob.Get(master_name, builder_name, build_number)
        self.assertEqual(expected_compile_result, try_job.compile_results[-1])
        self.assertEqual(analysis_status.RUNNING, try_job.status)

        try_job_data = WfTryJobData.Get(try_job_id)
        self.assertEqual(try_job_data.regression_range_size,
                         regression_range_size)
        self.assertIsInstance(try_job_data.start_time, datetime)
Example no. 29
  def testIdentifyCulpritForTestTryJobSuccess(self):
    master_name = 'm'
    builder_name = 'b'
    build_number = 1
    try_job_id = '1'

    test_result = {
        'report': {
            'result': {
                'rev0': {
                    'a_test': {
                        'status': 'passed',
                        'valid': True,
                    },
                    'b_test': {
                        'status': 'failed',
                        'valid': True,
                        'failures': ['b_test1']
                    }
                },
                'rev1': {
                    'a_test': {
                        'status': 'failed',
                        'valid': True,
                        'failures': ['a_test1']
                    },
                    'b_test': {
                        'status': 'failed',
                        'valid': True,
                        'failures': ['b_test1']
                    }
                },
                'rev2': {
                    'a_test': {
                        'status': 'failed',
                        'valid': True,
                        'failures': ['a_test1', 'a_test2']
                    },
                    'b_test': {
                        'status': 'failed',
                        'valid': True,
                        'failures': ['b_test1']
                    }
                }
            },
            'culprits': {
                'a_test': {
                    'a_test1': 'rev1',
                    'a_test2': 'rev2'
                },
            },
            'flakes': {
                'b_test': ['b_test1']
            }
        },
        'url': 'url',
        'try_job_id': try_job_id
    }

    self._CreateEntities(master_name, builder_name, build_number, try_job_id,
                         try_job_status=analysis_status.RUNNING,
                         test_results=[test_result])

    analysis = WfAnalysis.Create(master_name, builder_name, build_number)
    analysis.put()

    a_test1_suspected_cl = {
        'revision': 'rev1',
        'commit_position': 1,
        'url': 'url_1',
        'repo_name': 'chromium'
    }
    a_test2_suspected_cl = {
        'revision': 'rev2',
        'commit_position': 2,
        'url': 'url_2',
        'repo_name': 'chromium'
    }

    expected_test_result = {
        'report': {
            'result': {
                'rev0': {
                    'a_test': {
                        'status': 'passed',
                        'valid': True,
                    },
                    'b_test': {
                        'status': 'failed',
                        'valid': True,
                        'failures': ['b_test1']
                    }
                },
                'rev1': {
                    'a_test': {
                        'status': 'failed',
                        'valid': True,
                        'failures': ['a_test1']
                    },
                    'b_test': {
                        'status': 'failed',
                        'valid': True,
                        'failures': ['b_test1']
                    }
                },
                'rev2': {
                    'a_test': {
                        'status': 'failed',
                        'valid': True,
                        'failures': ['a_test1', 'a_test2']
                    },
                    'b_test': {
                        'status': 'failed',
                        'valid': True,
                        'failures': ['b_test1']
                    }
                }
            },
            'culprits': {
                'a_test': {
                    'a_test1': 'rev1',
                    'a_test2': 'rev2'
                },
            },
            'flakes': {
                'b_test': ['b_test1']
            }
        },
        'url': 'url',
        'try_job_id': try_job_id,
        'culprit': {
            'a_test': {
                'tests': {
                    'a_test1': a_test1_suspected_cl,
                    'a_test2': a_test2_suspected_cl
                }
            }
        }
    }

    expected_culprits = {
        'rev1': a_test1_suspected_cl,
        'rev2': a_test2_suspected_cl
    }

    self.MockPipeline(RevertAndNotifyCulpritPipeline,
                      None,
                      expected_args=[master_name, builder_name, build_number,
                                     expected_culprits, [], failure_type.TEST])
    pipeline = IdentifyTryJobCulpritPipeline(
        master_name, builder_name, build_number,
        failure_type.TEST, '1', test_result)
    pipeline.start()
    self.execute_queued_tasks()

    try_job = WfTryJob.Get(master_name, builder_name, build_number)
    self.assertEqual(expected_test_result, try_job.test_results[-1])
    self.assertEqual(analysis_status.COMPLETED, try_job.status)

    try_job_data = WfTryJobData.Get(try_job_id)
    analysis = WfAnalysis.Get(master_name, builder_name, build_number)
    expected_culprit_data = {
        'a_test': {
            'a_test1': 'rev1',
            'a_test2': 'rev2',
        }
    }

    expected_cls = [
        {
            'revision': 'rev1',
            'commit_position': 1,
            'url': 'url_1',
            'repo_name': 'chromium',
            'failures': {
                'a_test': ['a_test1'],
                'b_test': ['b_test1'],
            },
            'top_score': None
        },
        {
            'revision': 'rev2',
            'commit_position': 2,
            'url': 'url_2',
            'repo_name': 'chromium',
            'failures': {
                'a_test': ['a_test1', 'a_test2'],
                'b_test': ['b_test1'],
            },
            'top_score': None
        }
    ]
    self.assertEqual(expected_culprit_data, try_job_data.culprits)
    self.assertEqual(analysis.result_status,
                     result_status.FOUND_UNTRIAGED)
    self.assertEqual(analysis.suspected_cls, expected_cls)
Example no. 30
    def testGetTryJobsForCompileSuccessSwarming(self, mock_buildbucket,
                                                mock_report):
        master_name = 'm'
        builder_name = 'b'
        build_number = 1
        try_job_id = '1'
        try_job_url = 'https://luci-milo.appspot.com/swarming/task/3595be5002f4bc10'
        regression_range_size = 2

        try_job = WfTryJob.Create(master_name, builder_name, build_number)
        try_job_data = WfTryJobData.Create(try_job_id)
        try_job_data.try_job_key = try_job.key
        try_job_data.try_job_url = try_job_url
        try_job_data.put()
        try_job.compile_results = [{
            'report': None,
            'url': try_job_url,
            'try_job_id': '1',
        }]
        try_job.status = analysis_status.RUNNING
        try_job.put()

        build_response = {
            'id': '1',
            'url': try_job_url,
            'status': 'COMPLETED',
        }
        report = {
            'result': {
                'rev1': 'passed',
                'rev2': 'failed'
            },
            'metadata': {
                'regression_range_size': 2
            }
        }
        mock_buildbucket.GetTryJobs.return_value = [
            (None, buildbucket_client.BuildbucketBuild(build_response))
        ]
        mock_report.return_value = json.dumps(report)

        pipeline = MonitorTryJobPipeline()
        pipeline.start_test()
        pipeline.run(try_job.key.urlsafe(), failure_type.COMPILE, try_job_id)
        pipeline.callback(callback_params=pipeline.last_params)

        # Reload from ID to get all internal properties in sync.
        pipeline = MonitorTryJobPipeline.from_id(pipeline.pipeline_id)
        pipeline.finalized()
        compile_result = pipeline.outputs.default.value

        expected_compile_result = {
            'report': {
                'result': {
                    'rev1': 'passed',
                    'rev2': 'failed'
                },
                'metadata': {
                    'regression_range_size': regression_range_size
                }
            },
            'url': try_job_url,
            'try_job_id': '1',
        }

        self.assertEqual(expected_compile_result, compile_result)

        try_job = WfTryJob.Get(master_name, builder_name, build_number)
        self.assertEqual(expected_compile_result, try_job.compile_results[-1])
        self.assertEqual(analysis_status.RUNNING, try_job.status)

        try_job_data = WfTryJobData.Get(try_job_id)
        self.assertEqual(try_job_data.regression_range_size,
                         regression_range_size)