Example #1
def _IsTestFailureUniqueAcrossPlatforms(master_name, builder_name,
                                        build_number, build_failure_type,
                                        blame_list, failed_steps,
                                        heuristic_result):

    if build_failure_type != failure_type.TEST:
        logging.info(
            'Expected test failure but got %s failure.',
            failure_type.GetDescriptionForFailureType(build_failure_type))
        return True

    failed_steps_and_tests = _GetStepsAndTests(failed_steps)
    if not failed_steps_and_tests:
        return True
    groups = _GetMatchingTestFailureGroups(failed_steps_and_tests)
    suspected_cls_with_failures = (
        test_failure_analysis.GetSuspectedCLsWithFailures(
            master_name, builder_name, build_number, heuristic_result))

    # TODO(crbug/808699): update this function call when refactoring
    # start_compile_try_job_pipeline.
    return try_job_service.IsBuildFailureUniqueAcrossPlatforms(
        master_name,
        builder_name,
        build_number,
        build_failure_type,
        blame_list,
        suspected_cls_with_failures,
        groups,
        failed_steps_and_tests=failed_steps_and_tests)
Example #2
    def testTriggerTryJob(self, mock_module):
        master_name = 'm'
        builder_name = 'b'
        build_number = 1
        build = WfBuild.Create(master_name, builder_name, build_number)
        build.data = {
            'properties': {
                'parent_mastername': 'pm',
                'parent_buildername': 'pb'
            }
        }
        build.put()
        response = {
            'build': {
                'id': '1',
                'url': 'url',
                'status': 'SCHEDULED',
            }
        }
        results = [(None,
                    buildbucket_client.BuildbucketBuild(response['build']))]
        mock_module.TriggerTryJobs.return_value = results

        build_id = ScheduleFlakeTryJobPipeline()._TriggerTryJob(
            master_name, builder_name, {}, [],
            failure_type.GetDescriptionForFailureType(failure_type.FLAKY_TEST),
            None, None)

        self.assertEqual(build_id, '1')
Example #3
def ScheduleCompileTryJob(parameters, runner_id):
    master_name, builder_name, build_number = (parameters.build_key.GetParts())
    properties = GetBuildProperties(parameters)
    tryserver_mastername, tryserver_buildername = try_job_service.GetTrybot()

    build_id, error = try_job_service.TriggerTryJob(
        master_name, builder_name, tryserver_mastername, tryserver_buildername,
        properties,
        failure_type.GetDescriptionForFailureType(failure_type.COMPILE),
        parameters.cache_name, parameters.dimensions, runner_id)

    if error:
        raise exceptions.RetryException(error.reason, error.message)
    try_job = try_job_service.UpdateTryJob(master_name, builder_name,
                                           build_number, build_id,
                                           failure_type.COMPILE)

    # Create a corresponding WfTryJobData entity to capture as much metadata as
    # early as possible.
    try_job_service.CreateTryJobData(build_id,
                                     try_job.key,
                                     bool(parameters.compile_targets),
                                     bool(parameters.suspected_revisions),
                                     failure_type.COMPILE,
                                     runner_id=runner_id)

    return build_id
Example #4
def _IsCompileFailureUniqueAcrossPlatforms(master_name, builder_name,
                                           build_number, build_failure_type,
                                           blame_list, signals,
                                           heuristic_result):

    if build_failure_type != failure_type.COMPILE:
        logging.info(
            'Expected compile failure but got %s failure.',
            failure_type.GetDescriptionForFailureType(build_failure_type))
        return True

    output_nodes = _GetOutputNodes(signals)
    if not output_nodes:
        return True
    groups = _GetMatchingCompileFailureGroups(output_nodes)
    suspected_cls_with_failures = (
        compile_failure_analysis.GetSuspectedCLsWithCompileFailures(
            heuristic_result))

    return try_job_service.IsBuildFailureUniqueAcrossPlatforms(
        master_name,
        builder_name,
        build_number,
        build_failure_type,
        blame_list,
        suspected_cls_with_failures,
        groups,
        output_nodes=output_nodes)
Example #5
def ScheduleTestTryJob(parameters, notification_id):
    master_name, builder_name, build_number = (parameters.build_key.GetParts())

    properties = GetBuildProperties(parameters)

    tryserver_mastername, tryserver_buildername = try_job_service.GetTrybot()

    build_id, error = try_job_service.TriggerTryJob(
        master_name, builder_name, tryserver_mastername,
        tryserver_buildername, properties,
        failure_type.GetDescriptionForFailureType(failure_type.TEST),
        parameters.cache_name, parameters.dimensions, notification_id)

    if error:
        raise exceptions.RetryException(error.reason, error.message)
    try_job = try_job_service.UpdateTryJob(master_name, builder_name,
                                           build_number, build_id,
                                           failure_type.TEST)

    # Create a corresponding WfTryJobData entity to capture as much metadata as
    # early as possible.
    try_job_service.CreateTryJobData(build_id,
                                     try_job.key,
                                     False,
                                     bool(parameters.suspected_revisions),
                                     failure_type.TEST,
                                     runner_id=notification_id)

    return build_id
Example #6
  def _CreateTryJobData(self, build_id, try_job_key, has_heuristic_results):
    try_job_data = WfTryJobData.Create(build_id)
    try_job_data.created_time = time_util.GetUTCNow()
    try_job_data.has_compile_targets = False
    try_job_data.has_heuristic_results = has_heuristic_results
    try_job_data.try_job_key = try_job_key
    try_job_data.try_job_type = failure_type.GetDescriptionForFailureType(
        failure_type.TEST)
    try_job_data.put()
Example #7
def _NeedANewTestTryJob(start_test_try_job_inputs):
    """Decides if a new test try job is needed.

  A new test try job is needed if:
  1. It passed preliminary checks in try_job_service.NeedANewWaterfallTryJob,
  2. It's for a test failure,
  3. It contains some first failed steps/tests

  Returns:
    A bool to indicate if a new try job is needed.
  """
    master_name, builder_name, build_number = (
        start_test_try_job_inputs.build_key.GetParts())
    force = start_test_try_job_inputs.force
    build_completed = start_test_try_job_inputs.build_completed

    # TODO(crbug/808699): update this function call when refactoring
    # start_compile_try_job_pipeline.
    need_new_try_job = try_job_service.NeedANewWaterfallTryJob(
        master_name,
        builder_name,
        build_number,
        force,
        build_completed=build_completed)
    if not need_new_try_job:
        return False

    failure_info = start_test_try_job_inputs.heuristic_result.failure_info
    if not failure_info or failure_info.failure_type is None:
        return False

    try_job_type = failure_info.failure_type
    if try_job_type != failure_type.TEST:
        logging.error('Checking for a test try job but got a %s failure.',
                      failure_type.GetDescriptionForFailureType(try_job_type))
        return False

    consistent_failures = start_test_try_job_inputs.consistent_failures

    if (not force and waterfall_config.ShouldSkipTestTryJobs(
            master_name, builder_name)):
        logging.info('Test try jobs on %s, %s are not supported yet.',
                     master_name, builder_name)
        return False

    if not consistent_failures.consistent_failures:
        # consistent_failures is empty. Either the tests are flaky or the task
        # failed.
        logging.info(
            'All tests are flaky or tasks failed, no try job will be triggered.'
        )
        return False

    return _HasBuildKeyForBuildInfoInFailureResultMap(master_name,
                                                      builder_name,
                                                      build_number)
Example #8
    def testSuccessfullyScheduleNewTryJobForCompileWithSuspectedRevisions(
            self, mock_module):
        master_name = 'm'
        builder_name = 'b'
        build_number = 223
        good_revision = 'rev1'
        bad_revision = 'rev2'
        build_id = '1'
        url = 'url'
        build = WfBuild.Create(master_name, builder_name, build_number)
        build.data = {
            'properties': {
                'parent_mastername': 'pm',
                'parent_buildername': 'pb'
            }
        }
        build.put()

        response = {
            'build': {
                'id': build_id,
                'url': url,
                'status': 'SCHEDULED',
            }
        }
        results = [(None,
                    buildbucket_client.BuildbucketBuild(response['build']))]
        mock_module.TriggerTryJobs.return_value = results

        WfTryJob.Create(master_name, builder_name, build_number).put()

        try_job_pipeline = ScheduleCompileTryJobPipeline()
        try_job_id = try_job_pipeline.run(master_name, builder_name,
                                          build_number, good_revision,
                                          bad_revision, failure_type.COMPILE,
                                          None, ['r5'], None, None)

        try_job = WfTryJob.Get(master_name, builder_name, build_number)
        try_job_data = WfTryJobData.Get(build_id)

        expected_try_job_id = '1'
        self.assertEqual(expected_try_job_id, try_job_id)
        self.assertEqual(expected_try_job_id,
                         try_job.compile_results[-1]['try_job_id'])
        self.assertTrue(expected_try_job_id in try_job.try_job_ids)
        self.assertIsNotNone(try_job_data)
        self.assertEqual(try_job_data.master_name, master_name)
        self.assertEqual(try_job_data.builder_name, builder_name)
        self.assertEqual(try_job_data.build_number, build_number)
        self.assertEqual(
            try_job_data.try_job_type,
            failure_type.GetDescriptionForFailureType(failure_type.COMPILE))
        self.assertFalse(try_job_data.has_compile_targets)
        self.assertTrue(try_job_data.has_heuristic_results)
Example #9
def RecordTestFailureAnalysisStateChange(master_name, builder_name, status,
                                         analysis_type):
  """Records state changes for test failure analysis."""
  monitoring.OnWaterfallAnalysisStateChange(
      master_name=master_name,
      builder_name=builder_name,
      failure_type=failure_type.GetDescriptionForFailureType(failure_type.TEST),
      canonical_step_name='Unknown',
      isolate_target_name='Unknown',
      status=analysis_status.STATUS_TO_DESCRIPTION[status],
      analysis_type=analysis_approach_type.STATUS_TO_DESCRIPTION[analysis_type])
Example #10
def RecordCompileFailureAnalysisStateChange(master_name, builder_name, status,
                                            analysis_type):
  """Records state changes for compile failure anlaysis."""
  monitoring.OnWaterfallAnalysisStateChange(
      master_name=master_name,
      builder_name=builder_name,
      failure_type=failure_type.GetDescriptionForFailureType(
          failure_type.COMPILE),
      canonical_step_name='compile',
      isolate_target_name='N/A',
      status=analysis_status.STATUS_TO_DESCRIPTION[status],
      analysis_type=analysis_approach_type.STATUS_TO_DESCRIPTION[analysis_type])
Example #11
def _Serialize(analysis):
    return {
        'master_name':
        analysis.master_name,
        'builder_name':
        analysis.builder_name,
        'build_number':
        analysis.build_number,
        'analysis_type':
        failure_type.GetDescriptionForFailureType(analysis.failure_type),
        'build_start_time':
        time_util.FormatDatetime(analysis.build_start_time),
    }
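For orientation, the sketch below shows a hypothetical _Serialize result for a compile-failure analysis; every value is made up, and the timestamp format depends on time_util.FormatDatetime.

# Illustrative only: a possible _Serialize result (all values are made up).
serialized_example = {
    'master_name': 'm',
    'builder_name': 'b',
    'build_number': 123,
    'analysis_type': 'compile',  # GetDescriptionForFailureType(COMPILE)
    'build_start_time': '2018-01-01 00:00:00 UTC',  # format set by time_util
}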
Example #12
def MonitoringCulpritNotification(build_failure_type, action_type, sent,
                                  should_send):
    build_failure_type = failure_type.GetDescriptionForFailureType(
        build_failure_type)
    if sent:
        monitoring.OnCulpritAction(build_failure_type,
                                   '%s_notified' % action_type)
    elif should_send:
        monitoring.OnCulpritAction(build_failure_type,
                                   '%s_notified_error' % action_type)
    else:
        monitoring.OnCulpritAction(build_failure_type,
                                   '%s_notified_skip' % action_type)
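A minimal usage sketch for the function above; the 'culprit' action_type string is an assumption chosen for illustration only.

# Hypothetical call: with sent=True this records a 'culprit_notified' action
# for the 'compile' failure type.
MonitoringCulpritNotification(
    failure_type.COMPILE, 'culprit', sent=True, should_send=True)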
Example #13
def NeedANewCompileTryJob(start_try_job_input):
    """Decides if a new compile try job is needed.

  A new compile try job is needed if:
  1. It passed preliminary checks in try_job_service.NeedANewWaterfallTryJob,
  2. It's for a compile failure,
  3. It's a first failure,
  4. There is no other running or completed try job.

  Returns:
    A bool to indicate if a new try job is needed.
    A key to the entity of the try job.
  """
    master_name, builder_name, build_number = (
        start_try_job_input.build_key.GetParts())
    need_new_try_job = try_job_service.NeedANewWaterfallTryJob(
        master_name, builder_name, build_number, start_try_job_input.force)

    if not need_new_try_job:
        return False, None

    try_job_type = start_try_job_input.heuristic_result.failure_info.failure_type
    if try_job_type != failure_type.COMPILE:
        logging.error('Checking for a compile try job but got a %s failure.',
                      failure_type.GetDescriptionForFailureType(try_job_type))
        return False, None

    need_new_try_job = _NeedANewCompileTryJob(
        master_name, builder_name, build_number,
        start_try_job_input.heuristic_result.failure_info)

    # TODO(chanli): enable the feature to trigger a single try job for a group
    # when notification is ready.
    # We still call _IsCompileFailureUniqueAcrossPlatforms just so we have data
    # for failure groups.

    # TODO(chanli): Add checking for culprits of the group when enabling
    # single try job: add current build to suspected_cl.builds if the try job for
    # this group has already completed.
    if need_new_try_job:
        _IsCompileFailureUniqueAcrossPlatforms(
            master_name, builder_name, build_number, try_job_type,
            start_try_job_input.heuristic_result.failure_info.builds[str(
                build_number)].blame_list,
            start_try_job_input.heuristic_result.signals,
            start_try_job_input.heuristic_result.heuristic_result)

    try_job_was_created, try_job_key = try_job_service.ReviveOrCreateTryJobEntity(
        master_name, builder_name, build_number, start_try_job_input.force)
    need_new_try_job = need_new_try_job and try_job_was_created
    return need_new_try_job, try_job_key
Example #14
    def run(self, master_name, builder_name, build_number, good_revision,
            bad_revision, try_job_type, suspected_revisions, cache_name,
            dimensions, *task_results):
        """
    Args:
      master_name (str): the master name of a build.
      builder_name (str): the builder name of a build.
      build_number (int): the build number of a build.
      good_revision (str): the revision of the last passed build.
      bad_revision (str): the revision of the first failed build.
      try_job_type (int): type of the try job: TEST in this case.
      suspected_revisions (list): a list of suspected revisions from heuristic.
      cache_name (str): A string to identify separate directories for different
          waterfall bots on the trybots.
      dimensions (list): A list of strings in the format
          ["key1:value1", "key2:value2"].
      task_results (list): a list of reliable failed tests.

    Returns:
      build_id (str): id of the triggered try job.
    """

        properties = self._GetBuildProperties(master_name, builder_name,
                                              build_number, good_revision,
                                              bad_revision, try_job_type,
                                              suspected_revisions)

        targeted_tests = _GetTargetedTests(dict(task_results))
        if not targeted_tests:  # pragma: no cover
            logging.info('All tests are flaky, no try job will be triggered.')
            return

        additional_parameters = {'tests': targeted_tests}

        build_id = self._TriggerTryJob(
            master_name, builder_name, properties, additional_parameters,
            failure_type.GetDescriptionForFailureType(failure_type.TEST),
            cache_name, dimensions)

        try_job = WfTryJob.Get(master_name, builder_name, build_number)
        try_job.test_results.append({'try_job_id': build_id})
        try_job.try_job_ids.append(build_id)
        try_job.put()

        # Create a corresponding WfTryJobData entity to capture as much metadata as
        # early as possible.
        self._CreateTryJobData(build_id, try_job.key,
                               bool(suspected_revisions))

        return build_id
Example #15
def CreateTryJobData(build_id,
                     try_job_key,
                     has_compile_targets,
                     has_heuristic_results,
                     try_job_type,
                     runner_id=None):
    try_job_data = WfTryJobData.Create(build_id)
    try_job_data.created_time = time_util.GetUTCNow()
    try_job_data.has_compile_targets = has_compile_targets
    try_job_data.has_heuristic_results = has_heuristic_results
    try_job_data.try_job_key = try_job_key
    try_job_data.try_job_type = failure_type.GetDescriptionForFailureType(
        try_job_type)
    try_job_data.runner_id = runner_id
    try_job_data.put()
Example #16
def MonitorRevertAction(build_failure_type, revert_status, commit_status):
    build_failure_type = failure_type.GetDescriptionForFailureType(
        build_failure_type)
    if revert_status == constants.CREATED_BY_FINDIT:
        if commit_status == constants.COMMITTED:
            monitoring.OnCulpritAction(build_failure_type, 'revert_committed')
        elif commit_status == constants.ERROR:
            monitoring.OnCulpritAction(build_failure_type,
                                       'revert_commit_error')
        else:
            monitoring.OnCulpritAction(build_failure_type, 'revert_created')
    elif revert_status == constants.CREATED_BY_SHERIFF:
        monitoring.OnCulpritAction(build_failure_type, 'revert_confirmed')
    elif revert_status == constants.ERROR:
        monitoring.OnCulpritAction(build_failure_type, 'revert_status_error')
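A minimal usage sketch, assuming the constants referenced above; a Findit-created revert that also landed would be recorded as 'revert_committed'.

# Hypothetical call: records a 'revert_committed' culprit action for compile.
MonitorRevertAction(
    failure_type.COMPILE, constants.CREATED_BY_FINDIT, constants.COMMITTED)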
Example #17
def _IsBuildFailureUniqueAcrossPlatforms(master_name, builder_name,
                                         build_number, build_failure_type,
                                         blame_list, failed_steps, signals,
                                         heuristic_result):
    output_nodes = None
    failed_steps_and_tests = None

    if build_failure_type == failure_type.COMPILE:
        output_nodes = _GetOutputNodes(signals)
        if not output_nodes:
            return True
        groups = _GetMatchingCompileFailureGroups(output_nodes)
    elif build_failure_type == failure_type.TEST:
        failed_steps_and_tests = _GetStepsAndTests(failed_steps)
        if not failed_steps_and_tests:
            return True
        groups = _GetMatchingTestFailureGroups(failed_steps_and_tests)
    else:
        logging.info(
            'Grouping %s failures is not supported. Only Compile and Test '
            'failures can be grouped.',
            failure_type.GetDescriptionForFailureType(build_failure_type))
        return True

    suspected_tuples = sorted(GetSuspectedCLsWithFailures(heuristic_result))
    existing_group = _GetMatchingGroup(groups, blame_list, suspected_tuples)

    # Create a new WfFailureGroup if we've encountered a unique build failure.
    if existing_group:
        logging.info('A group already exists, no need for a new try job.')
        _LinkAnalysisToBuildFailureGroup(
            master_name, builder_name, build_number, [
                existing_group.master_name, existing_group.builder_name,
                existing_group.build_number
            ])
    else:
        logging.info(
            'A new try job should be run for this unique build failure.')
        _CreateBuildFailureGroup(master_name, builder_name, build_number,
                                 build_failure_type, blame_list,
                                 suspected_tuples, output_nodes,
                                 failed_steps_and_tests)
        _LinkAnalysisToBuildFailureGroup(
            master_name, builder_name, build_number,
            [master_name, builder_name, build_number])

    return not existing_group
Example #18
    def run(self, master_name, builder_name, build_number, good_revision,
            bad_revision, try_job_type, compile_targets, suspected_revisions,
            cache_name, dimensions):
        """
    Args:
      master_name (str): the master name of a build.
      builder_name (str): the builder name of a build.
      build_number (int): the build number of a build.
      good_revision (str): the revision of the last passed build.
      bad_revision (str): the revision of the first failed build.
      try_job_type (int): type of the try job: COMPILE in this case.
      compile_targets (list): a list of failed output nodes.
      suspected_revisions (list): a list of suspected revisions from heuristic.
      cache_name (str): A string to identify separate directories for different
          waterfall bots on the trybots.
      dimensions (list): A list of strings in the format
          ["key1:value1", "key2:value2"].

    Returns:
      build_id (str): id of the triggered try job.
    """

        properties = self._GetBuildProperties(master_name, builder_name,
                                              build_number, good_revision,
                                              bad_revision, try_job_type,
                                              suspected_revisions)
        additional_parameters = {'compile_targets': compile_targets}

        build_id = self._TriggerTryJob(
            master_name, builder_name, properties, additional_parameters,
            failure_type.GetDescriptionForFailureType(failure_type.COMPILE),
            cache_name, dimensions)

        try_job = WfTryJob.Get(master_name, builder_name, build_number)
        try_job.compile_results.append({'try_job_id': build_id})
        try_job.try_job_ids.append(build_id)
        try_job.put()

        # Create a corresponding WfTryJobData entity to capture as much metadata as
        # early as possible.
        self._CreateTryJobData(build_id, try_job.key, bool(compile_targets),
                               bool(suspected_revisions))

        return build_id
Example #19
  def testSuccessfullyScheduleNewTryJobForTest(self, mock_module):
    master_name = 'm'
    builder_name = 'b'
    build_number = 223
    good_revision = 'rev1'
    bad_revision = 'rev2'
    targeted_tests = ['a on platform', ['a', ['test1', 'test2']]]
    build_id = '1'
    build = WfBuild.Create(master_name, builder_name, build_number)
    build.data = {'properties': {'parent_mastername': 'pm',
                                 'parent_buildername': 'pb'}}
    build.put()

    response = {
        'build': {
            'id': build_id,
            'url': 'url',
            'status': 'SCHEDULED',
        }
    }
    results = [(None, buildbucket_client.BuildbucketBuild(response['build']))]
    mock_module.TriggerTryJobs.return_value = results

    WfTryJob.Create(master_name, builder_name, build_number).put()

    try_job_pipeline = ScheduleTestTryJobPipeline()
    try_job_id = try_job_pipeline.run(
        master_name, builder_name, build_number, good_revision, bad_revision,
        failure_type.TEST, None, None, None, targeted_tests)

    try_job = WfTryJob.Get(master_name, builder_name, build_number)
    self.assertEqual(try_job_id, build_id)
    self.assertEqual(try_job.test_results[-1]['try_job_id'], build_id)

    try_job_data = WfTryJobData.Get(try_job_id)
    self.assertIsNotNone(try_job_data)
    self.assertEqual(try_job_data.master_name, master_name)
    self.assertEqual(try_job_data.builder_name, builder_name)
    self.assertEqual(try_job_data.build_number, build_number)
    self.assertEqual(
        try_job_data.try_job_type,
        failure_type.GetDescriptionForFailureType(failure_type.TEST))
    self.assertFalse(try_job_data.has_compile_targets)
    self.assertFalse(try_job_data.has_heuristic_results)
Example #20
def ScheduleFlakeTryJob(parameters, runner_id):
    """Schedules a flake try job to compile and isolate."""
    analysis = ndb.Key(urlsafe=parameters.analysis_urlsafe_key).get()
    assert analysis

    master_name = analysis.master_name
    builder_name = analysis.builder_name
    step_name = analysis.canonical_step_name
    test_name = analysis.test_name
    isolate_target_name = parameters.isolate_target_name

    properties = GetBuildProperties(master_name, builder_name,
                                    isolate_target_name, parameters.revision)

    tryserver_mastername, tryserver_buildername = try_job_service.GetTrybot()

    assert tryserver_mastername and tryserver_buildername, (
        'No tryserver master/builder is set for {}/{}'.format(
            master_name, builder_name))

    # TODO(crbug.com/787096): assert that dimensions exist once the full
    # migration to LUCI is complete.
    dimensions = (parameters.dimensions.ToSerializable()
                  if parameters.dimensions else [])

    build_id, error = try_job_service.TriggerTryJob(
        master_name, builder_name, tryserver_mastername, tryserver_buildername,
        properties,
        failure_type.GetDescriptionForFailureType(failure_type.FLAKY_TEST),
        parameters.flake_cache_name, dimensions, runner_id)

    if error:
        raise exceptions.RetryException(error.reason, error.message)

    try_job = UpdateTryJob(master_name, builder_name, step_name, test_name,
                           parameters.revision, build_id)

    # Create a corresponding FlakeTryJobData entity to capture as much metadata as
    # early as possible.
    CreateTryJobData(build_id, try_job.key, parameters.analysis_urlsafe_key,
                     runner_id)

    return build_id
Example #21
  def run(self, master_name, builder_name, canonical_step_name,
          test_name, git_hash, urlsafe_analysis_key, cache_name, dimensions,
          iterations_to_rerun=None):
    """Triggers a flake try job.

    Args:
      master_name (str): The master name of a flaky test.
      builder_name (str): The builder name of a flaky test.
      canonical_step_name (str): The canonical name of the step the flaky test
          occurred on.
      test_name (str): The name of the flaky test.
      git_hash (str): The git hash of the revision to run the try job against.
      urlsafe_analysis_key (str): The urlsafe key of the original
          MasterFlakeAnalysis that triggered this try job.
      cache_name (str): A string to identify separate directories for different
          waterfall bots on the trybots.
      dimensions (list): A list of strings in the format
          ["key1:value1", "key2:value2"].
      iterations_to_rerun (int): The number of iterations to rerun.

    Returns:
      build_id (str): Id of the triggered try job.
    """
    properties = self._GetBuildProperties(
        master_name, builder_name, canonical_step_name, test_name, git_hash,
        iterations_to_rerun)
    build_id = self._TriggerTryJob(
        master_name, builder_name, properties, {},
        failure_type.GetDescriptionForFailureType(failure_type.FLAKY_TEST),
        cache_name, dimensions)

    try_job = FlakeTryJob.Get(
        master_name, builder_name, canonical_step_name, test_name, git_hash)
    try_job.flake_results.append({'try_job_id': build_id})
    try_job.try_job_ids.append(build_id)
    try_job.put()

    # Create a corresponding FlakeTryJobData entity to capture as much
    # metadata as early as possible.
    self._CreateTryJobData(build_id, try_job.key, urlsafe_analysis_key)

    return build_id
Example #22
def GetBuildProperties(pipeline_input, try_job_type):
    master_name, builder_name, build_number = (
        pipeline_input.build_key.GetParts())
    properties = {
        'recipe':
        'findit/chromium/%s' %
        (failure_type.GetDescriptionForFailureType(try_job_type)),
        'good_revision':
        pipeline_input.good_revision,
        'bad_revision':
        pipeline_input.bad_revision,
        'target_mastername':
        master_name,
        'referenced_build_url':
        buildbot.CreateBuildUrl(master_name, builder_name, build_number),
        'suspected_revisions':
        pipeline_input.suspected_revisions or [],
    }

    return properties
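For a compile failure the returned dict would look roughly like the sketch below; the revisions, master name, and URL are made-up values, and only the 'compile' description is confirmed elsewhere in these examples.

# Illustrative only (hypothetical values):
# GetBuildProperties(pipeline_input, failure_type.COMPILE) ->
# {
#     'recipe': 'findit/chromium/compile',
#     'good_revision': 'rev1',
#     'bad_revision': 'rev2',
#     'target_mastername': 'm',
#     'referenced_build_url': '<result of buildbot.CreateBuildUrl>',
#     'suspected_revisions': [],
# }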
Example #23
def _RecordSwarmingTaskStateChange(master_name, builder_name, build_number,
                                   step_name, status, analysis_type):
    """Records state changes for swarming tasks."""
    step_metadata = {}
    if step_name:
        step_metadata = step_util.LegacyGetStepMetadata(
            master_name, builder_name, build_number, step_name) or {}

    monitoring.OnWaterfallAnalysisStateChange(
        master_name=master_name,
        builder_name=builder_name,
        failure_type=failure_type.GetDescriptionForFailureType(
            failure_type.TEST),
        canonical_step_name=step_metadata.get('canonical_step_name')
        or 'Unknown',
        isolate_target_name=step_metadata.get('isolate_target_name')
        or 'Unknown',
        status=analysis_status.STATUS_TO_DESCRIPTION[status],
        analysis_type=analysis_approach_type.STATUS_TO_DESCRIPTION[
            analysis_type])
Example #24
def SaveAnalysisAfterHeuristicAnalysisCompletes(master_name, builder_name,
                                                build_number, analysis_result,
                                                suspected_cls):
    analysis = WfAnalysis.Get(master_name, builder_name, build_number)
    analysis.result = analysis_result
    analysis.status = analysis_status.COMPLETED
    analysis.result_status = GetResultAnalysisStatus(analysis_result)
    analysis.suspected_cls = _GetSuspectedCLsWithOnlyCLInfo(suspected_cls)
    analysis.end_time = time_util.GetUTCNow()
    analysis.put()

    duration = analysis.end_time - analysis.start_time
    status = result_status.RESULT_STATUS_TO_DESCRIPTION.get(
        analysis.result_status, 'no result')
    monitoring.analysis_durations.add(
        duration.total_seconds(), {
            'type':
            failure_type.GetDescriptionForFailureType(analysis.failure_type),
            'result':
            'heuristic-' + status,
        })
Example #25
    def _GetBuildProperties(self, master_name, builder_name, build_number,
                            good_revision, bad_revision, try_job_type,
                            suspected_revisions):
        properties = {
            'recipe':
            'findit/chromium/%s' %
            (failure_type.GetDescriptionForFailureType(try_job_type)),
            'good_revision':
            good_revision,
            'bad_revision':
            bad_revision,
            'target_mastername':
            master_name,
            'referenced_build_url':
            buildbot.CreateBuildUrl(master_name, builder_name, build_number)
        }

        if suspected_revisions:
            properties['suspected_revisions'] = suspected_revisions

        return properties
Example #26
  def failure_type_str(self):
    return failure_type.GetDescriptionForFailureType(self.failure_type)
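A hypothetical caller sketch; it assumes the helper lives on an entity such as WfTryJobData whose failure_type field holds the integer enum, and calls it as a plain method as shown above (in the original it may well be exposed as a property).

# Hypothetical usage: returns 'compile' for a compile try job.
try_job_data = WfTryJobData.Get('1')
description = try_job_data.failure_type_str()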
Example #27
  def testDescriptionForInvalidType(self):
    self.assertIn('No description for',
                  failure_type.GetDescriptionForFailureType(0x666))
Example #28
  def testDescriptionForValidType(self):
    self.assertEqual(
        'compile',
        failure_type.GetDescriptionForFailureType(failure_type.COMPILE))
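Taken together, these tests pin down only part of the module's behavior: COMPILE maps to 'compile', and unknown values produce a 'No description for ...' string. Below is a minimal sketch of a failure_type module that would satisfy them; the numeric enum values and the TEST/FLAKY_TEST descriptions are assumptions, not the real implementation.

# Sketch only, inferred from the tests above; anything marked 'assumed' is not
# confirmed by these examples.
COMPILE = 8      # assumed value
TEST = 16        # assumed value
FLAKY_TEST = 32  # assumed value

_TYPE_TO_DESCRIPTION = {
    COMPILE: 'compile',
    TEST: 'test',         # assumed description
    FLAKY_TEST: 'flake',  # assumed description
}


def GetDescriptionForFailureType(build_failure_type):
  # Unknown types fall back to an explanatory string, which is what
  # testDescriptionForInvalidType checks for.
  return _TYPE_TO_DESCRIPTION.get(
      build_failure_type,
      'No description for failure type %s' % build_failure_type)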