def _GetRerunBuildInputProperties(context, referred_build, targets):
  luci_project = context.luci_project_name
  project_api = projects.GetProjectAPI(luci_project)
  assert project_api, 'Unsupported project {}'.format(luci_project)

  return project_api.GetCompileRerunBuildInputProperties(
      referred_build, targets)
def BackfillIfSkippedAnalyses(context, build):
  """Trigger analyses on previously skipped failures if required.

  May need to trigger analyses for previous failures if this build's
  failures are merged into them but they were skipped.

  Note that if Findit just starts to support a builder, there could be non-first
  failures in the first build analyzed by Findit. In this case since Findit
  doesn't have any information about the builds before supporting, it will not
  backfill those analyses.
  """
  luci_project = context.luci_project_name
  project_api = projects.GetProjectAPI(luci_project)
  analysis_api = TestAnalysisAPI()

  failures = analysis_api.GetFailureEntitiesForABuild(build)
  skipped_failures = analysis_api.GetSkippedFailures(project_api, failures)
  if not skipped_failures:
    return

  for prev_build_id, failures_to_analyze in (skipped_failures.iteritems()):
    prev_build, prev_context = build_util.GetBuildAndContextForAnalysis(
        luci_project, prev_build_id)
    analysis_api.AnalyzeSkippedFailures(project_api, prev_context, prev_build,
                                        failures_to_analyze)
def _ProcessAndSaveRerunBuildResult(context, analyzed_build_id, rerun_build):
  """Gets results of a completed rerun build and save it in datastore.

  Args:
     context (findit_v2.services.context.Context): Scope of the analysis.
     analyzed_build_id (int): Id of the failed ci/post_submit build that's being
       analyzed.
     rerun_build (buildbucket build.proto): ALL info about the rerun build.

   Returns:
     True if the rerun build entity is updated, otherwise False.
  """
  project_api = projects.GetProjectAPI(context.luci_project_name)

  analysis = TestFailureAnalysis.GetVersion(analyzed_build_id)
  if not analysis:
    logging.error('TestFailureAnalysis missing for %d.', analyzed_build_id)
    return False

  rerun_build_entity = TestRerunBuild.get_by_id(
      rerun_build.id, parent=analysis.key)
  if not rerun_build_entity:
    logging.error('TestRerunBuild entity for build %d missing.', rerun_build.id)
    return False

  detailed_test_failures = {}
  if rerun_build.status == common_pb2.FAILURE:
    failed_steps = build_util.GetFailedStepsInBuild(context, rerun_build)
    test_steps = [fs[0] for fs in failed_steps if fs[1] == StepTypeEnum.TEST]
    detailed_test_failures = project_api.GetTestFailures(
        rerun_build, test_steps) if test_steps else {}
  _SaveRerunBuildResults(rerun_build_entity, rerun_build.status,
                         detailed_test_failures,
                         rerun_build.end_time.ToDatetime())
  return True
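
# _SaveRerunBuildResults is referenced above but not included in this snippet.
# A minimal sketch of what it might do, assuming (hypothetically) that the
# TestRerunBuild entity has status, failures and end_time properties; the
# field names and the way failures are serialized are illustrative, not the
# actual datastore schema.
def _SaveRerunBuildResults(rerun_build_entity, status, detailed_test_failures,
                           end_time):
  """Persists the final state of a completed rerun build (sketch)."""
  rerun_build_entity.status = status
  rerun_build_entity.end_time = end_time
  # Flatten the per-step failure dict into something serializable; the real
  # implementation likely stores a structured failure property instead.
  serializable_failures = []
  for step_ui_name, step_info in detailed_test_failures.items():
    for atom_group in step_info.get('failures', {}):
      serializable_failures.append({
          'step_ui_name': step_ui_name,
          'atoms': sorted(atom_group),
      })
  rerun_build_entity.failures = serializable_failures
  rerun_build_entity.put()
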
def GetFirstFailuresInCurrentBuildWithoutGroup(context, build,
                                               first_failures_in_current_build):
  """Gets first failures without existing failure groups.

  Args:
    context (findit_v2.services.context.Context): Scope of the analysis.
    build (buildbucket build.proto): ALL info about the build.
    first_failures_in_current_build (dict): A dict of failures that happened
      the first time in the current build.
      {
        'failures': {
          'step name': {
            'output_targets': [
              frozenset(['target4']),
              frozenset(['target1', 'target2'])],
            'last_passed_build': {
              'id': 8765432109,
              'number': 122,
              'commit_id': 'git_sha1'
            },
          },
        },
        'last_passed_build': {
          'id': 8765432109,
          'number': 122,
          'commit_id': 'git_sha1'
        }
      }

  Returns:
    failures_without_existing_group (dict): An updated version of
      first_failures_in_current_build with failures that already have an
      existing group removed.
  """
  luci_project = context.luci_project_name
  project_api = projects.GetProjectAPI(luci_project)
  assert project_api, 'Unsupported project {}'.format(luci_project)

  failures_with_existing_group = (
      project_api.GetFailuresWithMatchingCompileFailureGroups(
          context, build, first_failures_in_current_build))

  if not failures_with_existing_group:
    # All failures need a new group.
    return first_failures_in_current_build

  _UpdateCompileFailureEntitiesWithGroupInfo(build,
                                             failures_with_existing_group)

  return _GetFailuresWithoutMatchingCompileFailureGroups(
      build.id, first_failures_in_current_build, failures_with_existing_group)
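
# _GetFailuresWithoutMatchingCompileFailureGroups is referenced above but not
# shown. A minimal sketch of the filtering it performs, assuming that
# failures_with_existing_group maps a step name to the output_targets
# frozensets that already belong to a group; the real structure and edge-case
# handling may differ.
def _GetFailuresWithoutMatchingCompileFailureGroups(
    current_build_id, first_failures_in_current_build,
    failures_with_existing_group):
  """Drops first failures that already have a matching group (sketch)."""
  # current_build_id mirrors the real signature; the actual implementation
  # likely uses it for logging.
  failures_without_existing_group = {
      'failures': {},
      'last_passed_build': first_failures_in_current_build['last_passed_build'],
  }
  for step_ui_name, step_failure in (
      first_failures_in_current_build['failures'].items()):
    grouped_targets = failures_with_existing_group.get(step_ui_name, {})
    remaining_targets = [
        targets for targets in step_failure['output_targets']
        if targets not in grouped_targets
    ]
    if remaining_targets:
      failures_without_existing_group['failures'][step_ui_name] = {
          'output_targets': remaining_targets,
          'last_passed_build': step_failure['last_passed_build'],
      }
  return failures_without_existing_group
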
def GetFailedStepsInBuild(context, build):
  """Gets failed steps and their types for a LUCI build.

  Args:
    context (findit_v2.services.context.Context): Scope of the analysis.
    build (buildbucket build.proto): ALL info about the build.

  Returns:
    A list of tuples, each tuple contains the information of a failed step and
    its type.
  """
  project_api = projects.GetProjectAPI(context.luci_project_name)
  assert project_api, 'Unsupported project {}'.format(
      context.luci_project_name)

  failed_steps = []
  for step in build.steps:
    if step.status != common_pb2.FAILURE:
      continue
    failure_type = project_api.ClassifyStepType(build, step)
    failed_steps.append((step, failure_type))

  return failed_steps
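
# A small usage sketch of GetFailedStepsInBuild, mirroring how
# _ProcessAndSaveRerunBuildResult above keeps only the failed test steps.
# _GetFailedTestSteps is a hypothetical helper name, and it assumes
# StepTypeEnum.TEST as used earlier in this snippet.
def _GetFailedTestSteps(context, build):
  """Returns the failed steps of a build that are classified as test steps."""
  failed_steps = GetFailedStepsInBuild(context, build)
  return [step for step, step_type in failed_steps
          if step_type == StepTypeEnum.TEST]
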
def testGetProjectAPI(self):
  self.assertTrue(
      isinstance(projects.GetProjectAPI('chromeos'), ChromeOSProjectAPI))
def AnalyzeTestFailure(context, build, test_steps):
  """Analyzes test failure from a failed ci/postsubmit build.

  Args:
    context (findit_v2.services.context.Context): Scope of the analysis.
    build (buildbucket build.proto): ALL info about the build.
    test_steps (list of buildbucket step.proto): The failed test steps.

  Returns:
    (bool): Returns True if a new analysis starts, otherwise False.
  """
  luci_project = context.luci_project_name
  if luci_project == 'chromium':
    logging.warning('Findit does not support chromium project in v2.')
    return False

  project_api = projects.GetProjectAPI(luci_project)

  analysis_api = TestAnalysisAPI()

  # Project config specifying whether failures should be grouped to reduce
  # duplicated analyses.
  should_group_failures = projects.PROJECT_CFG.get(
      luci_project, {}).get('should_group_failures')

  detailed_test_failures = project_api.GetTestFailures(build, test_steps)
  # Checks previous builds to find, for each failure in the current failed
  # build, the build in which it first occurred.
  analysis_api.UpdateFailuresWithFirstFailureInfo(context, build,
                                                  detailed_test_failures)
  analysis_api.SaveFailures(context, build, detailed_test_failures)

  # Looks for the failures that started to fail in the current build.
  first_failures_in_current_build = (
      analysis_api.GetFirstFailuresInCurrentBuild(build,
                                                  detailed_test_failures))
  if not first_failures_in_current_build.get('failures'):
    logging.info(
        'No new analysis for build %d because all failures have '
        'happened in previous builds.', build.id)
    return False

  # Filters out first failures that already have an existing failure group.
  if should_group_failures:
    failures_without_existing_group = (
        analysis_api.GetFirstFailuresInCurrentBuildWithoutGroup(
            project_api, context, build, first_failures_in_current_build))
  else:
    failures_without_existing_group = first_failures_in_current_build

  if not failures_without_existing_group.get('failures'):
    logging.info(
        'All failures have matching failure groups in build %s,'
        ' no need to start a new analysis.', build.id)
    return False

  # Starts a new analysis for the first-time failures.
  analysis = analysis_api.SaveFailureAnalysis(project_api, context, build,
                                              failures_without_existing_group,
                                              should_group_failures)
  if not analysis:
    return False
  analysis_api.RerunBasedAnalysis(context, build.id)
  return True
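
# AnalyzeTestFailure above reads should_group_failures from
# projects.PROJECT_CFG. A hypothetical illustration of what such a per-project
# config entry could look like; the actual keys and projects in PROJECT_CFG
# may differ.
_EXAMPLE_PROJECT_CFG = {
    'chromeos': {
        'should_group_failures': True,
    },
}
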
def SaveCompileAnalysis(context, build, failures_without_existing_group,
                        should_group_failures):
  """Creates and saves CompileFailureAnalysis entity for the build being
    analyzed if there are first failures in the build.

  Args:
    context (findit_v2.services.context.Context): Scope of the analysis.
    build (buildbucket build.proto): ALL info about the build.
    failures_without_existing_group (dict): A dict of failures that happened
      the first time in the current build and have no matching group.
      {
        'failures': {
          'compile': {
            'output_targets': ['target4', 'target1', 'target2'],
            'last_passed_build': {
              'id': 8765432109,
              'number': 122,
              'commit_id': 'git_sha1'
            },
          },
        },
        'last_passed_build': {
          'id': 8765432109,
          'number': 122,
          'commit_id': 'git_sha1'
        }
      }
    should_group_failures (bool): Project config specifying whether failures
      should be grouped to reduce duplicated analyses.
  """
  luci_project = context.luci_project_name
  project_api = projects.GetProjectAPI(luci_project)
  assert project_api, 'Unsupported project {}'.format(luci_project)

  rerun_builder_id = project_api.GetRerunBuilderId(build)

  # Gets keys to the compile failures that failed the first time in the build.
  # These are the failures the analysis will cover.
  compile_failure_keys = _GetCompileFailureKeys(
      build, failures_without_existing_group)

  repo_url = git.GetRepoUrlFromContext(context)
  last_passed_gitiles_id = failures_without_existing_group['last_passed_build'][
      'commit_id']
  last_passed_commit_position = git.GetCommitPositionFromRevision(
      last_passed_gitiles_id, repo_url, ref=context.gitiles_ref)
  first_failed_commit_position = git.GetCommitPositionFromRevision(
      context.gitiles_id, repo_url, ref=context.gitiles_ref)

  if should_group_failures:
    _CreateAndSaveFailureGroupEntity(
        context, build, compile_failure_keys, last_passed_gitiles_id,
        last_passed_commit_position, first_failed_commit_position)

  analysis = CompileFailureAnalysis.Create(
      luci_project=luci_project,
      luci_bucket=build.builder.bucket,
      luci_builder=build.builder.builder,
      build_id=build.id,
      gitiles_host=context.gitiles_host,
      gitiles_project=context.gitiles_project,
      gitiles_ref=context.gitiles_ref,
      last_passed_gitiles_id=last_passed_gitiles_id,
      last_passed_commit_position=last_passed_commit_position,
      first_failed_gitiles_id=context.gitiles_id,
      first_failed_commit_position=first_failed_commit_position,
      rerun_builder_id=rerun_builder_id,
      compile_failure_keys=compile_failure_keys)
  analysis.Save()
  return analysis
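
# The analysis saved above can later be looked up by the id of the analyzed
# build, in the same way _ProcessAndSaveRerunBuildResult loads a
# TestFailureAnalysis. A minimal usage sketch, assuming CompileFailureAnalysis
# exposes the same GetVersion classmethod; _GetCompileAnalysis is a
# hypothetical helper name.
def _GetCompileAnalysis(analyzed_build_id):
  """Reloads the saved analysis for a build, or returns None if missing."""
  analysis = CompileFailureAnalysis.GetVersion(analyzed_build_id)
  if not analysis:
    logging.error('CompileFailureAnalysis missing for %d.', analyzed_build_id)
  return analysis
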
def GetFirstFailuresInCurrentBuild(context, build, detailed_compile_failures):
  """Gets failures that happened the first time in the current build.

  Failures without a last_passed_build will not be included even if they failed
  for the first time in the current build (they have statuses other than
  SUCCESS or FAILURE in all previous builds), because Findit cannot determine
  the left bound of the regression range.

  If the first failures have different last_passed_builds, the earliest one is
  used.

  Args:
    context (findit_v2.services.context.Context): Scope of the analysis.
    build (buildbucket build.proto): ALL info about the build.
    detailed_compile_failures (dict): A dict of detailed compile failures.
      {
        'step_name': {
          'failures': {
            frozenset(['target1', 'target2']): {
              'rule': 'emerge',
              'first_failed_build': {
                'id': 8765432109,
                'number': 123,
                'commit_id': 654321
              },
              'last_passed_build': None
            },
            ...
          },
          'first_failed_build': {
            'id': 8765432109,
            'number': 123,
            'commit_id': 654321
          },
          'last_passed_build': None
        },
      }
  Returns:
    dict: A dict of failures that happened the first time in the current build.
    {
      'failures': {
        'compile': {
          'output_targets': ['target4', 'target1', 'target2'],
          'last_passed_build': {
            'id': 8765432109,
            'number': 122,
            'commit_id': 'git_sha1'
          },
        },
      },
      'last_passed_build': {
        # In this build all the failures that happened in the build being
        # analyzed passed.
        'id': 8765432108,
        'number': 121,
        'commit_id': 'git_sha0'
      }
    }
  """
  luci_project = context.luci_project_name
  project_api = projects.GetProjectAPI(luci_project)
  assert project_api, 'Unsupported project {}'.format(luci_project)

  first_failures_in_current_build = {'failures': {}, 'last_passed_build': None}
  for step_ui_name, step_info in detailed_compile_failures.iteritems():
    if not step_info[
        'failures'] and step_info['first_failed_build']['id'] != build.id:
      # Only step-level information is available, and the step started to fail
      # in previous builds.
      continue

    if step_info['first_failed_build']['id'] == build.id and step_info[
        'last_passed_build']:
      # All failures in this step are first failures and last pass was found.
      first_failures_in_current_build['failures'][step_ui_name] = {
          'output_targets': [],
          'last_passed_build': step_info['last_passed_build'],
      }
      for output_targets, failure in step_info['failures'].iteritems():
        first_failures_in_current_build['failures'][step_ui_name][
            'output_targets'].append(output_targets)

      first_failures_in_current_build['last_passed_build'] = (
          _GetEarlierBuild(first_failures_in_current_build['last_passed_build'],
                           step_info['last_passed_build']))
      continue

    first_failures_in_step = {
        'output_targets': [],
        'last_passed_build': step_info['last_passed_build'],
    }
    for output_targets, failure in step_info['failures'].iteritems():
      if failure['first_failed_build']['id'] == build.id and failure[
          'last_passed_build']:
        first_failures_in_step['output_targets'].append(output_targets)
        first_failures_in_step['last_passed_build'] = (
            _GetEarlierBuild(first_failures_in_step['last_passed_build'],
                             failure['last_passed_build']))
    if first_failures_in_step['output_targets']:
      # Some failures are first time failures in current build.
      first_failures_in_current_build['failures'][
          step_ui_name] = first_failures_in_step

      first_failures_in_current_build['last_passed_build'] = (
          _GetEarlierBuild(first_failures_in_current_build['last_passed_build'],
                           first_failures_in_step['last_passed_build']))

  return first_failures_in_current_build
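
# _GetEarlierBuild is referenced above but not shown. A minimal sketch of the
# comparison it likely performs on the build-info dicts used here (with 'id',
# 'number' and 'commit_id' keys): prefer whichever build has the smaller build
# number, treating None as "no build found yet".
def _GetEarlierBuild(build_info_1, build_info_2):
  """Returns the earlier of two build-info dicts (sketch)."""
  if not build_info_1:
    return build_info_2
  if not build_info_2:
    return build_info_1
  return (build_info_1 if build_info_1['number'] <= build_info_2['number']
          else build_info_2)
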
def UpdateCompileFailuresWithFirstFailureInfo(context, build,
                                              detailed_compile_failures):
  """Updates detailed_compile_failures with first failure info.

  Args:
    context (findit_v2.services.context.Context): Scope of the analysis.
    build (buildbucket build.proto): ALL info about the build.
    detailed_compile_failures (dict): A dict of detailed compile failures.
      {
        'step_name': {
          'failures': {
            frozenset(['target1', 'target2']): {
              'rule': 'emerge',
              'first_failed_build': {
                'id': 8765432109,
                'number': 123,
                'commit_id': 654321
              },
              'last_passed_build': None,
            },
            ...
          },
          'first_failed_build': {
            'id': 8765432109,
            'number': 123,
            'commit_id': 654321
          },
          'last_passed_build': None,
        },
      }
  """
  luci_project = context.luci_project_name
  project_api = projects.GetProjectAPI(luci_project)
  assert project_api, 'Unsupported project {}'.format(luci_project)

  # Gets previous builds, sorted by build number in descending order.
  # Steps are not included in these builds to keep the response size small;
  # they are requested from buildbucket separately for each failed build.
  search_builds_response = buildbucket_client.SearchV2BuildsOnBuilder(
      build.builder,
      build_range=(None, build.id),
      page_size=constants.MAX_BUILDS_TO_CHECK)
  previous_builds = search_builds_response.builds

  need_go_back = False
  for prev_build in previous_builds:
    if prev_build.id == build.id:
      # TODO(crbug.com/969124): remove the check when SearchBuilds RPC works as
      # expected.
      continue

    prev_build_info = {
        'id': prev_build.id,
        'number': prev_build.number,
        'commit_id': prev_build.input.gitiles_commit.id
    }

    if prev_build.status == common_pb2.SUCCESS:
      # Found a passed build, update all failures.
      _UpdateCompileFailuresWithPreviousBuildInfo(detailed_compile_failures,
                                                  prev_build_info)
      return

    prev_compile_steps, prev_failures = (
        _GetPreviousCompileStepsAndFailuresInPreviousBuild(
            project_api, prev_build, detailed_compile_failures))

    for step_ui_name, step_info in detailed_compile_failures.iteritems():
      if not prev_compile_steps.get(step_ui_name):
        # For some reason the compile step didn't run in the previous build.
        need_go_back = True
        continue

      if prev_compile_steps.get(step_ui_name) and prev_compile_steps[
          step_ui_name].status == common_pb2.SUCCESS:
        # The step passed in the previous build, update all failures in this
        # step.
        _UpdateCompileFailuresWithPreviousBuildInfo(
            detailed_compile_failures,
            prev_build_info,
            prev_step_ui_name=step_ui_name)
        continue

      if not prev_failures.get(step_ui_name):
        # The step neither passed nor failed, so Findit cannot get useful
        # information from it; keep going back.
        need_go_back = True
        continue

      step_last_passed_found = True
      failures = step_info['failures']
      for targets_str, failure in failures.iteritems():
        if failure['last_passed_build']:
          # Last pass has been found for this failure, skip the failure.
          continue

        if prev_failures[step_ui_name]['failures'].get(targets_str):
          # The same failure happened in the previous build; keep going back.
          failure['first_failed_build'] = prev_build_info
          step_info['first_failed_build'] = prev_build_info
          need_go_back = True
          step_last_passed_found = False
        else:
          # The failure didn't happen in the previous build, so its first
          # failure has been found.
          failure['last_passed_build'] = prev_build_info

      if step_last_passed_found:
        step_info['last_passed_build'] = prev_build_info

    if not need_go_back:
      return
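
# _UpdateCompileFailuresWithPreviousBuildInfo is referenced above but not
# shown. A minimal sketch of the update it performs once a passed build (or a
# passed step) is found: record that build as the last_passed_build of every
# failure that does not have one yet. The real implementation may handle more
# edge cases.
def _UpdateCompileFailuresWithPreviousBuildInfo(detailed_compile_failures,
                                                prev_build_info,
                                                prev_step_ui_name=None):
  """Marks prev_build_info as last passed build for open failures (sketch)."""
  for step_ui_name, step_info in detailed_compile_failures.items():
    if prev_step_ui_name and step_ui_name != prev_step_ui_name:
      # Only the given step passed in the previous build; leave other steps.
      continue
    if not step_info.get('last_passed_build'):
      step_info['last_passed_build'] = prev_build_info
    for failure in step_info.get('failures', {}).values():
      if not failure.get('last_passed_build'):
        failure['last_passed_build'] = prev_build_info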