Example #1
def _check_commits(testcase, bisect_type, old_commit, new_commit):
    """Check old and new commit validity."""
    if old_commit != new_commit or build_manager.is_custom_binary():
        return old_commit, new_commit

    # Something went wrong during bisection if the same commit was chosen for
    # both the start and the end of the range.
    # Get the bisection infrastructure to re-bisect.
    if environment.is_running_on_app_engine():
        bucket_path = data_handler.get_value_from_job_definition(
            testcase.job_type, 'RELEASE_BUILD_BUCKET_PATH')
    else:
        bucket_path = build_manager.get_primary_bucket_path()
    revision_list = build_manager.get_revisions_list(bucket_path)

    last_tested_revision = testcase.get_metadata('last_tested_crash_revision')
    known_crash_revision = last_tested_revision or testcase.crash_revision

    if bisect_type == 'fixed':
        # Narrowest range: last crashing revision up to the latest build.
        return _get_commits(
            str(known_crash_revision) + ':' + str(revision_list[-1]),
            testcase.job_type)

    if bisect_type == 'regressed':
        # Narrowest range: first build to the first crashing revision.
        return _get_commits(
            str(revision_list[0]) + ':' + str(testcase.crash_revision),
            testcase.job_type)

    raise ValueError('Invalid bisection type: ' + bisect_type)
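
A small worked illustration of the two range strings built above, using hypothetical revision numbers (a sketch of the string construction only, not the real _get_commits call):

# Illustration only: hypothetical revision numbers.
revision_list = [1000, 1005, 1010, 1020]   # assumed ascending build revisions
crash_revision = 1005                      # testcase.crash_revision
known_crash_revision = 1005                # last tested crash revision, if set

# 'fixed' bisection: last crashing revision up to the latest build.
fixed_range = str(known_crash_revision) + ':' + str(revision_list[-1])   # '1005:1020'

# 'regressed' bisection: first available build up to the first crashing revision.
regressed_range = str(revision_list[0]) + ':' + str(crash_revision)      # '1000:1005'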
Example #2
def create_regression_task_if_needed(testcase):
    """Creates a regression task if needed."""
    # We cannot run a regression job for custom binaries since we don't have any
    # archived builds for previous revisions. We only track the last uploaded
    # custom build.
    if build_manager.is_custom_binary():
        return

    tasks.add_task('regression', testcase.key.id(), testcase.job_type)
Example #3
def create_symbolize_task_if_needed(testcase):
    """Creates a symbolize task if needed."""
    # We cannot run a symbolize job for custom binaries since we don't have any
    # archived symbolized builds.
    if build_manager.is_custom_binary():
        return

    # Make sure we have at least one symbolized URL pattern defined in the job type.
    if not build_manager.has_symbolized_builds():
        return

    tasks.add_task('symbolize', testcase.key.id(), testcase.job_type)
Example #4
def _is_predator_testcase(testcase):
    """Return bool and error message for whether this testcase is applicable to
  predator or not."""
    if build_manager.is_custom_binary():
        return False, 'Not applicable to custom binaries.'

    if testcase.regression != 'NA':
        if not testcase.regression:
            return False, 'No regression range, wait for regression task to finish.'

        if ':' not in testcase.regression:
            return False, 'Invalid regression range %s.' % testcase.regression

    return True, None
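
Because the helper returns an (applicable, reason) pair instead of raising, callers are expected to unpack both values. A minimal hypothetical call site (the _send_predator_request name is made up for illustration):

def maybe_send_to_predator(testcase):
    """Hypothetical caller sketch for the (applicable, reason) pair above."""
    applicable, reason = _is_predator_testcase(testcase)
    if not applicable:
        logs.log('Not sending testcase to Predator: %s' % reason)
        return
    _send_predator_request(testcase)  # hypothetical follow-up step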
Example #5
def create_impact_task_if_needed(testcase):
    """Creates an impact task if needed."""
    # Impact doesn't make sense for non-chromium projects.
    if not utils.is_chromium():
        return

    # Impact is only applicable to the chromium project, otherwise bail out.
    if testcase.project_name != 'chromium':
        return

    # We cannot run an impact job for custom binaries since we don't have any
    # archived production builds for these.
    if build_manager.is_custom_binary():
        return

    tasks.add_task('impact', testcase.key.id(), testcase.job_type)
Example #6
def setup_build(testcase):
    """Set up a custom or regular build based on revision. For regular builds,
  if a provided revision is not found, set up a build with the
  closest revision <= provided revision."""
    revision = testcase.crash_revision

    if revision and not build_manager.is_custom_binary():
        build_bucket_path = build_manager.get_primary_bucket_path()
        revision_list = build_manager.get_revisions_list(build_bucket_path,
                                                         testcase=testcase)
        if not revision_list:
            logs.log_error('Failed to fetch revision list.')
            return

        revision_index = revisions.find_min_revision_index(
            revision_list, revision)
        if revision_index is None:
            raise errors.BuildNotFoundError(revision, testcase.job_type)
        revision = revision_list[revision_index]

    build_manager.setup_build(revision)
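
The "closest revision <= provided revision" fallback described in the docstring amounts to a binary search over the sorted revision list. A minimal standalone sketch of that lookup (an illustration only, not the actual revisions.find_min_revision_index implementation):

import bisect


def closest_revision_at_or_below(revision_list, revision):
    """Return the index of the largest revision <= revision, or None.

    Sketch only; assumes revision_list is sorted in ascending order.
    """
    index = bisect.bisect_right(revision_list, revision)
    if index == 0:
        return None  # Every available build is newer than the requested revision.
    return index - 1


# Example: requesting r1007 falls back to the closest earlier build, r1005.
assert closest_revision_at_or_below([1000, 1005, 1010], 1007) == 1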
Example #7
def create_blame_task_if_needed(testcase):
    """Creates a blame task if needed."""
    # Blame doesn't work for non-chromium projects.
    if not utils.is_chromium():
        return

    # Blame is only applicable to the chromium project, otherwise bail out.
    if testcase.project_name != 'chromium':
        return

    # We cannot run a blame job for custom binaries since we don't have any
    # context on the crash revision and regression range.
    if build_manager.is_custom_binary():
        return

    # Don't send duplicate issues to Predator. This causes issues with metrics
    # tracking and wastes cycles.
    if testcase.status == 'Duplicate':
        return

    create_task = False
    if testcase.one_time_crasher_flag:
        # For unreproducible testcases, it is still beneficial to get component
        # information from blame task.
        create_task = True
    else:
        # Reproducible testcase.
        # Step 1: Check if the regression task finished. If not, bail out.
        if not testcase.regression:
            return

        # Step 2: Check if the symbolize task is applicable and finished. If not,
        # bail out.
        if build_manager.has_symbolized_builds() and not testcase.symbolized:
            return

        create_task = True

    if create_task:
        tasks.add_task('blame', testcase.key.id(), testcase.job_type)
Example #8
def find_fixed_range(testcase_id, job_type):
  """Attempt to find the revision range where a testcase was fixed."""
  deadline = tasks.get_task_completion_deadline()
  testcase = data_handler.get_testcase_by_id(testcase_id)
  if not testcase:
    return

  if testcase.fixed:
    logs.log_error('Fixed range is already set as %s, skip.' % testcase.fixed)
    return

  # Set up the testcase and its dependencies.
  file_list, _, testcase_file_path = setup.setup_testcase(testcase, job_type)
  if not file_list:
    return

  # Set a flag to indicate we are running a progression task. This shows a
  # pending status on the testcase report page and avoids conflicting testcase
  # updates by the triage cron.
  testcase.set_metadata('progression_pending', True)

  # Custom binaries are handled as special cases.
  if build_manager.is_custom_binary():
    _check_fixed_for_custom_binary(testcase, job_type, testcase_file_path)
    return

  build_bucket_path = build_manager.get_primary_bucket_path()
  revision_list = build_manager.get_revisions_list(
      build_bucket_path, testcase=testcase)
  if not revision_list:
    data_handler.close_testcase_with_error(testcase_id,
                                           'Failed to fetch revision list')
    return

  # Use min_index and max_index to mark the start and end of the revision list
  # used for bisecting the progression range. Set the start to the revision
  # where we noticed the crash and the end to the trunk revision. Also, reuse
  # the min and max from a past run if it timed out.
  min_revision = testcase.get_metadata('last_progression_min')
  max_revision = testcase.get_metadata('last_progression_max')

  if min_revision or max_revision:
    # Clear these to avoid using them in the next run. If this run fails, the
    # next run should try without them to see if it succeeds. If this run
    # succeeds, we should still clear them to avoid capping the max revision in
    # the next run.
    testcase = data_handler.get_testcase_by_id(testcase_id)
    testcase.delete_metadata('last_progression_min', update_testcase=False)
    testcase.delete_metadata('last_progression_max', update_testcase=False)
    testcase.put()

  last_tested_revision = testcase.get_metadata('last_tested_crash_revision')
  known_crash_revision = last_tested_revision or testcase.crash_revision
  if not min_revision:
    min_revision = known_crash_revision
  if not max_revision:
    max_revision = revisions.get_last_revision_in_list(revision_list)

  min_index = revisions.find_min_revision_index(revision_list, min_revision)
  if min_index is None:
    raise errors.BuildNotFoundError(min_revision, job_type)
  max_index = revisions.find_max_revision_index(revision_list, max_revision)
  if max_index is None:
    raise errors.BuildNotFoundError(max_revision, job_type)

  testcase = data_handler.get_testcase_by_id(testcase_id)
  data_handler.update_testcase_comment(testcase, data_types.TaskState.STARTED,
                                       'r%d' % max_revision)

  # Check to see if this testcase is still crashing now. If it is, then just
  # bail out.
  result = _testcase_reproduces_in_revision(
      testcase,
      testcase_file_path,
      job_type,
      max_revision,
      update_metadata=True)
  if result.is_crash():
    logs.log('Found crash with same signature on latest revision r%d.' %
             max_revision)
    app_path = environment.get_value('APP_PATH')
    command = testcase_manager.get_command_line_for_application(
        testcase_file_path, app_path=app_path, needs_http=testcase.http_flag)
    symbolized_crash_stacktrace = result.get_stacktrace(symbolized=True)
    unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)
    stacktrace = utils.get_crash_stacktrace_output(
        command, symbolized_crash_stacktrace, unsymbolized_crash_stacktrace)
    testcase = data_handler.get_testcase_by_id(testcase_id)
    testcase.last_tested_crash_stacktrace = data_handler.filter_stacktrace(
        stacktrace)
    data_handler.update_progression_completion_metadata(
        testcase,
        max_revision,
        is_crash=True,
        message='still crashes on latest revision r%s' % max_revision)

    # Since we've verified that the test case is still crashing, clear out any
    # metadata indicating potential flake from previous runs.
    task_creation.mark_unreproducible_if_flaky(testcase, False)

    # For the chromium project, save the latest crash information for later
    # upload to chromecrash/.
    state = result.get_symbolized_data()
    crash_uploader.save_crash_info_if_needed(testcase_id, max_revision,
                                             job_type, state.crash_type,
                                             state.crash_address, state.frames)
    return

  if result.unexpected_crash:
    testcase.set_metadata('crashes_on_unexpected_state', True)
  else:
    testcase.delete_metadata('crashes_on_unexpected_state')

  # Don't burden NFS server with caching these random builds.
  environment.set_value('CACHE_STORE', False)

  # Verify that we do crash in the min revision. This is assumed to be true
  # while we are doing the bisect.
  result = _testcase_reproduces_in_revision(testcase, testcase_file_path,
                                            job_type, min_revision)
  if result and not result.is_crash():
    testcase = data_handler.get_testcase_by_id(testcase_id)

    # Retry once on another bot to confirm our result.
    if data_handler.is_first_retry_for_task(testcase, reset_after_retry=True):
      tasks.add_task('progression', testcase_id, job_type)
      error_message = (
          'Known crash revision %d did not crash, will retry on another bot to '
          'confirm result' % known_crash_revision)
      data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                           error_message)
      data_handler.update_progression_completion_metadata(
          testcase, max_revision)
      return

    data_handler.clear_progression_pending(testcase)
    error_message = (
        'Known crash revision %d did not crash' % known_crash_revision)
    data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                         error_message)
    task_creation.mark_unreproducible_if_flaky(testcase, True)
    return

  # Start a binary search to find the first non-crashing revision. At this
  # point, we know that we do crash in min_revision and do not crash in
  # max_revision.
  while time.time() < deadline:
    min_revision = revision_list[min_index]
    max_revision = revision_list[max_index]

    # If the min and max revisions are one apart, this is as much as we can
    # narrow the range.
    if max_index - min_index == 1:
      _save_fixed_range(testcase_id, min_revision, max_revision,
                        testcase_file_path)
      return

    # Occasionally, we get into this bad state. It seems to be related to test
    # cases with flaky stacks, but the exact cause is unknown.
    if max_index - min_index < 1:
      testcase = data_handler.get_testcase_by_id(testcase_id)
      testcase.fixed = 'NA'
      testcase.open = False
      message = ('Fixed testing errored out (min and max revisions '
                 'are both %d)' % min_revision)
      data_handler.update_progression_completion_metadata(
          testcase, max_revision, message=message)

      # Let the bisection service know about the NA status.
      bisection.request_bisection(testcase)
      return

    # Test the middle revision of our range.
    middle_index = (min_index + max_index) // 2
    middle_revision = revision_list[middle_index]

    testcase = data_handler.get_testcase_by_id(testcase_id)
    log_message = 'Testing r%d (current range %d:%d)' % (
        middle_revision, min_revision, max_revision)
    data_handler.update_testcase_comment(testcase, data_types.TaskState.WIP,
                                         log_message)

    try:
      result = _testcase_reproduces_in_revision(testcase, testcase_file_path,
                                                job_type, middle_revision)
    except errors.BadBuildError:
      # Skip this revision.
      del revision_list[middle_index]
      max_index -= 1
      continue

    if result.is_crash():
      min_index = middle_index
    else:
      max_index = middle_index

    _save_current_fixed_range_indices(testcase_id, revision_list[min_index],
                                      revision_list[max_index])

  # If we've broken out of the loop, we've exceeded the deadline. Recreate the
  # task to pick up where we left off.
  testcase = data_handler.get_testcase_by_id(testcase_id)
  error_message = ('Timed out, current range r%d:r%d' %
                   (revision_list[min_index], revision_list[max_index]))
  data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                       error_message)
  tasks.add_task('progression', testcase_id, job_type)
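
Stripped of the datastore bookkeeping and deadline handling, the loop above is a standard bisection over the revision list: a crash at the middle revision raises the lower bound, a clean run lowers the upper bound, and an unbuildable revision is dropped from the list. A self-contained sketch under those assumptions (reproduces() is a hypothetical predicate standing in for _testcase_reproduces_in_revision):

class BadBuildError(Exception):
  """Stand-in for errors.BadBuildError."""


def bisect_fixed_range(revision_list, min_index, max_index, reproduces):
  """Sketch of the narrowing loop: returns (last_crashing, first_fixed).

  reproduces(revision) -> bool may raise BadBuildError for unusable builds.
  """
  while max_index - min_index > 1:
    middle_index = (min_index + max_index) // 2
    try:
      crashed = reproduces(revision_list[middle_index])
    except BadBuildError:
      # Unusable build: drop it so later entries shift down by one index.
      del revision_list[middle_index]
      max_index -= 1
      continue
    if crashed:
      min_index = middle_index  # Still crashes; the fix landed later.
    else:
      max_index = middle_index  # No crash; the fix landed at or before here.
  return revision_list[min_index], revision_list[max_index]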
Example #9
def find_regression_range(testcase_id, job_type):
    """Attempt to find when the testcase regressed."""
    deadline = tasks.get_task_completion_deadline()
    testcase = data_handler.get_testcase_by_id(testcase_id)
    if not testcase:
        return

    if testcase.regression:
        logs.log_error('Regression range is already set as %s, skip.' %
                       testcase.regression)
        return

    # This task is not applicable for custom binaries.
    if build_manager.is_custom_binary():
        testcase.regression = 'NA'
        data_handler.update_testcase_comment(
            testcase, data_types.TaskState.ERROR,
            'Not applicable for custom binaries')
        return

    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.STARTED)

    # Set up the testcase and its dependencies.
    file_list, _, testcase_file_path = setup.setup_testcase(testcase, job_type)
    if not file_list:
        testcase = data_handler.get_testcase_by_id(testcase_id)
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             'Failed to setup testcase')
        tasks.add_task('regression', testcase_id, job_type)
        return

    build_bucket_path = build_manager.get_primary_bucket_path()
    revision_list = build_manager.get_revisions_list(build_bucket_path,
                                                     testcase=testcase)
    if not revision_list:
        data_handler.close_testcase_with_error(
            testcase_id, 'Failed to fetch revision list')
        return

    # Don't burden NFS server with caching these random builds.
    environment.set_value('CACHE_STORE', False)

    # Pick up where we left off in a previous run if necessary.
    min_revision = testcase.get_metadata('last_regression_min')
    max_revision = testcase.get_metadata('last_regression_max')
    first_run = not min_revision and not max_revision
    if not min_revision:
        min_revision = revisions.get_first_revision_in_list(revision_list)
    if not max_revision:
        max_revision = testcase.crash_revision

    min_index = revisions.find_min_revision_index(revision_list, min_revision)
    if min_index is None:
        raise errors.BuildNotFoundError(min_revision, job_type)
    max_index = revisions.find_max_revision_index(revision_list, max_revision)
    if max_index is None:
        raise errors.BuildNotFoundError(max_revision, job_type)

    # Make sure that the testcase still crashes at the revision where we noticed
    # the crash. Otherwise, our binary search algorithm won't work correctly.
    max_revision = revision_list[max_index]
    crashes_in_max_revision = _testcase_reproduces_in_revision(
        testcase, testcase_file_path, job_type, max_revision, should_log=False)
    if not crashes_in_max_revision:
        testcase = data_handler.get_testcase_by_id(testcase_id)
        error_message = ('Known crash revision %d did not crash' %
                         max_revision)
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             error_message)
        task_creation.mark_unreproducible_if_flaky(testcase, True)
        return

    # If we've made it this far, the testcase appears to be reproducible. Clear
    # any metadata from previous runs that marked it as potentially flaky.
    task_creation.mark_unreproducible_if_flaky(testcase, False)

    # On the first run, check to see if we regressed near either the min or max
    # revision.
    if first_run and found_regression_near_extreme_revisions(
            testcase, testcase_file_path, job_type, revision_list, min_index,
            max_index):
        return

    while time.time() < deadline:
        min_revision = revision_list[min_index]
        max_revision = revision_list[max_index]

        # If the min and max revisions are one apart (or the same, if we only have
        # one build), this is as much as we can narrow the range.
        if max_index - min_index <= 1:
            # Verify that the regression range seems correct, and save it if so.
            if not validate_regression_range(testcase, testcase_file_path,
                                             job_type, revision_list,
                                             min_index):
                return

            save_regression_range(testcase_id, min_revision, max_revision)
            return

        middle_index = (min_index + max_index) // 2
        middle_revision = revision_list[middle_index]
        try:
            is_crash = _testcase_reproduces_in_revision(
                testcase,
                testcase_file_path,
                job_type,
                middle_revision,
                min_revision=min_revision,
                max_revision=max_revision)
        except errors.BadBuildError:
            # Skip this revision.
            del revision_list[middle_index]
            max_index -= 1
            continue

        if is_crash:
            max_index = middle_index
        else:
            min_index = middle_index

        _save_current_regression_range_indices(testcase_id,
                                               revision_list[min_index],
                                               revision_list[max_index])

    # If we've broken out of the above loop, we timed out. We'll finish by
    # running another regression task and picking up from this point.
    testcase = data_handler.get_testcase_by_id(testcase_id)
    error_message = 'Timed out, current range r%d:r%d' % (
        revision_list[min_index], revision_list[max_index])
    data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                         error_message)
    tasks.add_task('regression', testcase_id, job_type)
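
The regression search mirrors the fixed-range bisection in Example #8 with the predicate flipped: a crash at the middle revision pulls the upper bound down (the culprit change is at or before it), while a clean run pushes the lower bound up. A minimal sketch, reusing the hypothetical reproduces() predicate from the previous sketch and omitting the bad-build handling for brevity:

def bisect_regression_range(revision_list, min_index, max_index, reproduces):
    """Sketch: converge on (last_good_revision, first_crashing_revision)."""
    while max_index - min_index > 1:
        middle_index = (min_index + max_index) // 2
        if reproduces(revision_list[middle_index]):
            max_index = middle_index  # Crashes: regression is at or before middle.
        else:
            min_index = middle_index  # No crash: regression landed after middle.
    return revision_list[min_index], revision_list[max_index]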
Example #10
def execute_task(testcase_id, job_type):
    """Attempt to find if the testcase affects release branches on Chromium."""
    # This shouldn't ever get scheduled, but check just in case.
    if not utils.is_chromium():
        return

    # Locate the testcase associated with the id.
    testcase = data_handler.get_testcase_by_id(testcase_id)

    # If this testcase is fixed, we should no longer be doing impact testing.
    if testcase.fixed and testcase.is_impact_set_flag:
        return

    # For testcases with unreproducible status, we only do impact analysis
    # once.
    if testcase.is_status_unreproducible() and testcase.is_impact_set_flag:
        return

    # Update comments only after checking the above bailout conditions.
    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.STARTED)

    # This task is not applicable to unreproducible testcases.
    if testcase.one_time_crasher_flag:
        data_handler.update_testcase_comment(
            testcase, data_types.TaskState.ERROR,
            'Not applicable for unreproducible testcases')
        return

    # This task is not applicable for custom binaries. We cannot remove the
    # creation of such tasks specifically for custom binary testcases in the
    # cron job, so exit gracefully.
    if build_manager.is_custom_binary():
        data_handler.update_testcase_comment(
            testcase, data_types.TaskState.FINISHED,
            'Not applicable for custom binaries')
        return

    # If we don't have a stable or beta build URL pattern, we try to use the
    # build information URL to make a guess.
    if not build_manager.has_production_builds():
        if not testcase.regression:
            data_handler.update_testcase_comment(
                testcase, data_types.TaskState.FINISHED,
                'Cannot run without regression range, will re-run once regression '
                'task finishes')
            return

        impacts = get_impacts_from_url(testcase.regression, testcase.job_type)
        testcase = data_handler.get_testcase_by_id(testcase_id)
        set_testcase_with_impacts(testcase, impacts)
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.FINISHED)
        return

    # Set up the testcase and its dependencies.
    file_list, _, testcase_file_path = setup.setup_testcase(testcase, job_type)
    if not file_list:
        return

    # Set up extended stable, stable, and beta builds,
    # and get the impact and crash stacktrace.
    try:
        impacts = get_impacts_on_prod_builds(testcase, testcase_file_path)
    except BuildFailedException as error:
        testcase = data_handler.get_testcase_by_id(testcase_id)
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             str(error))
        tasks.add_task('impact',
                       testcase_id,
                       job_type,
                       wait_time=environment.get_value('FAIL_WAIT'))
        return

    testcase = data_handler.get_testcase_by_id(testcase_id)
    set_testcase_with_impacts(testcase, impacts)

    # Set the stacktrace in case we have an unreproducible crash on trunk,
    # but it crashes on one of the production builds.
    if testcase.is_status_unreproducible() and impacts.get_extra_trace():
        testcase.crash_stacktrace = data_handler.filter_stacktrace(
            '%s\n\n%s' %
            (data_handler.get_stacktrace(testcase), impacts.get_extra_trace()))

    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.FINISHED)