Example #1
def _testcase_reproduces_in_revision(testcase,
                                     testcase_file_path,
                                     job_type,
                                     revision,
                                     update_metadata=False):
    """Test to see if a test case reproduces in the specified revision."""
    build_manager.setup_build(revision)
    if not build_manager.check_app_path():
        raise errors.BuildSetupError(revision, job_type)

    if testcase_manager.check_for_bad_build(job_type, revision):
        log_message = 'Bad build at r%d. Skipping' % revision
        testcase = data_handler.get_testcase_by_id(testcase.key.id())
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.WIP,
                                             log_message)
        raise errors.BadBuildError(revision, job_type)

    test_timeout = environment.get_value('TEST_TIMEOUT', 10)
    result = testcase_manager.test_for_crash_with_retries(
        testcase,
        testcase_file_path,
        test_timeout,
        http_flag=testcase.http_flag)
    _log_output(revision, result)

    if update_metadata:
        _update_issue_metadata(testcase)

    return result
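
As a usage note, the sketch below shows one way this helper could be driven when scanning backwards for the first revision that no longer crashes. The wrapper name _first_non_crashing_revision and the revision_list argument are illustrative assumptions, not part of the snippet above; only the call into _testcase_reproduces_in_revision, the BadBuildError handling, and the result.is_crash() accessor mirror code shown on this page.

def _first_non_crashing_revision(testcase, testcase_file_path, job_type,
                                 revision_list):
    """Illustrative driver: walk revisions newest-first and return the first
    one in which the test case no longer reproduces."""
    for revision in reversed(revision_list):
        try:
            result = _testcase_reproduces_in_revision(
                testcase, testcase_file_path, job_type, revision)
        except errors.BadBuildError:
            # A bad build proves nothing either way; move on to the next one.
            continue

        # result is the object returned by test_for_crash_with_retries.
        if not result.is_crash():
            return revision

    return None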
Example #2
def found_regression_near_extreme_revisions(testcase, testcase_file_path,
                                            job_type, revision_list, min_index,
                                            max_index):
    """Test to see if we regressed near either the min or max revision."""
    # Test a few of the most recent revisions.
    last_known_crashing_revision = revision_list[max_index]
    for offset in range(1, EXTREME_REVISIONS_TO_TEST + 1):
        current_index = max_index - offset
        if current_index < min_index:
            break

        # If we don't crash in a recent revision, we regressed in one of the
        # commits between the current revision and the one at the next index.
        try:
            is_crash = _testcase_reproduces_in_revision(
                testcase, testcase_file_path, job_type,
                revision_list[current_index])
        except errors.BadBuildError:
            # Skip this revision.
            continue

        if not is_crash:
            save_regression_range(testcase.key.id(),
                                  revision_list[current_index],
                                  last_known_crashing_revision)
            return True

        last_known_crashing_revision = revision_list[current_index]

    # Test to see if we crash in the oldest revision we can run. This is a pre-
    # condition for our binary search. If we do crash in that revision, it
    # implies that we regressed between the first commit and our first revision,
    # which we represent as 0:|min_revision|.
    for _ in range(EXTREME_REVISIONS_TO_TEST):
        min_revision = revision_list[min_index]

        try:
            crashes_in_min_revision = _testcase_reproduces_in_revision(
                testcase,
                testcase_file_path,
                job_type,
                min_revision,
                should_log=False)
        except errors.BadBuildError:
            # If we find a bad build, potentially try another.
            if min_index + 1 >= max_index:
                break

            min_index += 1
            continue

        if crashes_in_min_revision:
            save_regression_range(testcase.key.id(), 0, min_revision)
            return True

        return False

    # We should have returned above. If we get here, it means we tried too many
    # builds near the min revision, and they were all bad.
    raise errors.BadBuildError(revision_list[min_index], job_type)
def _testcase_reproduces_in_revision(testcase,
                                     testcase_file_path,
                                     job_type,
                                     revision,
                                     should_log=True,
                                     min_revision=None,
                                     max_revision=None):
  """Test to see if a test case reproduces in the specified revision."""
  if should_log:
    log_message = 'Testing r%d' % revision
    if min_revision is not None and max_revision is not None:
      log_message += ' (current range %d:%d)' % (min_revision, max_revision)

    testcase = data_handler.get_testcase_by_id(testcase.key.id())
    data_handler.update_testcase_comment(testcase, data_types.TaskState.WIP,
                                         log_message)

  build_manager.setup_regular_build(revision)
  app_path = environment.get_value('APP_PATH')
  if not app_path:
    raise errors.BuildSetupError(revision, job_type)

  if tests.check_for_bad_build(job_type, revision):
    log_message = 'Bad build at r%d. Skipping' % revision
    testcase = data_handler.get_testcase_by_id(testcase.key.id())
    data_handler.update_testcase_comment(testcase, data_types.TaskState.WIP,
                                         log_message)
    raise errors.BadBuildError(revision, job_type)

  test_timeout = environment.get_value('TEST_TIMEOUT', 10)
  result = tests.test_for_crash_with_retries(
      testcase, testcase_file_path, test_timeout, http_flag=testcase.http_flag)
  return result.is_crash()
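
The comment in found_regression_near_extreme_revisions calls the min-revision crash check a precondition for a binary search over revision_list. A minimal sketch of that bisection step is below, reusing the min_revision/max_revision logging parameters of the second definition; the function name _bisect_regression_range and the strategy of dropping unbuildable revisions from the list are assumptions for illustration, not the original task code.

def _bisect_regression_range(testcase, testcase_file_path, job_type,
                             revision_list, min_index, max_index):
    """Illustrative bisection: narrow the range until the last non-crashing
    revision and the first crashing one are adjacent."""
    while max_index - min_index > 1:
        middle_index = (min_index + max_index) // 2
        try:
            is_crash = _testcase_reproduces_in_revision(
                testcase,
                testcase_file_path,
                job_type,
                revision_list[middle_index],
                min_revision=revision_list[min_index],
                max_revision=revision_list[max_index])
        except errors.BadBuildError:
            # Assumed strategy: discard the unbuildable revision and retry.
            # Deleting shifts later entries down, so max_index moves with them.
            del revision_list[middle_index]
            max_index -= 1
            continue

        if is_crash:
            max_index = middle_index
        else:
            min_index = middle_index

    # Adjacent indices bound the regression: the crash first appears at
    # revision_list[max_index].
    save_regression_range(testcase.key.id(), revision_list[min_index],
                          revision_list[max_index])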