Example #1
def _mark_as_fixed(testcase, revision):
    """Mark bug as fixed."""
    testcase.open = False
    # Bisection not available for external reproduction infrastructure. Assume
    # range (crash revision : current revision).
    testcase.fixed = f'{testcase.crash_revision}:{revision}'
    data_handler.update_progression_completion_metadata(
        testcase, revision, message=f'fixed in r{revision}')
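For reference, the range string built above is just 'min:max' over revision numbers; with no bisection available, it conservatively spans from the crash revision to the revision being reported. A tiny sketch with hypothetical values:

# Hypothetical revisions illustrating the 'fixed' range format used above.
crash_revision = 1000  # revision where the crash was first seen
reported_revision = 1053  # revision reported as no longer crashing
fixed_range = f'{crash_revision}:{reported_revision}'
assert fixed_range == '1000:1053'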
Example #2
def _mark_errored(testcase, revision, error):
    """Mark testcase as errored out."""
    message = 'Received error from external infra, marking testcase as NA.'
    logs.log_warn(message, error=error, testcase_id=testcase.key.id())

    testcase.fixed = 'NA'
    testcase.open = False
    data_handler.update_progression_completion_metadata(testcase,
                                                        revision,
                                                        message=message)
Example #3
def handle_update(testcase, revision, stacktrace, error):
    """Handle update."""
    logs.log('Got external update for testcase.',
             testcase_id=testcase.key.id())
    if error:
        _mark_errored(testcase, revision, error)
        return

    last_tested_revision = (testcase.get_metadata('last_tested_revision')
                            or testcase.crash_revision)

    if revision < last_tested_revision:
        logs.log_warn(f'Revision {revision} less than previously tested '
                      f'revision {last_tested_revision}.')
        return

    fuzz_target = testcase.get_fuzz_target()
    if fuzz_target:
        fuzz_target_name = fuzz_target.binary
        # Record use of the fuzz target to avoid garbage collection (since
        # fuzz_task does not run). Guarded by the check above so a testcase
        # without a fuzz target does not raise AttributeError on None.
        data_handler.record_fuzz_target(fuzz_target.engine, fuzz_target.binary,
                                        testcase.job_type)
    else:
        fuzz_target_name = None

    state = stack_analyzer.get_crash_data(stacktrace,
                                          fuzz_target=fuzz_target_name,
                                          symbolize_flag=False,
                                          already_symbolized=True,
                                          detect_ooms_and_hangs=True)
    crash_comparer = CrashComparer(state.crash_state, testcase.crash_state)
    if not crash_comparer.is_similar():
        logs.log(f'State no longer similar ('
                 f'testcase_id={testcase.key.id()}, '
                 f'old_state={testcase.crash_state}, '
                 f'new_state={state.crash_state})')
        _mark_as_fixed(testcase, revision)
        return

    is_security = crash_analyzer.is_security_issue(state.crash_stacktrace,
                                                   state.crash_type,
                                                   state.crash_address)
    if is_security != testcase.security_flag:
        logs.log(f'Security flag for {testcase.key.id()} no longer matches.')
        _mark_as_fixed(testcase, revision)
        return

    logs.log(f'{testcase.key.id()} still crashes.')
    testcase.last_tested_crash_stacktrace = stacktrace
    data_handler.update_progression_completion_metadata(testcase,
                                                        revision,
                                                        is_crash=True)
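The decision flow above boils down to two checks: the crash state must still compare as similar, and the security classification must still match; failing either marks the bug fixed. A dependency-free condensation of that logic (the function and predicate below are illustrative stand-ins, not the CrashComparer API):

def still_same_crash(old_state, new_state, security_matches, is_similar):
    """Mirror of the control flow above: the testcase counts as fixed once
    either the crash state diverges or the security flag flips."""
    if not is_similar(old_state, new_state):
        return False  # state changed -> treat as fixed
    return security_matches  # flag flip also means fixed


# Hypothetical usage with a naive exact-match similarity predicate:
print(still_same_crash('Heap-buffer-overflow in Foo::Bar',
                       'Heap-buffer-overflow in Foo::Bar',
                       True, lambda a, b: a == b))  # True: still crashing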
Example #4
def _save_fixed_range(testcase_id, min_revision, max_revision,
                      testcase_file_path):
  """Update a test case and other metadata with a fixed range."""
  testcase = data_handler.get_testcase_by_id(testcase_id)
  testcase.fixed = '%d:%d' % (min_revision, max_revision)
  testcase.open = False

  data_handler.update_progression_completion_metadata(
      testcase, max_revision, message='fixed in range r%s' % testcase.fixed)
  _write_to_bigquery(testcase, min_revision, max_revision)

  _store_testcase_for_regression_testing(testcase, testcase_file_path)
Example #5
def _check_fixed_for_custom_binary(testcase, job_type, testcase_file_path):
  """Simplified fixed check for test cases using custom binaries."""
  revision = environment.get_value('APP_REVISION')

  # Update comments to reflect bot information and clean up old comments.
  testcase_id = testcase.key.id()
  testcase = data_handler.get_testcase_by_id(testcase_id)
  data_handler.update_testcase_comment(testcase, data_types.TaskState.STARTED)

  build_manager.setup_build()
  if not build_manager.check_app_path():
    testcase = data_handler.get_testcase_by_id(testcase_id)
    data_handler.update_testcase_comment(
        testcase, data_types.TaskState.ERROR,
        'Build setup failed for custom binary')
    build_fail_wait = environment.get_value('FAIL_WAIT')
    tasks.add_task(
        'progression', testcase_id, job_type, wait_time=build_fail_wait)
    return

  test_timeout = environment.get_value('TEST_TIMEOUT', 10)
  result = testcase_manager.test_for_crash_with_retries(
      testcase, testcase_file_path, test_timeout, http_flag=testcase.http_flag)
  _log_output(revision, result)

  # Re-fetch to finalize testcase updates in branches below.
  testcase = data_handler.get_testcase_by_id(testcase.key.id())

  # If this still crashes on the most recent build, it's not fixed. The task
  # will be rescheduled by a cron job and re-attempted eventually.
  if result.is_crash():
    app_path = environment.get_value('APP_PATH')
    command = testcase_manager.get_command_line_for_application(
        testcase_file_path, app_path=app_path, needs_http=testcase.http_flag)
    symbolized_crash_stacktrace = result.get_stacktrace(symbolized=True)
    unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)
    stacktrace = utils.get_crash_stacktrace_output(
        command, symbolized_crash_stacktrace, unsymbolized_crash_stacktrace)
    testcase.last_tested_crash_stacktrace = data_handler.filter_stacktrace(
        stacktrace)
    data_handler.update_progression_completion_metadata(
        testcase,
        revision,
        is_crash=True,
        message='still crashes on latest custom build')
    return

  if result.unexpected_crash:
    testcase.set_metadata(
        'crashes_on_unexpected_state', True, update_testcase=False)
  else:
    testcase.delete_metadata(
        'crashes_on_unexpected_state', update_testcase=False)

  # Retry once on another bot to confirm our results and in case this bot is in
  # a bad state which we didn't catch through our usual means.
  if data_handler.is_first_retry_for_task(testcase, reset_after_retry=True):
    tasks.add_task('progression', testcase_id, job_type)
    data_handler.update_progression_completion_metadata(testcase, revision)
    return

  # The bug is fixed.
  testcase.fixed = 'Yes'
  testcase.open = False
  data_handler.update_progression_completion_metadata(
      testcase, revision, message='fixed on latest custom build')
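The is_first_retry_for_task call above implements a consume-on-read retry budget: the first non-crashing result schedules one confirmation run on another bot, and only a second non-crashing result closes the bug. A minimal sketch of that idea (the class and names below are mine, not the ClusterFuzz API):

class OneRetryBudget:
  """Allow exactly one confirmation retry per testcase."""

  def __init__(self):
    self._consumed = set()

  def is_first_retry(self, testcase_id):
    # Consume-on-read: the first call returns True, every later call
    # returns False.
    if testcase_id in self._consumed:
      return False
    self._consumed.add(testcase_id)
    return True


budget = OneRetryBudget()
print(budget.is_first_retry(42))  # True: reschedule on another bot
print(budget.is_first_retry(42))  # False: accept 'fixed' as final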
Example #6
def find_fixed_range(testcase_id, job_type):
  """Attempt to find the revision range where a testcase was fixed."""
  deadline = tasks.get_task_completion_deadline()
  testcase = data_handler.get_testcase_by_id(testcase_id)
  if not testcase:
    return

  if testcase.fixed:
    logs.log_error('Fixed range is already set as %s, skipping.' %
                   testcase.fixed)
    return

  # Setup testcase and its dependencies.
  file_list, _, testcase_file_path = setup.setup_testcase(testcase, job_type)
  if not file_list:
    return

  # Set a flag to indicate we are running a progression task. This shows a
  # pending status on the testcase report page and avoids conflicting testcase
  # updates by the triage cron.
  testcase.set_metadata('progression_pending', True)

  # Custom binaries are handled as special cases.
  if build_manager.is_custom_binary():
    _check_fixed_for_custom_binary(testcase, job_type, testcase_file_path)
    return

  build_bucket_path = build_manager.get_primary_bucket_path()
  revision_list = build_manager.get_revisions_list(
      build_bucket_path, testcase=testcase)
  if not revision_list:
    data_handler.close_testcase_with_error(testcase_id,
                                           'Failed to fetch revision list')
    return

  # Use min_index and max_index to mark the start and end of the revision list
  # used for bisecting the progression range. Set the start to the revision
  # where we noticed the crash and the end to the trunk revision. Also, reuse
  # min and max from a past run if it timed out.
  min_revision = testcase.get_metadata('last_progression_min')
  max_revision = testcase.get_metadata('last_progression_max')

  if min_revision or max_revision:
    # Clear these to avoid using them in the next run. If this run fails, we
    # should try the next run without them to see if it succeeds. If this run
    # succeeds, we should still clear them to avoid capping the max revision
    # in the next run.
    testcase = data_handler.get_testcase_by_id(testcase_id)
    testcase.delete_metadata('last_progression_min', update_testcase=False)
    testcase.delete_metadata('last_progression_max', update_testcase=False)
    testcase.put()

  last_tested_revision = testcase.get_metadata('last_tested_crash_revision')
  known_crash_revision = last_tested_revision or testcase.crash_revision
  if not min_revision:
    min_revision = known_crash_revision
  if not max_revision:
    max_revision = revisions.get_last_revision_in_list(revision_list)

  min_index = revisions.find_min_revision_index(revision_list, min_revision)
  if min_index is None:
    raise errors.BuildNotFoundError(min_revision, job_type)
  max_index = revisions.find_max_revision_index(revision_list, max_revision)
  if max_index is None:
    raise errors.BuildNotFoundError(max_revision, job_type)

  testcase = data_handler.get_testcase_by_id(testcase_id)
  data_handler.update_testcase_comment(testcase, data_types.TaskState.STARTED,
                                       'r%d' % max_revision)

  # Check to see if this testcase is still crashing now. If it is, then just
  # bail out.
  result = _testcase_reproduces_in_revision(
      testcase,
      testcase_file_path,
      job_type,
      max_revision,
      update_metadata=True)
  if result.is_crash():
    logs.log('Found crash with same signature on latest revision r%d.' %
             max_revision)
    app_path = environment.get_value('APP_PATH')
    command = testcase_manager.get_command_line_for_application(
        testcase_file_path, app_path=app_path, needs_http=testcase.http_flag)
    symbolized_crash_stacktrace = result.get_stacktrace(symbolized=True)
    unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)
    stacktrace = utils.get_crash_stacktrace_output(
        command, symbolized_crash_stacktrace, unsymbolized_crash_stacktrace)
    testcase = data_handler.get_testcase_by_id(testcase_id)
    testcase.last_tested_crash_stacktrace = data_handler.filter_stacktrace(
        stacktrace)
    data_handler.update_progression_completion_metadata(
        testcase,
        max_revision,
        is_crash=True,
        message='still crashes on latest revision r%s' % max_revision)

    # Since we've verified that the test case is still crashing, clear out any
    # metadata indicating potential flake from previous runs.
    task_creation.mark_unreproducible_if_flaky(testcase, False)

    # For chromium project, save latest crash information for later upload
    # to chromecrash/.
    state = result.get_symbolized_data()
    crash_uploader.save_crash_info_if_needed(testcase_id, max_revision,
                                             job_type, state.crash_type,
                                             state.crash_address, state.frames)
    return

  if result.unexpected_crash:
    testcase.set_metadata('crashes_on_unexpected_state', True)
  else:
    testcase.delete_metadata('crashes_on_unexpected_state')

  # Don't burden NFS server with caching these random builds.
  environment.set_value('CACHE_STORE', False)

  # Verify that we do crash in the min revision. This is assumed to be true
  # while we are doing the bisect.
  result = _testcase_reproduces_in_revision(testcase, testcase_file_path,
                                            job_type, min_revision)
  if result and not result.is_crash():
    testcase = data_handler.get_testcase_by_id(testcase_id)

    # Retry once on another bot to confirm our result.
    if data_handler.is_first_retry_for_task(testcase, reset_after_retry=True):
      tasks.add_task('progression', testcase_id, job_type)
      error_message = (
          'Known crash revision %d did not crash, will retry on another bot to '
          'confirm result' % known_crash_revision)
      data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                           error_message)
      data_handler.update_progression_completion_metadata(
          testcase, max_revision)
      return

    data_handler.clear_progression_pending(testcase)
    error_message = (
        'Known crash revision %d did not crash' % known_crash_revision)
    data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                         error_message)
    task_creation.mark_unreproducible_if_flaky(testcase, True)
    return

  # Start a binary search to find the last crashing and first non-crashing
  # revisions. At this point, we know that we do crash in min_revision and do
  # not crash in max_revision.
  while time.time() < deadline:
    min_revision = revision_list[min_index]
    max_revision = revision_list[max_index]

    # If the min and max revisions are one apart, this is as much as we can
    # narrow the range.
    if max_index - min_index == 1:
      _save_fixed_range(testcase_id, min_revision, max_revision,
                        testcase_file_path)
      return

    # Occasionally, we get into this bad state. It seems to be related to test
    # cases with flaky stacks, but the exact cause is unknown.
    if max_index - min_index < 1:
      testcase = data_handler.get_testcase_by_id(testcase_id)
      testcase.fixed = 'NA'
      testcase.open = False
      message = ('Fixed testing errored out (min and max revisions '
                 'are both %d)' % min_revision)
      data_handler.update_progression_completion_metadata(
          testcase, max_revision, message=message)

      # Let the bisection service know about the NA status.
      bisection.request_bisection(testcase)
      return

    # Test the middle revision of our range.
    middle_index = (min_index + max_index) // 2
    middle_revision = revision_list[middle_index]

    testcase = data_handler.get_testcase_by_id(testcase_id)
    log_message = 'Testing r%d (current range %d:%d)' % (
        middle_revision, min_revision, max_revision)
    data_handler.update_testcase_comment(testcase, data_types.TaskState.WIP,
                                         log_message)

    try:
      result = _testcase_reproduces_in_revision(testcase, testcase_file_path,
                                                job_type, middle_revision)
    except errors.BadBuildError:
      # Skip this revision.
      del revision_list[middle_index]
      max_index -= 1
      continue

    if result.is_crash():
      min_index = middle_index
    else:
      max_index = middle_index

    _save_current_fixed_range_indices(testcase_id, revision_list[min_index],
                                      revision_list[max_index])

  # If we've broken out of the loop, we've exceeded the deadline. Recreate the
  # task to pick up where we left off.
  testcase = data_handler.get_testcase_by_id(testcase_id)
  error_message = ('Timed out, current range r%d:r%d' %
                   (revision_list[min_index], revision_list[max_index]))
  data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                       error_message)
  tasks.add_task('progression', testcase_id, job_type)
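Stripped of datastore round-trips, deadline handling, and bad-build skipping, the loop above is an ordinary bisection over the revision list, maintaining the invariant that the testcase crashes at min_index and does not crash at max_index. A self-contained sketch of that core (reproduces is a stand-in predicate, not the real reproduction check):

def bisect_fixed_range(revision_list, reproduces):
  """Narrow to the adjacent pair (last crashing, first non-crashing).

  Assumes reproduces(revision_list[0]) is True and
  reproduces(revision_list[-1]) is False, mirroring the checks the real
  task performs before entering its loop.
  """
  min_index, max_index = 0, len(revision_list) - 1
  while max_index - min_index > 1:
    middle_index = (min_index + max_index) // 2
    if reproduces(revision_list[middle_index]):
      min_index = middle_index
    else:
      max_index = middle_index
  return revision_list[min_index], revision_list[max_index]


# Hypothetical example: the fix landed at revision 1007.
print(bisect_fixed_range([1000, 1002, 1005, 1007, 1010], lambda r: r < 1007))
# -> (1005, 1007), stored as the range '1005:1007'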
Example #7
def handle_update(testcase, revision, stacktraces, error, protocol_version):
    """Handle update."""
    def is_still_crashing(st_index, stacktrace):
        """Check if the given stacktrace indicates that the testcase is
        still crashing."""
        state = stack_analyzer.get_crash_data(stacktrace,
                                              fuzz_target=fuzz_target_name,
                                              symbolize_flag=False,
                                              already_symbolized=True,
                                              detect_ooms_and_hangs=True)

        crash_comparer = CrashComparer(state.crash_state, testcase.crash_state)
        if not crash_comparer.is_similar():
            return False

        logs.log(f'State for trial {st_index} of {testcase_id} '
                 f'remains similar '
                 f'(old_state={testcase.crash_state}, '
                 f'new_state={state.crash_state}).')

        is_security = crash_analyzer.is_security_issue(state.crash_stacktrace,
                                                       state.crash_type,
                                                       state.crash_address)
        if is_security != testcase.security_flag:
            return False

        logs.log(f'Security flag for trial {st_index} of {testcase_id} '
                 f'still matches '
                 f'({testcase.security_flag}).')
        return True

    testcase_id = testcase.key.id()
    logs.log('Got external update for testcase.', testcase_id=testcase_id)
    if error:
        _mark_errored(testcase, revision, error)
        return

    last_tested_revision = (testcase.get_metadata('last_tested_revision')
                            or testcase.crash_revision)

    if revision < last_tested_revision:
        logs.log_warn(f'Revision {revision} less than previously tested '
                      f'revision {last_tested_revision}.')
        return

    if protocol_version not in [OLD_PROTOCOL, NEW_PROTOCOL]:
        logs.log_error(f'Invalid protocol_version provided: '
                       f'{protocol_version} '
                       f'is not one of {{{OLD_PROTOCOL, NEW_PROTOCOL}}} '
                       f'(testcase_id={testcase_id}).')
        return

    if not stacktraces:
        logs.log_error(f'Empty JSON stacktrace list provided '
                       f'(testcase_id={testcase_id}).')
        return

    fuzz_target = testcase.get_fuzz_target()
    if fuzz_target:
        fuzz_target_name = fuzz_target.binary
        # Record use of the fuzz target to avoid garbage collection (since
        # fuzz_task does not run). Guarded by the check above so a testcase
        # without a fuzz target does not raise AttributeError on None.
        data_handler.record_fuzz_target(fuzz_target.engine, fuzz_target.binary,
                                        testcase.job_type)
    else:
        fuzz_target_name = None

    for st_index, stacktrace in enumerate(stacktraces):
        if is_still_crashing(st_index, stacktrace):
            logs.log(f'stacktrace {st_index} of {testcase_id} still crashes.')
            testcase.last_tested_crash_stacktrace = stacktrace
            data_handler.update_progression_completion_metadata(testcase,
                                                                revision,
                                                                is_crash=True)
            return

    # All trials resulted in a non-crash. Close the testcase.
    logs.log(f'No matching crash detected in {testcase_id} '
             f'over {len(stacktraces)} trials, marking as fixed.')
    _mark_as_fixed(testcase, revision)
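Note the multi-trial semantics of the loop above: a single matching crash among the submitted stacktraces keeps the bug open, and it is marked fixed only when every trial misses. Condensed, with hypothetical per-trial outcomes:

# One True among the trials is enough to keep the testcase open.
trial_crashes = [False, False, True]
print('still open' if any(trial_crashes) else 'mark as fixed')  # still open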