Code example #1
File: utils_test.py Project: wdgreen/clusterfuzz
 def test_debug_sym_stack_2(self):
     """Tests debug build with a symbolized stack, with build type not explicitly
 passed."""
     self.assertEqual(
         '[Command line] cmd_line_dbg\n\n' + self.start_seperator +
         'Debug Build Stacktrace' + self.end_seperator + '\nsym_stack',
         utils.get_crash_stacktrace_output('cmd_line_dbg', 'sym_stack'))
Code example #2
File: utils_test.py Project: wdgreen/clusterfuzz
 def test_beta_sym_stack(self):
     """Tests beta build with a symbolized stack, with build type not
     explicitly passed."""
     self.assertEqual(
         '[Command line] cmd_line_beta\n\n' + self.start_seperator +
         'Beta Build Stacktrace' + self.end_seperator + '\nsym_stack',
         utils.get_crash_stacktrace_output('cmd_line_beta', 'sym_stack'))
Code example #3
File: impact_task.py Project: stl314159/clusterfuzz
def get_impact_on_build(build_type, current_version, testcase,
                        testcase_file_path):
    """Return impact and additional trace on a prod build given build_type."""
    build = build_manager.setup_production_build(build_type)
    if not build:
        raise BuildFailedException('Build setup failed for %s' %
                                   build_type.capitalize())

    app_path = environment.get_value('APP_PATH')
    if not app_path:
        raise AppFailedException()

    version = build.revision
    if version == current_version:
        return Impact(current_version, likely=False)

    command = testcase_manager.get_command_line_for_application(
        testcase_file_path, app_path=app_path, needs_http=testcase.http_flag)
    result = testcase_manager.test_for_crash_with_retries(
        testcase,
        testcase_file_path,
        environment.get_value('TEST_TIMEOUT'),
        http_flag=testcase.http_flag)

    if result.is_crash():
        symbolized_crash_stacktrace = result.get_stacktrace(symbolized=True)
        unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)
        stacktrace = utils.get_crash_stacktrace_output(
            command, symbolized_crash_stacktrace,
            unsymbolized_crash_stacktrace, build_type)
        return Impact(version, likely=False, extra_trace=stacktrace)

    return Impact()
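
Note: the Impact values returned above are constructed in three ways: Impact(), Impact(current_version, likely=False), and Impact(version, likely=False, extra_trace=stacktrace). The minimal stand-in below is inferred from those call sites only; the real clusterfuzz class may carry more fields and logic.

class Impact(object):
    """Hypothetical container matching the Impact call sites above."""

    def __init__(self, version='', likely=False, extra_trace=''):
        self.version = version
        self.likely = likely
        self.extra_trace = extra_trace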
Code example #4
def _check_fixed_for_custom_binary(testcase, job_type, testcase_file_path):
  """Simplified fixed check for test cases using custom binaries."""
  revision = environment.get_value('APP_REVISION')

  # Update comments to reflect bot information and clean up old comments.
  testcase_id = testcase.key.id()
  testcase = data_handler.get_testcase_by_id(testcase_id)
  data_handler.update_testcase_comment(testcase, data_types.TaskState.STARTED)

  build_manager.setup_build()
  if not build_manager.check_app_path():
    testcase = data_handler.get_testcase_by_id(testcase_id)
    data_handler.update_testcase_comment(
        testcase, data_types.TaskState.ERROR,
        'Build setup failed for custom binary')
    build_fail_wait = environment.get_value('FAIL_WAIT')
    tasks.add_task(
        'progression', testcase_id, job_type, wait_time=build_fail_wait)
    return

  test_timeout = environment.get_value('TEST_TIMEOUT', 10)
  result = testcase_manager.test_for_crash_with_retries(
      testcase, testcase_file_path, test_timeout, http_flag=testcase.http_flag)
  _log_output(revision, result)

  # Re-fetch to finalize testcase updates in branches below.
  testcase = data_handler.get_testcase_by_id(testcase.key.id())

  # If this still crashes on the most recent build, it's not fixed. The task
  # will be rescheduled by a cron job and re-attempted eventually.
  if result.is_crash():
    app_path = environment.get_value('APP_PATH')
    command = testcase_manager.get_command_line_for_application(
        testcase_file_path, app_path=app_path, needs_http=testcase.http_flag)
    symbolized_crash_stacktrace = result.get_stacktrace(symbolized=True)
    unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)
    stacktrace = utils.get_crash_stacktrace_output(
        command, symbolized_crash_stacktrace, unsymbolized_crash_stacktrace)
    testcase.last_tested_crash_stacktrace = data_handler.filter_stacktrace(
        stacktrace)
    _update_completion_metadata(
        testcase,
        revision,
        is_crash=True,
        message='still crashes on latest custom build')
    return

  # Retry once on another bot to confirm our results and in case this bot is in
  # a bad state which we didn't catch through our usual means.
  if data_handler.is_first_retry_for_task(testcase, reset_after_retry=True):
    tasks.add_task('progression', testcase_id, job_type)
    _update_completion_metadata(testcase, revision)
    return

  # The bug is fixed.
  testcase.fixed = 'Yes'
  testcase.open = False
  _update_completion_metadata(
      testcase, revision, message='fixed on latest custom build')
Code example #5
File: utils_test.py Project: wdgreen/clusterfuzz
 def test_env_settings(self):
     """Tests that environment settings are added."""
     os.environ['ASAN_OPTIONS'] = 'setting1=value1:setting2=value_2'
     self.assertEqual(
         '[Environment] ASAN_OPTIONS="setting1=value1:setting2=value_2"\n'
         '[Command line] cmd_line\n\n' + self.start_seperator +
         'Release Build Stacktrace' + self.end_seperator + '\nsym_stack',
         utils.get_crash_stacktrace_output('cmd_line', 'sym_stack'))
Code example #6
 def test_debug_sym_and_unsym_stacks(self):
   """Tests debug build with symbolized and unsymbolized stacks, with build
   type explicitly passed."""
   self.assertEqual(
       '[Command line] cmd_line\n\n' + self.start_seperator +
       'Debug Build Stacktrace' + self.end_seperator + '\nsym_stack\n\n' +
       self.start_seperator + 'Debug Build Unsymbolized Stacktrace (diff)' +
       self.end_seperator + '\n\nunsym_stack',
       utils.get_crash_stacktrace_output(
           'cmd_line', 'sym_stack', 'unsym_stack', build_type='debug'))
Code example #7
File: utils_test.py Project: wdgreen/clusterfuzz
 def test_release_sym_and_unsym_diff_stacks(self):
     """Tests release build with symbolized and unsymbolized stacks, having some
     common frames, and build type not explicitly passed."""
     self.assertEqual(
         '[Command line] cmd_line\n\n' + self.start_seperator +
         'Release Build Stacktrace' + self.end_seperator +
         '\nc1\nc2\nc3\nsym_stack\nc4\nc5\nc6\n\n' + self.start_seperator +
         'Release Build Unsymbolized Stacktrace (diff)' +
         self.end_seperator + '\n\nc2\nc3\nunsym_stack\nc4\nc5',
         utils.get_crash_stacktrace_output(
             'cmd_line', 'c1\nc2\nc3\nsym_stack\nc4\nc5\nc6',
             'c1\nc2\nc3\nunsym_stack\nc4\nc5\nc6'))
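
Taken together, the tests above pin down the observable output format of utils.get_crash_stacktrace_output: an optional [Environment] header, a [Command line] header, a separator-delimited symbolized section labeled with the build type (inferred from the command line when not passed explicitly), and an optional unsymbolized diff section with a couple of lines of context. The sketch below is a reconstruction inferred only from these test expectations, not the actual clusterfuzz implementation; the separator constants, the _dbg/_beta suffix heuristic, the ASAN_OPTIONS-only environment header, and the two-line diff context are all assumptions.

import difflib
import os

# Placeholder separators; in the tests the real values live on the fixture as
# self.start_seperator / self.end_seperator.
START_SEPERATOR = '+' + '-' * 40 + '+\n'
END_SEPERATOR = '\n+' + '-' * 40 + '+'


def _infer_build_type(application_command_line):
    """Guess the build type from the command line (assumed heuristic; the
    tests only show that *_dbg maps to Debug and *_beta maps to Beta)."""
    if '_dbg' in application_command_line:
        return 'debug'
    if '_beta' in application_command_line:
        return 'beta'
    return 'release'


def _unsymbolized_diff(symbolized, unsymbolized, context=2):
    """Keep only the unsymbolized lines that differ from the symbolized
    stack, plus a little surrounding context, as in the last test above."""
    sym_lines = symbolized.splitlines()
    unsym_lines = unsymbolized.splitlines()
    keep = set()
    matcher = difflib.SequenceMatcher(None, sym_lines, unsym_lines)
    for tag, _, _, j1, j2 in matcher.get_opcodes():
        if tag == 'equal':
            continue
        keep.update(
            range(max(j1 - context, 0), min(j2 + context, len(unsym_lines))))
    return '\n'.join(unsym_lines[i] for i in sorted(keep))


def get_crash_stacktrace_output(application_command_line,
                                symbolized_stacktrace,
                                unsymbolized_stacktrace=None,
                                build_type=None):
    """Return a display-ready crash stacktrace with headers and separators."""
    if build_type is None:
        build_type = _infer_build_type(application_command_line)

    output = ''
    # Only ASAN_OPTIONS is handled here for brevity; the real function may
    # report other environment variables too.
    asan_options = os.environ.get('ASAN_OPTIONS')
    if asan_options:
        output += '[Environment] ASAN_OPTIONS="%s"\n' % asan_options
    output += '[Command line] %s\n\n' % application_command_line
    output += (START_SEPERATOR +
               '%s Build Stacktrace' % build_type.capitalize() +
               END_SEPERATOR + '\n' + symbolized_stacktrace)

    if unsymbolized_stacktrace and unsymbolized_stacktrace != symbolized_stacktrace:
        output += ('\n\n' + START_SEPERATOR +
                   '%s Build Unsymbolized Stacktrace (diff)' %
                   build_type.capitalize() + END_SEPERATOR + '\n\n' +
                   _unsymbolized_diff(symbolized_stacktrace,
                                      unsymbolized_stacktrace))
    return output

Under these assumptions the sketch reproduces the expected strings in the tests above, including the 'c2\nc3\nunsym_stack\nc4\nc5' diff in the last one.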
Code example #8
def _update_crash_result(testcase, crash_result, command):
  """Update testcase with crash result."""
  min_state = crash_result.get_symbolized_data()
  min_unsymbolized_crash_stacktrace = crash_result.get_stacktrace(
      symbolized=False)
  min_crash_stacktrace = utils.get_crash_stacktrace_output(
      command, min_state.crash_stacktrace, min_unsymbolized_crash_stacktrace)
  testcase.crash_type = min_state.crash_type
  testcase.crash_address = min_state.crash_address
  testcase.crash_state = min_state.crash_state
  testcase.crash_stacktrace = data_handler.filter_stacktrace(
      min_crash_stacktrace)
Code example #9
def get_impact_on_build(build_type, current_version, testcase,
                        testcase_file_path):
  """Return impact and additional trace on a prod build given build_type."""
  # TODO(yuanjunh): remove es_enabled var after testing is done.
  es_enabled = testcase.get_metadata('es_enabled', False)
  if build_type == 'extended_stable' and not es_enabled:
    return Impact()
  build = build_manager.setup_production_build(build_type)
  if not build:
    raise BuildFailedException(
        'Build setup failed for %s' % build_type.capitalize())

  if not build_manager.check_app_path():
    raise AppFailedException()

  version = build.revision
  if version == current_version:
    return Impact(current_version, likely=False)

  app_path = environment.get_value('APP_PATH')
  command = testcase_manager.get_command_line_for_application(
      testcase_file_path, app_path=app_path, needs_http=testcase.http_flag)

  if es_enabled:
    logs.log(
        "ES build for testcase %d, command: %s" % (testcase.key.id(), command))

  result = testcase_manager.test_for_crash_with_retries(
      testcase,
      testcase_file_path,
      environment.get_value('TEST_TIMEOUT'),
      http_flag=testcase.http_flag)

  if result.is_crash():
    symbolized_crash_stacktrace = result.get_stacktrace(symbolized=True)
    unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)
    stacktrace = utils.get_crash_stacktrace_output(
        command, symbolized_crash_stacktrace, unsymbolized_crash_stacktrace,
        build_type)
    return Impact(version, likely=False, extra_trace=stacktrace)

  return Impact()
Code example #10
File: analyze_task.py Project: wdgreen/clusterfuzz
def execute_task(testcase_id, job_type):
  """Run analyze task."""
  # Reset redzones.
  environment.reset_current_memory_tool_options(redzone_size=128)

  # Unset window location size and position properties so that defaults are used.
  environment.set_value('WINDOW_ARG', '')

  # Locate the testcase associated with the id.
  testcase = data_handler.get_testcase_by_id(testcase_id)
  if not testcase:
    return

  data_handler.update_testcase_comment(testcase, data_types.TaskState.STARTED)

  metadata = data_types.TestcaseUploadMetadata.query(
      data_types.TestcaseUploadMetadata.testcase_id == int(testcase_id)).get()
  if not metadata:
    logs.log_error(
        'Testcase %s has no associated upload metadata.' % testcase_id)
    testcase.key.delete()
    return

  is_lsan_enabled = environment.get_value('LSAN')
  if is_lsan_enabled:
    # Create an empty local blacklist so all leaks will be visible to the uploader.
    leak_blacklist.create_empty_local_blacklist()

  # Store the bot name and timestamp in upload metadata.
  bot_name = environment.get_value('BOT_NAME')
  metadata.bot_name = bot_name
  metadata.timestamp = datetime.datetime.utcnow()
  metadata.put()

  # Adjust the test timeout if the user has provided one.
  if metadata.timeout:
    environment.set_value('TEST_TIMEOUT', metadata.timeout)

  # Adjust the number of retries if the user has provided one.
  if metadata.retries is not None:
    environment.set_value('CRASH_RETRIES', metadata.retries)

  # Setup testcase and get absolute testcase path.
  file_list, _, testcase_file_path = setup.setup_testcase(testcase)
  if not file_list:
    return

  # Set up a custom or regular build based on revision.
  build_manager.setup_build(testcase.crash_revision)

  # Check if we have an application path. If not, our build failed
  # to set up correctly.
  app_path = environment.get_value('APP_PATH')
  if not app_path:
    data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                         'Build setup failed')

    if data_handler.is_first_retry_for_task(testcase):
      build_fail_wait = environment.get_value('FAIL_WAIT')
      tasks.add_task(
          'analyze', testcase_id, job_type, wait_time=build_fail_wait)
    else:
      close_invalid_testcase_and_update_status(testcase, metadata,
                                               'Build setup failed')
    return

  # Update initial testcase information.
  testcase.absolute_path = testcase_file_path
  testcase.job_type = job_type
  testcase.binary_flag = utils.is_binary_file(testcase_file_path)
  testcase.queue = tasks.default_queue()
  testcase.crash_state = ''

  # Set initial testcase metadata fields (e.g. build url).
  data_handler.set_initial_testcase_metadata(testcase)

  # Update minimized arguments and use ones provided during user upload.
  if not testcase.minimized_arguments:
    minimized_arguments = environment.get_value('APP_ARGS') or ''
    additional_command_line_flags = testcase.get_metadata(
        'uploaded_additional_args')
    if additional_command_line_flags:
      minimized_arguments += ' %s' % additional_command_line_flags
    environment.set_value('APP_ARGS', minimized_arguments)
    testcase.minimized_arguments = minimized_arguments

  # Update other fields not set at upload time.
  testcase.crash_revision = environment.get_value('APP_REVISION')
  data_handler.set_initial_testcase_metadata(testcase)
  testcase.put()

  # Initialize some variables.
  gestures = testcase.gestures
  http_flag = testcase.http_flag
  test_timeout = environment.get_value('TEST_TIMEOUT')

  # Get the crash output.
  result = testcase_manager.test_for_crash_with_retries(
      testcase,
      testcase_file_path,
      test_timeout,
      http_flag=http_flag,
      compare_crash=False)

  # If we don't get a crash, try enabling http to see if we can get a crash.
  # Skip engine fuzzer jobs (e.g. libFuzzer, AFL) for which http testcase paths
  # are not applicable.
  if (not result.is_crash() and not http_flag and
      not environment.is_engine_fuzzer_job()):
    result_with_http = testcase_manager.test_for_crash_with_retries(
        testcase,
        testcase_file_path,
        test_timeout,
        http_flag=True,
        compare_crash=False)
    if result_with_http.is_crash():
      logs.log('Testcase needs http flag for crash.')
      http_flag = True
      result = result_with_http

  # Refresh our object.
  testcase = data_handler.get_testcase_by_id(testcase_id)
  if not testcase:
    return

  # Set application command line with the correct http flag.
  application_command_line = (
      testcase_manager.get_command_line_for_application(
          testcase_file_path, needs_http=http_flag))

  # Get the crash data.
  crashed = result.is_crash()
  crash_time = result.get_crash_time()
  state = result.get_symbolized_data()
  unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)

  # Get crash info object with minidump info. Also, re-generate unsymbolized
  # stacktrace if needed.
  crash_info, _ = (
      crash_uploader.get_crash_info_and_stacktrace(
          application_command_line, state.crash_stacktrace, gestures))
  if crash_info:
    testcase.minidump_keys = crash_info.store_minidump()

  if not crashed:
    # Could not reproduce the crash.
    log_message = (
        'Testcase didn\'t crash in %d seconds (with retries)' % test_timeout)
    data_handler.update_testcase_comment(
        testcase, data_types.TaskState.FINISHED, log_message)

    # For an unreproducible testcase, retry once on another bot to confirm
    # our results and in case this bot is in a bad state which we didn't catch
    # through our usual means.
    if data_handler.is_first_retry_for_task(testcase):
      testcase.status = 'Unreproducible, retrying'
      testcase.put()

      tasks.add_task('analyze', testcase_id, job_type)
      return

    # In the general case, we will not attempt to symbolize if we do not detect
    # a crash. For user uploads, we should symbolize anyway to provide more
    # information about what might be happening.
    crash_stacktrace_output = utils.get_crash_stacktrace_output(
        application_command_line, state.crash_stacktrace,
        unsymbolized_crash_stacktrace)
    testcase.crash_stacktrace = data_handler.filter_stacktrace(
        crash_stacktrace_output)
    close_invalid_testcase_and_update_status(testcase, metadata,
                                             'Unreproducible')

    # A non-reproducing testcase might still impact production branches.
    # Add the impact task to get that information.
    task_creation.create_impact_task_if_needed(testcase)
    return

  # Update http flag and re-run testcase to store dependencies (for bundled
  # archives only).
  testcase.http_flag = http_flag
  if not store_testcase_dependencies_from_bundled_testcase_archive(
      metadata, testcase, testcase_file_path):
    return

  # Update testcase crash parameters.
  testcase.crash_type = state.crash_type
  testcase.crash_address = state.crash_address
  testcase.crash_state = state.crash_state

  # Try to guess if the bug is security or not.
  security_flag = crash_analyzer.is_security_issue(
      state.crash_stacktrace, state.crash_type, state.crash_address)
  testcase.security_flag = security_flag

  # If it is, guess the severity.
  if security_flag:
    testcase.security_severity = severity_analyzer.get_security_severity(
        state.crash_type, state.crash_stacktrace, job_type, bool(gestures))

  log_message = ('Testcase crashed in %d seconds (r%d)' %
                 (crash_time, testcase.crash_revision))
  data_handler.update_testcase_comment(testcase, data_types.TaskState.FINISHED,
                                       log_message)

  # See if we have to ignore this crash.
  if crash_analyzer.ignore_stacktrace(state.crash_stacktrace):
    close_invalid_testcase_and_update_status(testcase, metadata, 'Irrelevant')
    return

  # Test for reproducibility.
  one_time_crasher_flag = not testcase_manager.test_for_reproducibility(
      testcase_file_path, state.crash_state, security_flag, test_timeout,
      http_flag, gestures)
  testcase.one_time_crasher_flag = one_time_crasher_flag

  # Check to see if this is a duplicate.
  project_name = data_handler.get_project_name(job_type)
  existing_testcase = data_handler.find_testcase(
      project_name, state.crash_type, state.crash_state, security_flag)
  if existing_testcase:
    # If the existing test case is unreproducible and ours is reproducible,
    # replace the existing test case with this one.
    if existing_testcase.one_time_crasher_flag and not one_time_crasher_flag:
      duplicate_testcase = existing_testcase
      original_testcase = testcase
    else:
      duplicate_testcase = testcase
      original_testcase = existing_testcase
      metadata.status = 'Duplicate'
      metadata.duplicate_of = existing_testcase.key.id()

    duplicate_testcase.status = 'Duplicate'
    duplicate_testcase.duplicate_of = original_testcase.key.id()
    duplicate_testcase.put()

  # Set testcase and metadata status if not set already.
  if testcase.status != 'Duplicate':
    testcase.status = 'Processed'
    metadata.status = 'Confirmed'

    # Add new leaks to global blacklist to avoid detecting duplicates.
    # Only add if testcase has a direct leak crash and if it's reproducible.
    if is_lsan_enabled:
      leak_blacklist.add_crash_to_global_blacklist_if_needed(testcase)

  # Add application specific information in the trace.
  crash_stacktrace_output = utils.get_crash_stacktrace_output(
      application_command_line, state.crash_stacktrace,
      unsymbolized_crash_stacktrace)
  testcase.crash_stacktrace = data_handler.filter_stacktrace(
      crash_stacktrace_output)

  # Update the testcase values.
  testcase.put()

  # Update the upload metadata.
  metadata.security_flag = security_flag
  metadata.put()

  # Create tasks to
  # 1. Minimize testcase (minimize).
  # 2. Find regression range (regression).
  # 3. Find testcase impact on production branches (impact).
  # 4. Check whether testcase is fixed (progression).
  # 5. Get second stacktrace from another job in case of
  #    one-time crashers (stack).
  task_creation.create_tasks(testcase)
Code example #11
def find_fixed_range(testcase_id, job_type):
    """Attempt to find the revision range where a testcase was fixed."""
    deadline = tasks.get_task_completion_deadline()
    testcase = data_handler.get_testcase_by_id(testcase_id)
    if not testcase:
        return

    if testcase.fixed:
        logs.log_error('Fixed range is already set as %s, skip.' %
                       testcase.fixed)
        return

    # Setup testcase and its dependencies.
    file_list, _, testcase_file_path = setup.setup_testcase(testcase)
    if not file_list:
        return

    # Set a flag to indicate that we are running a progression task. This
    # shows a pending status on the testcase report page and avoids
    # conflicting testcase updates by the triage cron.
    testcase.set_metadata('progression_pending', True)

    # Custom binaries are handled as special cases.
    if build_manager.is_custom_binary():
        _check_fixed_for_custom_binary(testcase, job_type, testcase_file_path)
        return

    release_build_bucket_path = environment.get_value(
        'RELEASE_BUILD_BUCKET_PATH')
    revision_list = build_manager.get_revisions_list(release_build_bucket_path,
                                                     testcase=testcase)
    if not revision_list:
        testcase = data_handler.get_testcase_by_id(testcase_id)
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             'Failed to fetch revision list')
        tasks.add_task('progression', testcase_id, job_type)
        return

    # Use min_index and max_index to mark the start and end of the revision
    # list used for bisecting the progression range. Set the start to the
    # revision where we noticed the crash and the end to the trunk revision.
    # Also, reuse the min and max from a past run if it timed out.
    min_revision = testcase.get_metadata('last_progression_min')
    max_revision = testcase.get_metadata('last_progression_max')
    last_tested_revision = testcase.get_metadata('last_tested_crash_revision')
    known_crash_revision = last_tested_revision or testcase.crash_revision
    if not min_revision:
        min_revision = known_crash_revision
    if not max_revision:
        max_revision = revisions.get_last_revision_in_list(revision_list)

    min_index = revisions.find_min_revision_index(revision_list, min_revision)
    if min_index is None:
        raise errors.BuildNotFoundError(min_revision, job_type)
    max_index = revisions.find_max_revision_index(revision_list, max_revision)
    if max_index is None:
        raise errors.BuildNotFoundError(max_revision, job_type)

    testcase = data_handler.get_testcase_by_id(testcase_id)
    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.STARTED,
                                         'r%d' % max_revision)

    # Check to see if this testcase is still crashing now. If it is, then just
    # bail out.
    result = _testcase_reproduces_in_revision(testcase, testcase_file_path,
                                              job_type, max_revision)
    if result.is_crash():
        logs.log('Found crash with same signature on latest revision r%d.' %
                 max_revision)
        app_path = environment.get_value('APP_PATH')
        command = testcase_manager.get_command_line_for_application(
            testcase_file_path,
            app_path=app_path,
            needs_http=testcase.http_flag)
        symbolized_crash_stacktrace = result.get_stacktrace(symbolized=True)
        unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)
        stacktrace = utils.get_crash_stacktrace_output(
            command, symbolized_crash_stacktrace,
            unsymbolized_crash_stacktrace)
        testcase = data_handler.get_testcase_by_id(testcase_id)
        testcase.last_tested_crash_stacktrace = data_handler.filter_stacktrace(
            stacktrace)
        _update_completion_metadata(
            testcase,
            max_revision,
            is_crash=True,
            message='still crashes on latest revision r%s' % max_revision)

        # Since we've verified that the test case is still crashing, clear out any
        # metadata indicating potential flake from previous runs.
        task_creation.mark_unreproducible_if_flaky(testcase, False)

        # For the chromium project, save the latest crash information for
        # later upload to chromecrash/.
        state = result.get_symbolized_data()
        crash_uploader.save_crash_info_if_needed(testcase_id, max_revision,
                                                 job_type, state.crash_type,
                                                 state.crash_address,
                                                 state.frames)
        return

    # Don't burden the NFS server with caching these random builds.
    environment.set_value('CACHE_STORE', False)

    # Verify that we do crash in the min revision. This is assumed to be true
    # while we are doing the bisect.
    result = _testcase_reproduces_in_revision(testcase, testcase_file_path,
                                              job_type, min_revision)
    if result and not result.is_crash():
        testcase = data_handler.get_testcase_by_id(testcase_id)

        # Retry once on another bot to confirm our result.
        if data_handler.is_first_retry_for_task(testcase,
                                                reset_after_retry=True):
            tasks.add_task('progression', testcase_id, job_type)
            error_message = (
                'Known crash revision %d did not crash, will retry on another bot to '
                'confirm result' % known_crash_revision)
            data_handler.update_testcase_comment(testcase,
                                                 data_types.TaskState.ERROR,
                                                 error_message)
            _update_completion_metadata(testcase, max_revision)
            return

        _clear_progression_pending(testcase)
        error_message = ('Known crash revision %d did not crash' %
                         known_crash_revision)
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             error_message)
        task_creation.mark_unreproducible_if_flaky(testcase, True)
        return

    # Start a binary search to find the last non-crashing revision. At this
    # point, we know that we do crash in min_revision and do not crash in
    # max_revision.
    while time.time() < deadline:
        min_revision = revision_list[min_index]
        max_revision = revision_list[max_index]

        # If the min and max revisions are one apart this is as much as we can
        # narrow the range.
        if max_index - min_index == 1:
            _save_fixed_range(testcase_id, min_revision, max_revision)
            return

        # Test the middle revision of our range.
        middle_index = (min_index + max_index) // 2
        middle_revision = revision_list[middle_index]

        testcase = data_handler.get_testcase_by_id(testcase_id)
        log_message = 'Testing r%d (current range %d:%d)' % (
            middle_revision, min_revision, max_revision)
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.WIP,
                                             log_message)

        try:
            result = _testcase_reproduces_in_revision(testcase,
                                                      testcase_file_path,
                                                      job_type,
                                                      middle_revision)
        except errors.BadBuildError:
            # Skip this revision.
            del revision_list[middle_index]
            max_index -= 1
            continue

        if result.is_crash():
            min_index = middle_index
        else:
            max_index = middle_index

        _save_current_fixed_range_indices(testcase_id,
                                          revision_list[min_index],
                                          revision_list[max_index])

    # If we've broken out of the loop, we've exceeded the deadline. Recreate the
    # task to pick up where we left off.
    testcase = data_handler.get_testcase_by_id(testcase_id)
    error_message = ('Timed out, current range r%d:r%d' %
                     (revision_list[min_index], revision_list[max_index]))
    data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                         error_message)
    tasks.add_task('progression', testcase_id, job_type)
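
The narrowing loop above is a plain binary search over the revision list. Below is a toy illustration with hypothetical data, where a fake reproduces() predicate stands in for _testcase_reproduces_in_revision and the bad-build and deadline handling are omitted.

revision_list = [100, 105, 110, 120, 130, 140]


def reproduces(revision):
    # Pretend the crash was fixed in r120.
    return revision < 120


# Precondition mirrored from above: the crash reproduces at min_index and
# does not reproduce at max_index.
min_index, max_index = 0, len(revision_list) - 1
while max_index - min_index > 1:
    middle_index = (min_index + max_index) // 2
    if reproduces(revision_list[middle_index]):
        min_index = middle_index
    else:
        max_index = middle_index

# The fixed range is the adjacent pair where the behavior flips.
print(revision_list[min_index], revision_list[max_index])  # 110 120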
Code example #12
def execute_task(testcase_id, job_type):
    """Run a test case with a different job type to see if they reproduce."""
    testcase = data_handler.get_testcase_by_id(testcase_id)
    if not testcase:
        return

    if environment.is_engine_fuzzer_job(
            testcase.job_type) != environment.is_engine_fuzzer_job(job_type):
        # We should never reach here. But in case we do, we should bail out as
        # otherwise we will run into exceptions.
        return

    # Use a cloned testcase entity with different fuzz target parameters for
    # a different fuzzing engine.
    original_job_type = testcase.job_type
    testcase = _get_variant_testcase_for_job(testcase, job_type)

    # Setup testcase and its dependencies.
    file_list, _, testcase_file_path = setup.setup_testcase(testcase, job_type)
    if not file_list:
        return

    # Set up a custom or regular build. We explicitly omit the crash revision
    # since we want to test against the latest build here.
    build_manager.setup_build()

    # Check if we have an application path. If not, our build failed to set up
    # correctly.
    if not build_manager.check_app_path():
        testcase = data_handler.get_testcase_by_id(testcase_id)
        data_handler.update_testcase_comment(
            testcase,
            data_types.TaskState.ERROR,
            "Build setup failed with job: " + job_type,
        )
        return

    # Disable gestures if we're running on a different platform from that of
    # the original test case.
    use_gestures = testcase.platform == environment.platform().lower()

    # Reproduce the crash.
    app_path = environment.get_value("APP_PATH")
    command = testcase_manager.get_command_line_for_application(
        testcase_file_path, app_path=app_path, needs_http=testcase.http_flag)
    test_timeout = environment.get_value("TEST_TIMEOUT", 10)
    revision = environment.get_value("APP_REVISION")
    result = testcase_manager.test_for_crash_with_retries(
        testcase,
        testcase_file_path,
        test_timeout,
        http_flag=testcase.http_flag,
        use_gestures=use_gestures,
        compare_crash=False,
    )

    if result.is_crash() and not result.should_ignore():
        crash_state = result.get_state()
        crash_type = result.get_type()
        security_flag = result.is_security_issue()

        gestures = testcase.gestures if use_gestures else None
        one_time_crasher_flag = not testcase_manager.test_for_reproducibility(
            testcase.fuzzer_name,
            testcase.actual_fuzzer_name(),
            testcase_file_path,
            crash_state,
            security_flag,
            test_timeout,
            testcase.http_flag,
            gestures,
        )
        if one_time_crasher_flag:
            status = data_types.TestcaseVariantStatus.FLAKY
        else:
            status = data_types.TestcaseVariantStatus.REPRODUCIBLE

        crash_comparer = CrashComparer(crash_state, testcase.crash_state)
        is_similar = (crash_comparer.is_similar()
                      and security_flag == testcase.security_flag)

        unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)
        symbolized_crash_stacktrace = result.get_stacktrace(symbolized=True)
        crash_stacktrace_output = utils.get_crash_stacktrace_output(
            command, symbolized_crash_stacktrace,
            unsymbolized_crash_stacktrace)
    else:
        status = data_types.TestcaseVariantStatus.UNREPRODUCIBLE
        is_similar = False
        crash_type = None
        crash_state = None
        security_flag = False
        crash_stacktrace_output = "No crash occurred."

    if original_job_type == job_type:
        # This case happens when someone clicks the 'Update last tested
        # stacktrace using trunk build' button.
        testcase = data_handler.get_testcase_by_id(testcase_id)
        testcase.last_tested_crash_stacktrace = data_handler.filter_stacktrace(
            crash_stacktrace_output)
        testcase.set_metadata("last_tested_crash_revision",
                              revision,
                              update_testcase=True)
    else:
        # Regular case of variant analysis.
        variant = data_handler.get_testcase_variant(testcase_id, job_type)
        variant.status = status
        variant.revision = revision
        variant.crash_type = crash_type
        variant.crash_state = crash_state
        variant.security_flag = security_flag
        variant.is_similar = is_similar
        # Explicitly skip the crash stacktrace for now, as it makes entities
        # larger and we plan to use only crash parameters in the UI.
        variant.put()
Code example #13
def check_for_bad_build(job_type, crash_revision):
    """Return true if the build is bad, i.e. crashes on startup."""
    # Check the bad build check flag to see if we want to do this.
    if not environment.get_value('BAD_BUILD_CHECK'):
        return False

    # Create a blank command line with no file to run and no http.
    command = get_command_line_for_application(file_to_run='',
                                               needs_http=False)

    # When checking for bad builds, we use the default window size.
    # We don't want to pick a custom size since it can potentially cause a
    # startup crash, making a build be incorrectly detected as bad.
    default_window_argument = environment.get_value('WINDOW_ARG', '')
    if default_window_argument:
        command = command.replace(' %s' % default_window_argument, '')

    # TSAN is slow and boots slowly on first startup. Increase the warmup
    # timeout for this case.
    if environment.tool_matches('TSAN', job_type):
        fast_warmup_timeout = environment.get_value('WARMUP_TIMEOUT')
    else:
        fast_warmup_timeout = environment.get_value('FAST_WARMUP_TIMEOUT')

    # Initialize helper variables.
    is_bad_build = False
    build_run_console_output = ''
    app_directory = environment.get_value('APP_DIR')

    # Exit all running instances.
    process_handler.terminate_stale_application_instances()

    # Check if the build is bad.
    return_code, crash_time, output = process_handler.run_process(
        command,
        timeout=fast_warmup_timeout,
        current_working_directory=app_directory)
    crash_result = CrashResult(return_code, crash_time, output)

    # 1. Need to account for startup crashes with no crash state. E.g. failed to
    #    load shared library. So, ignore state for comparison.
    # 2. Ignore leaks as they don't block a build from reporting regular crashes
    #    and also don't impact regression range calculations.
    if (crash_result.is_crash(ignore_state=True)
            and not crash_result.should_ignore() and
            not crash_result.get_type() in ['Direct-leak', 'Indirect-leak']):
        is_bad_build = True
        build_run_console_output = utils.get_crash_stacktrace_output(
            command, crash_result.get_stacktrace(symbolized=True),
            crash_result.get_stacktrace(symbolized=False))
        logs.log('Bad build for %s detected at r%d.' %
                 (job_type, crash_revision),
                 output=build_run_console_output)

    # Exit all running instances.
    process_handler.terminate_stale_application_instances()

    # Any of the conditions below indicate that the bot is in a bad state and
    # it is not caused by the build itself. In that case, just exit.
    build_state = data_handler.get_build_state(job_type, crash_revision)
    if is_bad_build and utils.sub_string_exists_in(BAD_STATE_HINTS, output):
        logs.log_fatal_and_exit(
            'Bad bot environment detected, exiting.',
            output=build_run_console_output,
            snapshot=process_handler.get_runtime_snapshot())

    # If none of the other bots have added information about this build,
    # then add it now.
    if (build_state == data_types.BuildState.UNMARKED
            and not crash_result.should_ignore()):
        data_handler.add_build_metadata(job_type, crash_revision, is_bad_build,
                                        build_run_console_output)

    return is_bad_build
Code example #14
def get_symbolized_stacktraces(testcase_file_path, testcase,
                               old_crash_stacktrace, expected_state):
  """Use the symbolized builds to generate an updated stacktrace."""
  # Initialize variables.
  app_path = environment.get_value("APP_PATH")
  app_path_debug = environment.get_value("APP_PATH_DEBUG")
  long_test_timeout = environment.get_value("WARMUP_TIMEOUT")
  retry_limit = environment.get_value("FAIL_RETRIES")
  symbolized = False

  debug_build_stacktrace = ""
  release_build_stacktrace = old_crash_stacktrace

  # Symbolize using the debug build first so that the debug build stacktrace
  # comes after the more important release build stacktrace.
  if app_path_debug:
    for _ in range(retry_limit):
      process_handler.terminate_stale_application_instances()
      command = testcase_manager.get_command_line_for_application(
          testcase_file_path,
          app_path=app_path_debug,
          needs_http=testcase.http_flag,
      )
      return_code, crash_time, output = process_handler.run_process(
          command, timeout=long_test_timeout, gestures=testcase.gestures)
      crash_result = CrashResult(return_code, crash_time, output)

      if crash_result.is_crash():
        state = crash_result.get_symbolized_data()

        if crash_analyzer.ignore_stacktrace(state.crash_stacktrace):
          continue

        unsymbolized_crash_stacktrace = crash_result.get_stacktrace(
            symbolized=False)
        debug_build_stacktrace = utils.get_crash_stacktrace_output(
            command,
            state.crash_stacktrace,
            unsymbolized_crash_stacktrace,
            build_type="debug",
        )
        symbolized = True
        break

  # Symbolize using the release build.
  if app_path:
    for _ in range(retry_limit):
      process_handler.terminate_stale_application_instances()
      command = testcase_manager.get_command_line_for_application(
          testcase_file_path, app_path=app_path, needs_http=testcase.http_flag)
      return_code, crash_time, output = process_handler.run_process(
          command, timeout=long_test_timeout, gestures=testcase.gestures)
      crash_result = CrashResult(return_code, crash_time, output)

      if crash_result.is_crash():
        state = crash_result.get_symbolized_data()

        if crash_analyzer.ignore_stacktrace(state.crash_stacktrace):
          continue

        if state.crash_state != expected_state:
          continue

        # Release stack's security flag has to match the symbolized release
        # stack's security flag.
        security_flag = crash_result.is_security_issue()
        if security_flag != testcase.security_flag:
          continue

        unsymbolized_crash_stacktrace = crash_result.get_stacktrace(
            symbolized=False)
        release_build_stacktrace = utils.get_crash_stacktrace_output(
            command,
            state.crash_stacktrace,
            unsymbolized_crash_stacktrace,
            build_type="release",
        )
        symbolized = True
        break

  stacktrace = release_build_stacktrace
  if debug_build_stacktrace:
    stacktrace += "\n\n" + debug_build_stacktrace

  return symbolized, stacktrace
Code example #15
File: variant_task.py Project: lcytxw/clusterfuzz
def execute_task(testcase_id, job_type):
    """Run a test case with a different job type to see if they reproduce."""
    testcase = data_handler.get_testcase_by_id(testcase_id)
    if not testcase:
        return

    if (environment.is_engine_fuzzer_job(testcase.job_type) !=
            environment.is_engine_fuzzer_job(job_type)):
        # We should never reach here. But in case we do, we should bail out as
        # otherwise we will run into exceptions.
        return

    # Setup testcase and its dependencies.
    fuzzer_override = builtin_fuzzers.get_fuzzer_for_job(job_type)
    file_list, _, testcase_file_path = setup.setup_testcase(
        testcase, fuzzer_override=fuzzer_override)
    if not file_list:
        return

    # Set up a custom or regular build. We explicitly omit the crash revision
    # since we want to test against the latest build here.
    build_manager.setup_build()

    # Check if we have an application path. If not, our build failed to set up
    # correctly.
    app_path = environment.get_value('APP_PATH')
    if not app_path:
        testcase = data_handler.get_testcase_by_id(testcase_id)
        data_handler.update_testcase_comment(
            testcase, data_types.TaskState.ERROR,
            'Build setup failed with job: ' + job_type)
        return

    # Reproduce the crash.
    command = testcase_manager.get_command_line_for_application(
        testcase_file_path, app_path=app_path, needs_http=testcase.http_flag)
    test_timeout = environment.get_value('TEST_TIMEOUT', 10)
    revision = environment.get_value('APP_REVISION')
    result = testcase_manager.test_for_crash_with_retries(
        testcase,
        testcase_file_path,
        test_timeout,
        http_flag=testcase.http_flag,
        compare_crash=False)

    if result.is_crash() and not result.should_ignore():
        crash_state = result.get_state()
        crash_type = result.get_type()
        security_flag = result.is_security_issue()

        one_time_crasher_flag = not testcase_manager.test_for_reproducibility(
            testcase_file_path, crash_state, security_flag, test_timeout,
            testcase.http_flag, testcase.gestures)
        if one_time_crasher_flag:
            status = data_types.TestcaseVariantStatus.FLAKY
        else:
            status = data_types.TestcaseVariantStatus.REPRODUCIBLE

        crash_comparer = CrashComparer(crash_state, testcase.crash_state)
        is_similar = (crash_comparer.is_similar()
                      and security_flag == testcase.security_flag)

        unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)
        symbolized_crash_stacktrace = result.get_stacktrace(symbolized=True)
        crash_stacktrace_output = utils.get_crash_stacktrace_output(
            command, symbolized_crash_stacktrace,
            unsymbolized_crash_stacktrace)
    else:
        status = data_types.TestcaseVariantStatus.UNREPRODUCIBLE
        is_similar = False
        crash_type = None
        crash_state = None
        security_flag = False
        crash_stacktrace_output = 'No crash occurred.'

    testcase = data_handler.get_testcase_by_id(testcase_id)
    if testcase.job_type == job_type:
        # This case happens when someone clicks the 'Update last tested
        # stacktrace using trunk build' button.
        testcase.last_tested_crash_stacktrace = (
            data_handler.filter_stacktrace(crash_stacktrace_output))
        testcase.set_metadata('last_tested_crash_revision',
                              revision,
                              update_testcase=False)
    else:
        # Regular case of variant analysis.
        variant = data_handler.get_testcase_variant(testcase_id, job_type)
        variant.status = status
        variant.revision = revision
        variant.crash_type = crash_type
        variant.crash_state = crash_state
        variant.security_flag = security_flag
        variant.is_similar = is_similar
        # Explicitly skip the crash stacktrace for now, as it makes entities
        # larger and we plan to use only crash parameters in the UI.
        variant.put()
Code example #16
File: stack_task.py Project: zzdxxd/clusterfuzz
def execute_task(testcase_id, job_type):
    """Run a test case with a second job type to generate a second stack trace."""
    # Locate the testcase associated with the id.
    testcase = data_handler.get_testcase_by_id(testcase_id)

    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.STARTED)

    # Setup testcase and its dependencies.
    file_list, _, testcase_file_path = setup.setup_testcase(testcase)
    if not file_list:
        return

    # Initialize timeout values.
    test_timeout = environment.get_value('TEST_TIMEOUT', 10)

    # Set up a custom or regular build. We explicitly omit the crash revision
    # since we want to test against the latest build here.
    build_manager.setup_build()

    # Check if we have an application path. If not, our build failed to setup
    # correctly.
    app_path = environment.get_value('APP_PATH')
    if not app_path:
        testcase = data_handler.get_testcase_by_id(testcase_id)
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             'Build setup failed')
        return

    # TSAN tool settings (if the tool is used).
    if environment.tool_matches('TSAN', job_type):
        environment.set_tsan_max_history_size()

    command = tests.get_command_line_for_application(
        testcase_file_path, app_path=app_path, needs_http=testcase.http_flag)
    result = tests.test_for_crash_with_retries(testcase,
                                               testcase_file_path,
                                               test_timeout,
                                               http_flag=testcase.http_flag,
                                               compare_crash=False)

    # Get revision information.
    revision = environment.get_value('APP_REVISION')

    # If a crash occurs, then we add the second stacktrace information.
    if result.is_crash():
        state = result.get_symbolized_data()
        security_flag = result.is_security_issue()
        one_time_crasher_flag = not tests.test_for_reproducibility(
            testcase_file_path, state.crash_state, security_flag, test_timeout,
            testcase.http_flag, testcase.gestures)

        # Attach a header with information on the reproducibility flag.
        if one_time_crasher_flag:
            crash_stacktrace_header = 'Unreliable'
        else:
            crash_stacktrace_header = 'Fully reproducible'
        crash_stacktrace_header += (' crash found using %s job.\n\n' %
                                    job_type)

        unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)
        stacktrace = utils.get_crash_stacktrace_output(
            command, state.crash_stacktrace, unsymbolized_crash_stacktrace)

        crash_stacktrace = data_handler.filter_stacktrace(
            '%s%s' % (crash_stacktrace_header, stacktrace))
    else:
        crash_stacktrace = 'No crash found using %s job.' % job_type

    # Decide which stacktrace field this stacktrace should update.
    testcase = data_handler.get_testcase_by_id(testcase_id)
    if testcase.last_tested_crash_stacktrace == 'Pending':
        # This case happens when someone clicks the 'Update last tested
        # stacktrace using trunk build' button.
        testcase.last_tested_crash_stacktrace = crash_stacktrace
        testcase.set_metadata('last_tested_crash_revision',
                              revision,
                              update_testcase=False)
    else:
        # Default case when someone defines |SECOND_STACK_JOB_TYPE| in the job
        # type. This helps to test an unreproducible crash with a different
        # memory debugging tool to get a second stacktrace (e.g. running TSAN
        # on a flaky crash found in an ASAN build).
        testcase.second_crash_stacktrace = crash_stacktrace
        testcase.set_metadata('second_crash_stacktrace_revision',
                              revision,
                              update_testcase=False)

    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.FINISHED)