Example 1
def get_impact_on_build(build_type, current_version, testcase,
                        testcase_file_path):
  """Return impact and additional trace on a prod build given build_type."""
  build = build_manager.setup_production_build(build_type)
  if not build:
    raise BuildFailedException(
        'Build setup failed for %s' % build_type.capitalize())

  if not build_manager.check_app_path():
    raise AppFailedException()

  version = build.revision
  if version == current_version:
    return Impact(current_version, likely=False)

  app_path = environment.get_value('APP_PATH')
  command = testcase_manager.get_command_line_for_application(
      testcase_file_path, app_path=app_path, needs_http=testcase.http_flag)
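  # Re-run the test case against this production build to check whether the
  # crash still reproduces.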
  result = testcase_manager.test_for_crash_with_retries(
      testcase,
      testcase_file_path,
      environment.get_value('TEST_TIMEOUT'),
      http_flag=testcase.http_flag)

  if result.is_crash():
    symbolized_crash_stacktrace = result.get_stacktrace(symbolized=True)
    unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)
    stacktrace = utils.get_crash_stacktrace_output(
        command, symbolized_crash_stacktrace, unsymbolized_crash_stacktrace,
        build_type)
    return Impact(version, likely=False, extra_trace=stacktrace)

  return Impact()
Example 2
def _testcase_reproduces_in_revision(testcase,
                                     testcase_file_path,
                                     job_type,
                                     revision,
                                     update_metadata=False):
    """Test to see if a test case reproduces in the specified revision."""
    build_manager.setup_build(revision)
    if not build_manager.check_app_path():
        raise errors.BuildSetupError(revision, job_type)

    if testcase_manager.check_for_bad_build(job_type, revision):
        log_message = 'Bad build at r%d. Skipping' % revision
        testcase = data_handler.get_testcase_by_id(testcase.key.id())
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.WIP,
                                             log_message)
        raise errors.BadBuildError(revision, job_type)

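    # Fall back to a 10-second test timeout if TEST_TIMEOUT is not set.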
    test_timeout = environment.get_value('TEST_TIMEOUT', 10)
    result = testcase_manager.test_for_crash_with_retries(
        testcase,
        testcase_file_path,
        test_timeout,
        http_flag=testcase.http_flag)
    _log_output(revision, result)

    if update_metadata:
        _update_issue_metadata(testcase)

    return result
Example 3
  def test_test_for_crash_with_retries_blackbox_fail(self):
    """Test test_for_crash_with_retries failing to reproduce a crash
    (blackbox)."""
    crash_result = testcase_manager.test_for_crash_with_retries(
        self.blackbox_testcase, '/fuzz-testcase', 10)
    self.assertEqual(0, crash_result.return_code)
    self.assertEqual(0, crash_result.crash_time)
    self.assertEqual('output', crash_result.output)
    self.assertEqual(3, self.mock.run_process.call_count)
    self.mock.run_process.assert_has_calls([
        mock.call('/build_dir/app_name -arg1 -arg2',
                  current_working_directory='/build_dir',
                  gestures=[],
                  timeout=120),
        mock.call('/build_dir/app_name -arg1 -arg2',
                  current_working_directory='/build_dir',
                  gestures=[],
                  timeout=10),
        mock.call('/build_dir/app_name -arg1 -arg2',
                  current_working_directory='/build_dir',
                  gestures=[],
                  timeout=10),
    ])
    self.mock.log.assert_has_calls([
        mock.call('No crash occurred (round 1).', output='output'),
        mock.call('No crash occurred (round 2).', output='output'),
        mock.call('No crash occurred (round 3).', output='output'),
        mock.call("Didn't crash at all.")
    ])
Example 4
    def test_test_for_crash_with_retries_greybox_fail(self):
        """Test test_for_crash_with_retries failing to reproduce a crash
        (greybox)."""
        mock_engine = mock.Mock()
        mock_engine.reproduce.return_value = engine.ReproduceResult(['cmd'], 0,
                                                                    0,
                                                                    'output')
        self.mock.get.return_value = mock_engine

        crash_result = testcase_manager.test_for_crash_with_retries(
            self.greybox_testcase, '/fuzz-testcase', 10)
        self.assertEqual(0, crash_result.return_code)
        self.assertEqual(0, crash_result.crash_time)
        self.assertEqual(self.GREYBOX_FUZZER_NO_CRASH, crash_result.output)
        self.assertEqual(3, mock_engine.reproduce.call_count)
        mock_engine.reproduce.assert_has_calls([
            mock.call('/build_dir/target', '/fuzz-testcase',
                      ['-arg1', '-arg2'], 120),
            mock.call('/build_dir/target', '/fuzz-testcase',
                      ['-arg1', '-arg2'], 10),
            mock.call('/build_dir/target', '/fuzz-testcase',
                      ['-arg1', '-arg2'], 10),
        ])
        self.mock.log.assert_has_calls([
            mock.call('No crash occurred (round 1).',
                      output=self.GREYBOX_FUZZER_NO_CRASH),
            mock.call('No crash occurred (round 2).',
                      output=self.GREYBOX_FUZZER_NO_CRASH),
            mock.call('No crash occurred (round 3).',
                      output=self.GREYBOX_FUZZER_NO_CRASH),
            mock.call("Didn't crash at all.")
        ])
Example 5
    def test_test_for_crash_with_retries_blackbox_succeed_no_comparison(self):
        """Test test_for_crash_with_retries reproducing a crash with
        compare_crash set to False (blackbox)."""
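        # With compare_crash=False, the crash stacktrace comparison is skipped.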
        self.mock.run_process.side_effect = [
            (0, 0, 'output'),
            (1, 1, 'crash'),
        ]

        crash_result = testcase_manager.test_for_crash_with_retries(
            self.blackbox_testcase, '/fuzz-testcase', 10, compare_crash=False)
        self.assertEqual(1, crash_result.return_code)
        self.assertEqual(1, crash_result.crash_time)
        self.assertEqual('crash', crash_result.output)
        self.assertEqual(2, self.mock.run_process.call_count)

        self.mock.run_process.assert_has_calls([
            mock.call('/build_dir/app_name -arg1 -arg2',
                      current_working_directory='/build_dir',
                      gestures=[],
                      timeout=120),
            mock.call('/build_dir/app_name -arg1 -arg2',
                      current_working_directory='/build_dir',
                      gestures=[],
                      timeout=10),
        ])
        self.mock.log.assert_has_calls([
            mock.call('No crash occurred (round 1).', output='output'),
            mock.call('Crash occurred in 1 seconds (round 2). State:\nstate',
                      output='crash'),
            mock.call('Crash stacktrace comparison skipped.')
        ])
Example 6
    def test_test_for_crash_with_retries_greybox_succeed_no_comparison(self):
        """Test test_for_crash_with_retries reproducing a crash with
        compare_crash set to False (greybox)."""
        mock_engine = mock.Mock()
        mock_engine.reproduce.side_effect = [
            engine.ReproduceResult(['cmd'], 0, 0, 'output'),
            engine.ReproduceResult(['cmd'], 1, 1, 'crash'),
        ]
        self.mock.get.return_value = mock_engine

        crash_result = testcase_manager.test_for_crash_with_retries(
            self.greybox_testcase, '/fuzz-testcase', 10, compare_crash=False)
        self.assertEqual(1, crash_result.return_code)
        self.assertEqual(1, crash_result.crash_time)
        self.assertEqual(self.GREYBOX_FUZZER_CRASH, crash_result.output)
        self.assertEqual(2, mock_engine.reproduce.call_count)
        mock_engine.reproduce.assert_has_calls([
            mock.call('/build_dir/target', '/fuzz-testcase',
                      ['-arg1', '-arg2'], 120),
            mock.call('/build_dir/target', '/fuzz-testcase',
                      ['-arg1', '-arg2'], 10),
        ])
        self.mock.log.assert_has_calls([
            mock.call('No crash occurred (round 1).',
                      output=self.GREYBOX_FUZZER_NO_CRASH),
            mock.call('Crash occurred in 1 seconds (round 2). State:\nstate',
                      output=self.GREYBOX_FUZZER_CRASH),
            mock.call('Crash stacktrace comparison skipped.')
        ])
Example 7
  def test_test_for_crash_with_retries_blackbox_succeed(self):
    """Test test_for_crash_with_retries reproducing a crash (blackbox)."""
    self.mock.run_process.side_effect = [
        (0, 0, 'nope'),
        (1, 1, 'crash'),
    ]

    crash_result = testcase_manager.test_for_crash_with_retries(
        self.blackbox_testcase, '/fuzz-testcase', 10)
    self.assertEqual(1, crash_result.return_code)
    self.assertEqual(1, crash_result.crash_time)
    self.assertEqual('crash', crash_result.output)
    self.assertEqual(2, self.mock.run_process.call_count)

    self.mock.run_process.assert_has_calls([
        mock.call(
            '/build_dir/app_name -arg1 -arg2',
            current_working_directory='/build_dir',
            gestures=[],
            timeout=120),
        mock.call(
            '/build_dir/app_name -arg1 -arg2',
            current_working_directory='/build_dir',
            gestures=[],
            timeout=10),
    ])
Example 8
def _check_fixed_for_custom_binary(testcase, job_type, testcase_file_path):
  """Simplified fixed check for test cases using custom binaries."""
  revision = environment.get_value('APP_REVISION')

  # Update comments to reflect bot information and clean up old comments.
  testcase_id = testcase.key.id()
  testcase = data_handler.get_testcase_by_id(testcase_id)
  data_handler.update_testcase_comment(testcase, data_types.TaskState.STARTED)

  build_manager.setup_build()
  if not build_manager.check_app_path():
    testcase = data_handler.get_testcase_by_id(testcase_id)
    data_handler.update_testcase_comment(
        testcase, data_types.TaskState.ERROR,
        'Build setup failed for custom binary')
    build_fail_wait = environment.get_value('FAIL_WAIT')
    tasks.add_task(
        'progression', testcase_id, job_type, wait_time=build_fail_wait)
    return

  test_timeout = environment.get_value('TEST_TIMEOUT', 10)
  result = testcase_manager.test_for_crash_with_retries(
      testcase, testcase_file_path, test_timeout, http_flag=testcase.http_flag)
  _log_output(revision, result)

  # Re-fetch to finalize testcase updates in branches below.
  testcase = data_handler.get_testcase_by_id(testcase.key.id())

  # If this still crashes on the most recent build, it's not fixed. The task
  # will be rescheduled by a cron job and re-attempted eventually.
  if result.is_crash():
    app_path = environment.get_value('APP_PATH')
    command = testcase_manager.get_command_line_for_application(
        testcase_file_path, app_path=app_path, needs_http=testcase.http_flag)
    symbolized_crash_stacktrace = result.get_stacktrace(symbolized=True)
    unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)
    stacktrace = utils.get_crash_stacktrace_output(
        command, symbolized_crash_stacktrace, unsymbolized_crash_stacktrace)
    testcase.last_tested_crash_stacktrace = data_handler.filter_stacktrace(
        stacktrace)
    _update_completion_metadata(
        testcase,
        revision,
        is_crash=True,
        message='still crashes on latest custom build')
    return

  # Retry once on another bot to confirm our results and in case this bot is in
  # a bad state which we didn't catch through our usual means.
  if data_handler.is_first_retry_for_task(testcase, reset_after_retry=True):
    tasks.add_task('progression', testcase_id, job_type)
    _update_completion_metadata(testcase, revision)
    return

  # The bug is fixed.
  testcase.fixed = 'Yes'
  testcase.open = False
  _update_completion_metadata(
      testcase, revision, message='fixed on latest custom build')
Example 9
  def test_test_for_crash_with_retries_greybox_legacy(self):
    """Test test_for_crash_with_retries reproducing a legacy crash (greybox)."""
    mock_engine = mock.Mock()
    mock_engine.reproduce.side_effect = [
        engine.ReproduceResult(['cmd'], 1, 1, 'crash'),
    ]
    self.mock.get.return_value = mock_engine

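    # Write a legacy-style flags file that embeds the target name and
    # arguments.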
    with open('/flags-testcase', 'w') as f:
      f.write('%TESTCASE% target -arg1 -arg2')

    testcase_manager.test_for_crash_with_retries(self.greybox_testcase,
                                                 '/fuzz-testcase', 10)
    mock_engine.reproduce.assert_has_calls([
        mock.call('/build_dir/target', '/fuzz-testcase', ['-arg1', '-arg2'],
                  120),
    ])
Example 10
    def test_test_for_crash_with_retries_greybox_legacy(self):
        """Test test_for_crash_with_retries reproducing a legacy crash (greybox)."""
        mock_engine = mock.Mock()
        mock_engine.reproduce.side_effect = [
            engine.ReproduceResult(['cmd'], 1, 1, 'crash'),
        ]
        self.mock.get.return_value = mock_engine

        with open('/flags-testcase', 'w') as f:
            f.write('%TESTCASE% target -arg1 -arg2')

        testcase_manager.test_for_crash_with_retries(self.greybox_testcase,
                                                     '/fuzz-testcase', 10)
        mock_engine.reproduce.assert_has_calls([
            mock.call('/build_dir/target', '/fuzz-testcase',
                      ['-arg1', '-arg2'], 120),
        ])
        self.mock.log.assert_has_calls([
            mock.call('Crash occurred in 1 seconds (round 1). State:\nstate',
                      output=self.GREYBOX_FUZZER_CRASH),
            mock.call('Crash stacktrace is similar to original stacktrace.')
        ])
Example 11
def _reproduce_crash(testcase_url, build_directory, iterations, disable_xvfb):
    """Reproduce a crash."""
    _prepare_initial_environment(build_directory, iterations)

    # Validate the test case URL and fetch the tool's configuration.
    testcase_id = _get_testcase_id_from_url(testcase_url)
    configuration = config.ReproduceToolConfiguration(testcase_url)

    testcase = _get_testcase(testcase_id, configuration)

    # Ensure that we support this test case.
    if testcase.platform not in SUPPORTED_PLATFORMS:
        raise errors.ReproduceToolUnrecoverableError(
            'The reproduce tool is not yet supported on {platform}.'.format(
                platform=testcase.platform))

    testcase_path = _download_testcase(testcase_id, testcase, configuration)
    _update_environment_for_testcase(testcase, build_directory)

    # Validate that we're running on the right platform for this test case.
    platform = environment.platform().lower()
    if testcase.platform == 'android' and platform == 'linux':
        _prepare_environment_for_android()
    elif testcase.platform == 'android' and platform != 'linux':
        raise errors.ReproduceToolUnrecoverableError(
            'The ClusterFuzz environment only supports running Android test cases '
            'on Linux host machines. Unable to reproduce the test case on '
            '{current_platform}.'.format(current_platform=platform))
    elif testcase.platform != platform:
        raise errors.ReproduceToolUnrecoverableError(
            'The specified test case was discovered on {testcase_platform}. '
            'Unable to attempt to reproduce it on {current_platform}.'.format(
                testcase_platform=testcase.platform,
                current_platform=platform))

    x_processes = []
    if not disable_xvfb:
        x_processes = _setup_x()
    timeout = environment.get_value('TEST_TIMEOUT')

    print('Running testcase...')
    result = testcase_manager.test_for_crash_with_retries(
        testcase, testcase_path, timeout)

    # Terminate Xvfb and blackbox.
    for process in x_processes:
        process.terminate()

    return result
Example 12
def get_impact_on_build(build_type, current_version, testcase,
                        testcase_file_path):
  """Return impact and additional trace on a prod build given build_type."""
  # TODO(yuanjunh): remove es_enabled var after testing is done.
  es_enabled = testcase.get_metadata('es_enabled', False)
  if build_type == 'extended_stable' and not es_enabled:
    return Impact()
  build = build_manager.setup_production_build(build_type)
  if not build:
    raise BuildFailedException(
        'Build setup failed for %s' % build_type.capitalize())

  if not build_manager.check_app_path():
    raise AppFailedException()

  version = build.revision
  if version == current_version:
    return Impact(current_version, likely=False)

  app_path = environment.get_value('APP_PATH')
  command = testcase_manager.get_command_line_for_application(
      testcase_file_path, app_path=app_path, needs_http=testcase.http_flag)

  if es_enabled:
    logs.log(
        "ES build for testcase %d, command: %s" % (testcase.key.id(), command))

  result = testcase_manager.test_for_crash_with_retries(
      testcase,
      testcase_file_path,
      environment.get_value('TEST_TIMEOUT'),
      http_flag=testcase.http_flag)

  if result.is_crash():
    symbolized_crash_stacktrace = result.get_stacktrace(symbolized=True)
    unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)
    stacktrace = utils.get_crash_stacktrace_output(
        command, symbolized_crash_stacktrace, unsymbolized_crash_stacktrace,
        build_type)
    return Impact(version, likely=False, extra_trace=stacktrace)

  return Impact()
Example 13
  def test_test_for_crash_with_retries_greybox_succeed(self):
    """Test test_for_crash_with_retries reproducing a crash (greybox)."""
    mock_engine = mock.Mock()
    mock_engine.reproduce.side_effect = [
        engine.ReproduceResult(['cmd'], 0, 0, 'output'),
        engine.ReproduceResult(['cmd'], 1, 0, 'crash'),
    ]
    self.mock.get.return_value = mock_engine

    crash_result = testcase_manager.test_for_crash_with_retries(
        self.greybox_testcase, '/fuzz-testcase', 10)
    self.assertEqual(1, crash_result.return_code)
    self.assertEqual(0, crash_result.crash_time)
    self.assertEqual(self.EXPECTED_HEADER + 'crash', crash_result.output)
    self.assertEqual(2, mock_engine.reproduce.call_count)
    mock_engine.reproduce.assert_has_calls([
        mock.call('/build_dir/target', '/fuzz-testcase', ['-arg1', '-arg2'],
                  120),
        mock.call('/build_dir/target', '/fuzz-testcase', ['-arg1', '-arg2'],
                  10),
    ])
Example 14
    def test_test_for_crash_with_retries_greybox_fail(self):
        """Test test_for_crash_with_retries failing to reproduce a crash
        (greybox)."""
        mock_engine = mock.Mock()
        mock_engine.reproduce.return_value = engine.ReproduceResult(
            0, 0, 'output')
        self.mock.get.return_value = mock_engine

        crash_result = testcase_manager.test_for_crash_with_retries(
            self.testcase, '/fuzz-testcase', 10)
        self.assertEqual(0, crash_result.return_code)
        self.assertEqual(0, crash_result.crash_time)
        self.assertEqual('output', crash_result.output)
        self.assertEqual(3, mock_engine.reproduce.call_count)
        mock_engine.reproduce.assert_has_calls([
            mock.call('/build_dir/target', '/fuzz-testcase',
                      ['-arg1', '-arg2'], 120),
            mock.call('/build_dir/target', '/fuzz-testcase',
                      ['-arg1', '-arg2'], 10),
            mock.call('/build_dir/target', '/fuzz-testcase',
                      ['-arg1', '-arg2'], 10),
        ])
Example 15
def _testcase_reproduces_in_revision(testcase,
                                     testcase_file_path,
                                     job_type,
                                     revision,
                                     should_log=True,
                                     min_revision=None,
                                     max_revision=None):
    """Test to see if a test case reproduces in the specified revision."""
    if should_log:
        log_message = 'Testing r%d' % revision
        if min_revision is not None and max_revision is not None:
            log_message += ' (current range %d:%d)' % (min_revision,
                                                       max_revision)

        testcase = data_handler.get_testcase_by_id(testcase.key.id())
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.WIP,
                                             log_message)

    build_manager.setup_build(revision)
    app_path = environment.get_value('APP_PATH')
    if not app_path:
        raise errors.BuildSetupError(revision, job_type)

    if testcase_manager.check_for_bad_build(job_type, revision):
        log_message = 'Bad build at r%d. Skipping' % revision
        testcase = data_handler.get_testcase_by_id(testcase.key.id())
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.WIP,
                                             log_message)
        raise errors.BadBuildError(revision, job_type)

    test_timeout = environment.get_value('TEST_TIMEOUT', 10)
    result = testcase_manager.test_for_crash_with_retries(
        testcase,
        testcase_file_path,
        test_timeout,
        http_flag=testcase.http_flag)
    return result.is_crash()
Example 16
def execute_task(testcase_id, job_type):
    """Run analyze task."""
    # Reset redzones.
    environment.reset_current_memory_tool_options(redzone_size=128)

    # Unset window location size and position properties to use the defaults.
    environment.set_value('WINDOW_ARG', '')

    # Locate the testcase associated with the id.
    testcase = data_handler.get_testcase_by_id(testcase_id)
    if not testcase:
        return

    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.STARTED)

    metadata = data_types.TestcaseUploadMetadata.query(
        data_types.TestcaseUploadMetadata.testcase_id == int(
            testcase_id)).get()
    if not metadata:
        logs.log_error('Testcase %s has no associated upload metadata.' %
                       testcase_id)
        testcase.key.delete()
        return

    is_lsan_enabled = environment.get_value('LSAN')
    if is_lsan_enabled:
        # Creates empty local blacklist so all leaks will be visible to uploader.
        leak_blacklist.create_empty_local_blacklist()

    # Store the bot name and timestamp in upload metadata.
    bot_name = environment.get_value('BOT_NAME')
    metadata.bot_name = bot_name
    metadata.timestamp = datetime.datetime.utcnow()
    metadata.put()

    # Adjust the test timeout if the user has provided one.
    if metadata.timeout:
        environment.set_value('TEST_TIMEOUT', metadata.timeout)

    # Adjust the number of retries if the user has provided one.
    if metadata.retries is not None:
        environment.set_value('CRASH_RETRIES', metadata.retries)

    # Set up the testcase and get the absolute testcase path.
    file_list, _, testcase_file_path = setup.setup_testcase(testcase, job_type)
    if not file_list:
        return

    # Set up a custom or regular build based on revision.
    build_manager.setup_build(testcase.crash_revision)

    # Check if we have an application path. If not, our build failed
    # to set up correctly.
    if not build_manager.check_app_path():
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             'Build setup failed')

        if data_handler.is_first_retry_for_task(testcase):
            build_fail_wait = environment.get_value('FAIL_WAIT')
            tasks.add_task('analyze',
                           testcase_id,
                           job_type,
                           wait_time=build_fail_wait)
        else:
            close_invalid_testcase_and_update_status(testcase, metadata,
                                                     'Build setup failed')
        return

    # Update initial testcase information.
    testcase.absolute_path = testcase_file_path
    testcase.job_type = job_type
    testcase.binary_flag = utils.is_binary_file(testcase_file_path)
    testcase.queue = tasks.default_queue()
    testcase.crash_state = ''

    # Set initial testcase metadata fields (e.g. build url, etc).
    data_handler.set_initial_testcase_metadata(testcase)

    # Update minimized arguments and use ones provided during user upload.
    if not testcase.minimized_arguments:
        minimized_arguments = environment.get_value('APP_ARGS') or ''
        additional_command_line_flags = testcase.get_metadata(
            'uploaded_additional_args')
        if additional_command_line_flags:
            minimized_arguments += ' %s' % additional_command_line_flags
        environment.set_value('APP_ARGS', minimized_arguments)
        testcase.minimized_arguments = minimized_arguments

    # Update other fields not set at upload time.
    testcase.crash_revision = environment.get_value('APP_REVISION')
    data_handler.set_initial_testcase_metadata(testcase)
    testcase.put()

    # Initialize some variables.
    gestures = testcase.gestures
    http_flag = testcase.http_flag
    test_timeout = environment.get_value('TEST_TIMEOUT')

    # Get the crash output.
    result = testcase_manager.test_for_crash_with_retries(testcase,
                                                          testcase_file_path,
                                                          test_timeout,
                                                          http_flag=http_flag,
                                                          compare_crash=False)

    # If we don't get a crash, try enabling http to see if we can get a crash.
    # Skip engine fuzzer jobs (e.g. libFuzzer, AFL) for which http testcase paths
    # are not applicable.
    if (not result.is_crash() and not http_flag
            and not environment.is_engine_fuzzer_job()):
        result_with_http = testcase_manager.test_for_crash_with_retries(
            testcase,
            testcase_file_path,
            test_timeout,
            http_flag=True,
            compare_crash=False)
        if result_with_http.is_crash():
            logs.log('Testcase needs http flag for crash.')
            http_flag = True
            result = result_with_http

    # Refresh our object.
    testcase = data_handler.get_testcase_by_id(testcase_id)
    if not testcase:
        return

    # Set application command line with the correct http flag.
    application_command_line = (
        testcase_manager.get_command_line_for_application(
            testcase_file_path, needs_http=http_flag))

    # Get the crash data.
    crashed = result.is_crash()
    crash_time = result.get_crash_time()
    state = result.get_symbolized_data()
    unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)

    # Get crash info object with minidump info. Also, re-generate unsymbolized
    # stacktrace if needed.
    crash_info, _ = (crash_uploader.get_crash_info_and_stacktrace(
        application_command_line, state.crash_stacktrace, gestures))
    if crash_info:
        testcase.minidump_keys = crash_info.store_minidump()

    if not crashed:
        # Could not reproduce the crash.
        log_message = ('Testcase didn\'t crash in %d seconds (with retries)' %
                       test_timeout)
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.FINISHED,
                                             log_message)

        # In the general case, we will not attempt to symbolize if we do not detect
        # a crash. For user uploads, we should symbolize anyway to provide more
        # information about what might be happening.
        crash_stacktrace_output = utils.get_crash_stacktrace_output(
            application_command_line, state.crash_stacktrace,
            unsymbolized_crash_stacktrace)
        testcase.crash_stacktrace = data_handler.filter_stacktrace(
            crash_stacktrace_output)

        # For an unreproducible testcase, retry once on another bot to confirm
        # our results and in case this bot is in a bad state which we didn't catch
        # through our usual means.
        if data_handler.is_first_retry_for_task(testcase):
            testcase.status = 'Unreproducible, retrying'
            testcase.put()

            tasks.add_task('analyze', testcase_id, job_type)
            return

        close_invalid_testcase_and_update_status(testcase, metadata,
                                                 'Unreproducible')

        # A non-reproducing testcase might still impact production branches.
        # Add the impact task to get that information.
        task_creation.create_impact_task_if_needed(testcase)
        return

    # Update http flag and re-run testcase to store dependencies (for bundled
    # archives only).
    testcase.http_flag = http_flag
    if not store_testcase_dependencies_from_bundled_testcase_archive(
            metadata, testcase, testcase_file_path):
        return

    # Update testcase crash parameters.
    testcase.crash_type = state.crash_type
    testcase.crash_address = state.crash_address
    testcase.crash_state = state.crash_state
    crash_stacktrace_output = utils.get_crash_stacktrace_output(
        application_command_line, state.crash_stacktrace,
        unsymbolized_crash_stacktrace)
    testcase.crash_stacktrace = data_handler.filter_stacktrace(
        crash_stacktrace_output)

    # Try to guess if the bug is security or not.
    security_flag = crash_analyzer.is_security_issue(state.crash_stacktrace,
                                                     state.crash_type,
                                                     state.crash_address)
    testcase.security_flag = security_flag

    # If it is, guess the severity.
    if security_flag:
        testcase.security_severity = severity_analyzer.get_security_severity(
            state.crash_type, state.crash_stacktrace, job_type, bool(gestures))

    log_message = ('Testcase crashed in %d seconds (r%d)' %
                   (crash_time, testcase.crash_revision))
    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.FINISHED,
                                         log_message)

    # See if we have to ignore this crash.
    if crash_analyzer.ignore_stacktrace(state.crash_stacktrace):
        close_invalid_testcase_and_update_status(testcase, metadata,
                                                 'Irrelevant')
        return

    # Test for reproducibility.
    one_time_crasher_flag = not testcase_manager.test_for_reproducibility(
        testcase.fuzzer_name, testcase.actual_fuzzer_name(),
        testcase_file_path, state.crash_state, security_flag, test_timeout,
        http_flag, gestures)
    testcase.one_time_crasher_flag = one_time_crasher_flag

    # Check to see if this is a duplicate.
    project_name = data_handler.get_project_name(job_type)
    existing_testcase = data_handler.find_testcase(project_name,
                                                   state.crash_type,
                                                   state.crash_state,
                                                   security_flag)
    if existing_testcase:
        # If the existing test case is unreproducible and ours is reproducible,
        # replace the existing test case with this one.
        if existing_testcase.one_time_crasher_flag and not one_time_crasher_flag:
            duplicate_testcase = existing_testcase
            original_testcase = testcase
        else:
            duplicate_testcase = testcase
            original_testcase = existing_testcase
            metadata.status = 'Duplicate'
            metadata.duplicate_of = existing_testcase.key.id()

        duplicate_testcase.status = 'Duplicate'
        duplicate_testcase.duplicate_of = original_testcase.key.id()
        duplicate_testcase.put()

    # Set testcase and metadata status if not set already.
    if testcase.status != 'Duplicate':
        testcase.status = 'Processed'
        metadata.status = 'Confirmed'

        # Add new leaks to global blacklist to avoid detecting duplicates.
        # Only add if testcase has a direct leak crash and if it's reproducible.
        if is_lsan_enabled:
            leak_blacklist.add_crash_to_global_blacklist_if_needed(testcase)

    # Update the testcase values.
    testcase.put()

    # Update the upload metadata.
    metadata.security_flag = security_flag
    metadata.put()

    # Create tasks to
    # 1. Minimize testcase (minimize).
    # 2. Find regression range (regression).
    # 3. Find testcase impact on production branches (impact).
    # 4. Check whether testcase is fixed (progression).
    # 5. Get second stacktrace from another job in case of
    #    one-time crashers (stack).
    task_creation.create_tasks(testcase)
Example 17
def _reproduce_crash(
    testcase_url,
    build_directory,
    iterations,
    disable_xvfb,
    verbose,
    disable_android_setup,
):
    """Reproduce a crash."""
    _prepare_initial_environment(build_directory, iterations, verbose)

    # Validate the test case URL and fetch the tool's configuration.
    testcase_id = _get_testcase_id_from_url(testcase_url)
    configuration = config.ReproduceToolConfiguration(testcase_url)

    testcase = _get_testcase(testcase_id, configuration)

    # For new user uploads, we'll fail without the metadata set by analyze task.
    if not testcase.platform:
        raise errors.ReproduceToolUnrecoverableError(
            "This test case has not yet been processed. Please try again later."
        )

    # Ensure that we support this test case's platform.
    if testcase.platform not in SUPPORTED_PLATFORMS:
        raise errors.ReproduceToolUnrecoverableError(
            "The reproduce tool is not yet supported on {platform}.".format(
                platform=testcase.platform))

    # Print warnings for this test case.
    if testcase.one_time_crasher_flag:
        print("Warning: this test case was a one-time crash. It may not be "
              "reproducible.")
    if testcase.flaky_stack:
        print("Warning: this test case is known to crash with different stack "
              "traces.")

    testcase_path = _download_testcase(testcase_id, testcase, configuration)
    _update_environment_for_testcase(testcase, build_directory)

    # Validate that we're running on the right platform for this test case.
    platform = environment.platform().lower()
    if testcase.platform == "android" and platform == "linux":
        android.prepare_environment(disable_android_setup)
    elif testcase.platform == "android" and platform != "linux":
        raise errors.ReproduceToolUnrecoverableError(
            "The ClusterFuzz environment only supports running Android test cases "
            "on Linux host machines. Unable to reproduce the test case on "
            "{current_platform}.".format(current_platform=platform))
    elif testcase.platform != platform:
        raise errors.ReproduceToolUnrecoverableError(
            "The specified test case was discovered on {testcase_platform}. "
            "Unable to attempt to reproduce it on {current_platform}.".format(
                testcase_platform=testcase.platform,
                current_platform=platform))

    x_processes = []
    if not disable_xvfb:
        x_processes = _setup_x()
    timeout = environment.get_value("TEST_TIMEOUT")

    print("Running testcase...")
    try:
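        # Try a single run first; if it does not crash, offer to re-run with
        # the default number of retries below.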
        result = testcase_manager.test_for_crash_with_retries(testcase,
                                                              testcase_path,
                                                              timeout,
                                                              crash_retries=1)

        # If we can't reproduce the crash, prompt the user to try again.
        if not result.is_crash():
            _print_stacktrace(result)
            result = None
            use_default_retries = prompts.get_boolean(
                "Failed to find the desired crash on first run. Re-run "
                "{crash_retries} times?".format(
                    crash_retries=environment.get_value("CRASH_RETRIES")))
            if use_default_retries:
                print(
                    "Attempting to reproduce test case. This may take a while..."
                )
                result = testcase_manager.test_for_crash_with_retries(
                    testcase, testcase_path, timeout)

    except KeyboardInterrupt:
        print("Aborting...")
        result = None

    # Terminate Xvfb and blackbox.
    for process in x_processes:
        process.terminate()

    return result
Example 18
def execute_task(testcase_id, job_type):
    """Run a test case with a different job type to see if they reproduce."""
    testcase = data_handler.get_testcase_by_id(testcase_id)
    if not testcase:
        return

    if environment.is_engine_fuzzer_job(
            testcase.job_type) != environment.is_engine_fuzzer_job(job_type):
        # We should never reach here. But in case we do, we should bail out as
        # otherwise we will run into exceptions.
        return

    # Use a cloned testcase entity with different fuzz target parameters for
    # a different fuzzing engine.
    original_job_type = testcase.job_type
    testcase = _get_variant_testcase_for_job(testcase, job_type)

    # Set up the testcase and its dependencies.
    file_list, _, testcase_file_path = setup.setup_testcase(testcase, job_type)
    if not file_list:
        return

    # Set up a custom or regular build. We explicitly omit the crash revision
    # since we want to test against the latest build here.
    build_manager.setup_build()

    # Check if we have an application path. If not, our build failed to set up
    # correctly.
    if not build_manager.check_app_path():
        testcase = data_handler.get_testcase_by_id(testcase_id)
        data_handler.update_testcase_comment(
            testcase,
            data_types.TaskState.ERROR,
            "Build setup failed with job: " + job_type,
        )
        return

    # Disable gestures if we're running on a different platform from that of
    # the original test case.
    use_gestures = testcase.platform == environment.platform().lower()

    # Reproduce the crash.
    app_path = environment.get_value("APP_PATH")
    command = testcase_manager.get_command_line_for_application(
        testcase_file_path, app_path=app_path, needs_http=testcase.http_flag)
    test_timeout = environment.get_value("TEST_TIMEOUT", 10)
    revision = environment.get_value("APP_REVISION")
    result = testcase_manager.test_for_crash_with_retries(
        testcase,
        testcase_file_path,
        test_timeout,
        http_flag=testcase.http_flag,
        use_gestures=use_gestures,
        compare_crash=False,
    )

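    # If the crash reproduces, re-test reproducibility to decide between the
    # REPRODUCIBLE and FLAKY variant statuses.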
    if result.is_crash() and not result.should_ignore():
        crash_state = result.get_state()
        crash_type = result.get_type()
        security_flag = result.is_security_issue()

        gestures = testcase.gestures if use_gestures else None
        one_time_crasher_flag = not testcase_manager.test_for_reproducibility(
            testcase.fuzzer_name,
            testcase.actual_fuzzer_name(),
            testcase_file_path,
            crash_state,
            security_flag,
            test_timeout,
            testcase.http_flag,
            gestures,
        )
        if one_time_crasher_flag:
            status = data_types.TestcaseVariantStatus.FLAKY
        else:
            status = data_types.TestcaseVariantStatus.REPRODUCIBLE

        crash_comparer = CrashComparer(crash_state, testcase.crash_state)
        is_similar = (crash_comparer.is_similar()
                      and security_flag == testcase.security_flag)

        unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)
        symbolized_crash_stacktrace = result.get_stacktrace(symbolized=True)
        crash_stacktrace_output = utils.get_crash_stacktrace_output(
            command, symbolized_crash_stacktrace,
            unsymbolized_crash_stacktrace)
    else:
        status = data_types.TestcaseVariantStatus.UNREPRODUCIBLE
        is_similar = False
        crash_type = None
        crash_state = None
        security_flag = False
        crash_stacktrace_output = "No crash occurred."

    if original_job_type == job_type:
        # This case happens when someone clicks the 'Update last tested
        # stacktrace using trunk build' button.
        testcase = data_handler.get_testcase_by_id(testcase_id)
        testcase.last_tested_crash_stacktrace = data_handler.filter_stacktrace(
            crash_stacktrace_output)
        testcase.set_metadata("last_tested_crash_revision",
                              revision,
                              update_testcase=True)
    else:
        # Regular case of variant analysis.
        variant = data_handler.get_testcase_variant(testcase_id, job_type)
        variant.status = status
        variant.revision = revision
        variant.crash_type = crash_type
        variant.crash_state = crash_state
        variant.security_flag = security_flag
        variant.is_similar = is_similar
        # Explicitly skipping crash stacktrace for now as it makes entities
        # larger and we plan to use only crash parameters in the UI.
        variant.put()