Example #1
 def test(self):
     """Test that ASan instrumentation can be successfully set up on device."""
     adb.revert_asan_device_setup_if_needed()
     environment.reset_current_memory_tool_options()
     sanitizer.setup_asan_if_needed()
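     # A clean setup should log no errors and leave asanwrapper on the device.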
     self.assertEqual(0, self.mock.log_error.call_count)
     self.assertTrue(adb.file_exists("/system/bin/asanwrapper"))
Example #2
    def test_doesnt_mutate_options(self):
        """Test that calling get_memory_tool_options followed by
    set_memory_tool_options does not mutate sanitizer options unless we
    do so explicitly."""
        # Make environment module use the Windows symbolizer, since its path is
        # hard to get right.
        test_helpers.patch_environ(self)
        os.environ['JOB_NAME'] = 'windows_libfuzzer_chrome_asan'
        test_helpers.patch(self, [
            'system.environment.platform',
            'system.environment.get_llvm_symbolizer_path'
        ])
        self.mock.platform.return_value = 'WINDOWS'
        windows_symbolizer_path = (
            r'c:\clusterfuzz\resources\platform\windows\llvm-symbolizer.exe')

        self.mock.get_llvm_symbolizer_path.return_value = windows_symbolizer_path
        environment.reset_current_memory_tool_options()
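        # Round-trip the options dict through get/set and verify it is unchanged.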
        memory_tool_var = 'ASAN_OPTIONS'
        first_asan_options_dict = environment.get_memory_tool_options(
            memory_tool_var)
        environment.set_memory_tool_options(memory_tool_var,
                                            first_asan_options_dict)
        second_asan_options_dict = environment.get_memory_tool_options(
            memory_tool_var)
        self.assertDictEqual(first_asan_options_dict, second_asan_options_dict)
Example #3
def prepare_environment_for_testcase(testcase):
    """Set various environment variables based on the test case."""
    # Setup memory debugging tool environment.
    environment.reset_current_memory_tool_options(
        redzone_size=testcase.redzone)

    # Setup environment variable for window size and location properties.
    # Explicitly use empty string to indicate use of default window properties.
    if hasattr(testcase, 'window_argument'):
        environment.set_value('WINDOW_ARG', testcase.window_argument)

    # Adjust timeout based on the stored multiplier (if available).
    if hasattr(testcase, 'timeout_multiplier') and testcase.timeout_multiplier:
        test_timeout = environment.get_value('TEST_TIMEOUT')
        environment.set_value('TEST_TIMEOUT',
                              int(test_timeout * testcase.timeout_multiplier))

    # Override APP_ARGS with minimized arguments (if available).
    if (hasattr(testcase, 'minimized_arguments')
            and testcase.minimized_arguments):
        environment.set_value('APP_ARGS', testcase.minimized_arguments)

    # Add FUZZ_TARGET to environment if this is a fuzz target testcase.
    fuzz_target = testcase.get_metadata('fuzzer_binary_name')
    if fuzz_target:
        environment.set_value('FUZZ_TARGET', fuzz_target)
Example #4
    def run(self, timeout):
        """Merge testcases from corpus from other fuzz targets."""
        if not shell.get_directory_file_count(self.context.shared_corpus_path):
            logs.log('No files found in shared corpus, skip merge.')
            return

        # Run pruning on the shared corpus and log the result in case of error.
        logs.log('Merging shared corpus...')
        environment.reset_current_memory_tool_options(
            redzone_size=DEFAULT_REDZONE)
        self.runner.process_sanitizer_options()

        additional_args = self.runner.get_libfuzzer_flags()

        result = self.runner.merge(
            [self.context.minimized_corpus_path, self.context.shared_corpus_path],
            timeout,
            artifact_prefix=self.context.bad_units_path,
            tmp_dir=self.context.merge_tmp_dir,
            additional_args=additional_args)

        symbolized_output = stack_symbolizer.symbolize_stacktrace(
            result.output)
        if result.timed_out:
            logs.log_error(
                'Corpus pruning timed out while merging shared corpus: %s.' %
                symbolized_output)
        elif result.return_code:
            logs.log_error(
                'Corpus pruning failed to merge shared corpus: %s.' %
                symbolized_output)
        else:
            logs.log('Shared corpus merge finished successfully.',
                     output=symbolized_output)
Example #5
    def run(self, timeout):
        """Merge testcases from corpus from other fuzz targets."""
        if not shell.get_directory_file_count(self.context.shared_corpus_path):
            logs.log("No files found in shared corpus, skip merge.")
            return

        # Run pruning on the shared corpus and log the result in case of error.
        logs.log("Merging shared corpus...")
        environment.reset_current_memory_tool_options(
            redzone_size=DEFAULT_REDZONE)
        self.runner.process_sanitizer_options()

        additional_args = self.runner.get_libfuzzer_flags()

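        # Attempt the merge; timeouts and errors are handled separately below.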
        try:
            result = self.runner.minimize_corpus(
                additional_args,
                [self.context.shared_corpus_path],
                self.context.minimized_corpus_path,
                self.context.bad_units_path,
                timeout,
            )
            symbolized_output = stack_symbolizer.symbolize_stacktrace(
                result.logs)
            logs.log("Shared corpus merge finished successfully.",
                     output=symbolized_output)
        except engine.TimeoutError as e:
            logs.log_error(
                "Corpus pruning timed out while merging shared corpus: " +
                e.message)
        except engine.Error as e:
            raise CorpusPruningException(
                "Corpus pruning failed to merge shared corpus\n" + e.message)
Example #6
def prepare_environment_for_testcase(testcase):
    """Set various environment variables based on the test case."""
    # Setup memory debugging tool environment.
    environment.reset_current_memory_tool_options(
        redzone_size=testcase.redzone)

    # Setup environment variable for window size and location properties.
    # Explicit override to avoid using the default one from the job definition,
    # since that contains unsubstituted vars like $WIDTH, etc.
    environment.set_value('WINDOW_ARG', testcase.window_argument)

    # Adjust timeout based on the stored multiplier (if available).
    if testcase.timeout_multiplier:
        test_timeout = environment.get_value('TEST_TIMEOUT')
        environment.set_value('TEST_TIMEOUT',
                              int(test_timeout * testcase.timeout_multiplier))

    # Add FUZZ_TARGET to environment if this is a fuzz target testcase.
    fuzz_target = testcase.get_metadata('fuzzer_binary_name')
    if fuzz_target:
        environment.set_value('FUZZ_TARGET', fuzz_target)

    # Override APP_ARGS with minimized arguments (if available). Don't do this
    # for the variant task, since other job types can have their own set of
    # required arguments; use the full set of arguments of that job instead.
    task_name = environment.get_value('TASK_NAME')
    app_args = _get_application_arguments(testcase, task_name)
    if app_args:
        environment.set_value('APP_ARGS', app_args)
Example #7
  def process_bad_units(self, bad_units_path, quarantine_corpus_path, crashes):
    """Process bad units found during merge."""
    # TODO(ochang): A lot of this function is similar to parts of fuzz_task.
    # Ideally fuzz_task can be refactored in a way that lets us share the common
    # code.

    environment.reset_current_memory_tool_options(redzone_size=DEFAULT_REDZONE)
    self.runner.process_sanitizer_options()

    logs.log('Processing bad units.')
    corpus_file_paths = _get_corpus_file_paths(bad_units_path)
    num_bad_units = 0

    # Run each corpus item individually.
    for i, unit_path in enumerate(corpus_file_paths, 1):
      if i % 100 == 0:
        logs.log('Up to %d' % i)

      unit_name = os.path.basename(unit_path)
      if unit_name.startswith('timeout-') or unit_name.startswith('oom-'):
        # Don't waste time re-running timeout or oom testcases.
        unit_path = self._quarantine_unit(unit_path, quarantine_corpus_path)
        num_bad_units += 1
        continue

      result = self._run_single_unit(unit_path)

      if (not result.timed_out and
          not crash_analyzer.is_memory_tool_crash(result.output)):
        # Didn't crash or time out.
        continue

      if result.timed_out:
        # Slow unit. Quarantine it.
        unit_path = self._quarantine_unit(unit_path, quarantine_corpus_path)
        num_bad_units += 1
        continue

      # Get memory tool crash information.
      state = stack_analyzer.get_crash_data(result.output, symbolize_flag=True)

      # Crashed or caused a leak. Quarantine it.
      unit_path = self._quarantine_unit(unit_path, quarantine_corpus_path)
      num_bad_units += 1

      if crash_analyzer.ignore_stacktrace(state.crash_stacktrace):
        continue

      # Local de-duplication.
      if state.crash_state not in crashes:
        security_flag = crash_analyzer.is_security_issue(
            state.crash_stacktrace, state.crash_type, state.crash_address)
        crashes[state.crash_state] = CorpusCrash(
            state.crash_state, state.crash_type, state.crash_address,
            state.crash_stacktrace, unit_path, security_flag)

    logs.log('Found %d bad units, %d unique crashes.' % (num_bad_units,
                                                         len(crashes)))
Example #8
 def test_ubsan_disabled(self):
     """Test reset_current_memory_tool_options when ubsan is disabled."""
     os.environ['JOB_NAME'] = 'libfuzzer_chrome_asan'
     os.environ['UBSAN'] = 'True'
     environment.reset_current_memory_tool_options(disable_ubsan=True)
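     # With ubsan disabled, only the minimal "off" options should remain set.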
     self.assertDictEqual(
         {
             'halt_on_error': 0,
             'print_stacktrace': 0,
             'print_suppressions': 0
         }, environment.get_memory_tool_options('UBSAN_OPTIONS'))
Example #9
 def test_ubsan_disabled(self):
     """Test reset_current_memory_tool_options when ubsan is disabled."""
     os.environ["JOB_NAME"] = "libfuzzer_chrome_asan"
     os.environ["UBSAN"] = "True"
     environment.reset_current_memory_tool_options(disable_ubsan=True)
     self.assertDictEqual(
         {
             "halt_on_error": 0,
             "print_stacktrace": 0,
             "print_suppressions": 0
         },
         environment.get_memory_tool_options("UBSAN_OPTIONS"),
     )
Example #10
def _setup_memory_tools_environment(testcase):
    """Set up environment for various memory tools used."""
    env = testcase.get_metadata('env')
    if not env:
        environment.reset_current_memory_tool_options(
            redzone_size=testcase.redzone)
        return

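    # The testcase carries explicit per-tool options; apply or clear each one.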
    for options_name, options_value in six.iteritems(env):
        if not options_value:
            environment.remove_key(options_name)
            continue
        environment.set_memory_tool_options(options_name, options_value)
Example #11
  def setUp(self):
    android_helpers.AndroidTest.setUp(self)
    BaseIntegrationTest.setUp(self)

    if android.settings.get_sanitizer_tool_name() != "hwasan":
      raise Exception("Device is not set up with HWASan.")

    environment.set_value("BUILD_DIR", ANDROID_DATA_DIR)
    environment.set_value("JOB_NAME", "libfuzzer_hwasan_android_device")
    environment.reset_current_memory_tool_options()

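    # Cache the paths and sanitizer options used by the tests below.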
    self.crash_dir = TEMP_DIR
    self.adb_path = android.adb.get_adb_path()
    self.hwasan_options = 'HWASAN_OPTIONS="%s"' % quote(
        environment.get_value("HWASAN_OPTIONS"))
Example #12
 def test_windows_symbolizer(self):
     """Test that the reset_current_memory_tool_options returns the expected path
 to the llvm symbolizer on Windows."""
     os.environ['JOB_NAME'] = 'windows_libfuzzer_chrome_asan'
     test_helpers.patch(self, [
         'system.environment.platform',
         'system.environment.get_llvm_symbolizer_path'
     ])
     self.mock.platform.return_value = 'WINDOWS'
     windows_symbolizer_path = (
         r'c:\clusterfuzz\resources\platform\windows\llvm-symbolizer.exe')
     self.mock.get_llvm_symbolizer_path.return_value = windows_symbolizer_path
     environment.reset_current_memory_tool_options()
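     # The reset should embed the mocked symbolizer path in ASAN_OPTIONS.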
     self.assertIn(
         'external_symbolizer_path="%s"' % windows_symbolizer_path,
         os.environ['ASAN_OPTIONS'])
Example #13
def cleanup_task_state():
  """Cleans state before and after a task is executed."""
  # Cleanup stale processes.
  process_handler.cleanup_stale_processes()

  # Clear build urls, temp and testcase directories.
  shell.clear_build_urls_directory()
  shell.clear_crash_stacktraces_directory()
  shell.clear_testcase_directories()
  shell.clear_temp_directory()
  shell.clear_system_temp_directory()
  shell.clear_device_temp_directories()

  # Reset memory tool environment variables.
  environment.reset_current_memory_tool_options()

  # Call python's garbage collector.
  utils.python_gc()
Example #14
    def setUp(self):
        android_helpers.AndroidTest.setUp(self)
        BaseIntegrationTest.setUp(self)

        if android.settings.get_sanitizer_tool_name() != 'hwasan':
            raise Exception('Device is not set up with HWASan.')

        environment.set_value('BUILD_DIR', ANDROID_DATA_DIR)
        environment.set_value('JOB_NAME', 'libfuzzer_hwasan_android_device')
        environment.reset_current_memory_tool_options()

        self.crash_dir = TEMP_DIR
        self.adb_path = android.adb.get_adb_path()
        self.hwasan_options = 'HWASAN_OPTIONS="%s"' % quote(
            environment.get_value('HWASAN_OPTIONS'))
        self.ld_library_path = (
            'LD_LIBRARY_PATH=' +
            android.sanitizer.get_ld_library_path_for_sanitizers())
Example #15
    def run(self, initial_corpus_path, minimized_corpus_path, bad_units_path):
        """Run corpus pruning. Output result to directory."""
        if not shell.get_directory_file_count(initial_corpus_path):
            # Empty corpus, nothing to do.
            return

        # Set memory tool options and fuzzer arguments.
        engine_common.unpack_seed_corpus_if_needed(self.runner.target_path,
                                                   initial_corpus_path,
                                                   force_unpack=True)

        environment.reset_current_memory_tool_options(redzone_size=MIN_REDZONE,
                                                      leaks=True)
        self.runner.process_sanitizer_options()
        additional_args = self.runner.get_libfuzzer_flags()

        # Execute fuzzer with arguments for corpus pruning.
        logs.log("Running merge...")
        try:
            result = self.runner.minimize_corpus(
                additional_args,
                [initial_corpus_path],
                minimized_corpus_path,
                bad_units_path,
                CORPUS_PRUNING_TIMEOUT,
            )
        except engine.TimeoutError as e:
            raise CorpusPruningException(
                "Corpus pruning timed out while minimizing corpus\n" +
                e.message)
        except engine.Error as e:
            raise CorpusPruningException(
                "Corpus pruning failed to minimize corpus\n" + e.message)

        symbolized_output = stack_symbolizer.symbolize_stacktrace(result.logs)

        # Sanity check that there are files in minimized corpus after merging.
        if not shell.get_directory_file_count(minimized_corpus_path):
            raise CorpusPruningException(
                "Corpus pruning failed to minimize corpus\n" +
                symbolized_output)

        logs.log("Corpus merge finished successfully.",
                 output=symbolized_output)
Example #16
 def test_ubsan_enabled(self):
     """Test reset_current_memory_tool_options when ubsan is enabled."""
     os.environ['JOB_NAME'] = 'libfuzzer_chrome_asan'
     os.environ['UBSAN'] = 'True'
     environment.reset_current_memory_tool_options(disable_ubsan=False)
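     # With ubsan enabled, the full set of runtime options should be present.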
     self.assertDictEqual(
         {
             'halt_on_error': 1,
             'handle_abort': 1,
             'handle_segv': 1,
             'handle_sigbus': 1,
             'handle_sigfpe': 1,
             'handle_sigill': 1,
             'print_stacktrace': 1,
             'print_summary': 1,
             'print_suppressions': 0,
             'silence_unsigned_overflow': 1,
             'use_sigaltstack': 1
         }, environment.get_memory_tool_options('UBSAN_OPTIONS'))
Example #17
 def test_windows_symbolizer(self):
     """Test that the reset_current_memory_tool_options returns the expected path
     to the llvm symbolizer on Windows."""
     os.environ["JOB_NAME"] = "windows_libfuzzer_chrome_asan"
     test_helpers.patch(
         self,
         [
             "system.environment.platform",
             "system.environment.get_llvm_symbolizer_path",
         ],
     )
     self.mock.platform.return_value = "WINDOWS"
     windows_symbolizer_path = (
         r"c:\clusterfuzz\resources\platform\windows\llvm-symbolizer.exe")
     self.mock.get_llvm_symbolizer_path.return_value = windows_symbolizer_path
     environment.reset_current_memory_tool_options()
     self.assertIn(
         'external_symbolizer_path="%s"' % windows_symbolizer_path,
         os.environ["ASAN_OPTIONS"],
     )
Example #18
    def run(self, initial_corpus_path, minimized_corpus_path, bad_units_path):
        """Run corpus pruning. Output result to directory."""
        if not shell.get_directory_file_count(initial_corpus_path):
            # Empty corpus, nothing to do.
            return

        # Set memory tool options and fuzzer arguments.
        engine_common.unpack_seed_corpus_if_needed(self.runner.fuzzer_path,
                                                   initial_corpus_path,
                                                   force_unpack=True)

        environment.reset_current_memory_tool_options(redzone_size=MIN_REDZONE,
                                                      leaks=True)
        self.runner.process_sanitizer_options()
        additional_args = self.runner.get_libfuzzer_flags()

        # Execute fuzzer with arguments for corpus pruning.
        logs.log('Running merge...')
        result = self.runner.merge(
            [minimized_corpus_path, initial_corpus_path],
            CORPUS_PRUNING_TIMEOUT,
            artifact_prefix=bad_units_path,
            tmp_dir=self.context.merge_tmp_dir,
            additional_args=additional_args)

        # Sanity check that we didn't time out.
        symbolized_output = stack_symbolizer.symbolize_stacktrace(
            result.output)
        if result.timed_out:
            raise CorpusPruningException(
                'Corpus pruning timed out while merging corpus: %s.' %
                symbolized_output)
        # Sanity check that we didn't error out and there are files in minimized
        # corpus after merging.
        if (result.return_code
                or not shell.get_directory_file_count(minimized_corpus_path)):
            raise CorpusPruningException(
                'Corpus pruning failed to merge corpus: %s.' %
                symbolized_output)
        logs.log('Corpus merge finished successfully.',
                 output=symbolized_output)
Example #19
 def test_ubsan_enabled(self):
     """Test reset_current_memory_tool_options when ubsan is enabled."""
     os.environ["JOB_NAME"] = "libfuzzer_chrome_asan"
     os.environ["UBSAN"] = "True"
     environment.reset_current_memory_tool_options(disable_ubsan=False)
     self.assertDictEqual(
         {
             "halt_on_error": 1,
             "handle_abort": 1,
             "handle_segv": 1,
             "handle_sigbus": 1,
             "handle_sigfpe": 1,
             "handle_sigill": 1,
             "print_stacktrace": 1,
             "print_summary": 1,
             "print_suppressions": 0,
             "silence_unsigned_overflow": 1,
             "use_sigaltstack": 1,
         },
         environment.get_memory_tool_options("UBSAN_OPTIONS"),
     )
Example #20
    def run(self, timeout):
        """Merge testcases from corpus from other fuzz targets."""
        if not shell.get_directory_file_count(self.context.shared_corpus_path):
            logs.log('No files found in shared corpus, skip merge.')
            return None

        # Run pruning on the shared corpus and log the result in case of error.
        logs.log('Merging shared corpus...')
        environment.reset_current_memory_tool_options(
            redzone_size=DEFAULT_REDZONE)
        self.runner.process_sanitizer_options()

        additional_args = self.runner.get_libfuzzer_flags()

        try:
            result = self.runner.minimize_corpus(
                additional_args, [self.context.shared_corpus_path],
                self.context.minimized_corpus_path,
                self.context.bad_units_path, timeout)
            symbolized_output = stack_symbolizer.symbolize_stacktrace(
                result.logs)
            logs.log('Shared corpus merge finished successfully.',
                     output=symbolized_output)
        except engine.TimeoutError as e:
            # Other cross-pollinated fuzzer corpora can have unexpected test
            # cases that time us out. This is expected, so bail out.
            logs.log_warn(
                'Corpus pruning timed out while merging shared corpus\n' +
                repr(e))
            return None
        except engine.Error as e:
            # Other cross-pollinated fuzzer corpora can be large, so we can run
            # out of disk space and fail. This is expected, so bail out.
            logs.log_warn('Corpus pruning failed to merge shared corpus\n' +
                          repr(e))
            return None

        return result.stats
Example #21
def execute_task(testcase_id, job_type):
  """Run analyze task."""
  # Reset redzones.
  environment.reset_current_memory_tool_options(redzone_size=128)

  # Unset window size and location properties so that defaults are used.
  environment.set_value('WINDOW_ARG', '')

  # Locate the testcase associated with the id.
  testcase = data_handler.get_testcase_by_id(testcase_id)
  if not testcase:
    return

  data_handler.update_testcase_comment(testcase, data_types.TaskState.STARTED)

  metadata = data_types.TestcaseUploadMetadata.query(
      data_types.TestcaseUploadMetadata.testcase_id == int(testcase_id)).get()
  if not metadata:
    logs.log_error(
        'Testcase %s has no associated upload metadata.' % testcase_id)
    testcase.key.delete()
    return

  is_lsan_enabled = environment.get_value('LSAN')
  if is_lsan_enabled:
    # Create an empty local blacklist so all leaks are visible to the uploader.
    leak_blacklist.create_empty_local_blacklist()

  # Store the bot name and timestamp in upload metadata.
  bot_name = environment.get_value('BOT_NAME')
  metadata.bot_name = bot_name
  metadata.timestamp = datetime.datetime.utcnow()
  metadata.put()

  # Adjust the test timeout, if user has provided one.
  if metadata.timeout:
    environment.set_value('TEST_TIMEOUT', metadata.timeout)

  # Adjust the number of retries, if user has provided one.
  if metadata.retries is not None:
    environment.set_value('CRASH_RETRIES', metadata.retries)

  # Setup testcase and get absolute testcase path.
  file_list, _, testcase_file_path = setup.setup_testcase(testcase)
  if not file_list:
    return

  # Set up a custom or regular build based on revision.
  build_manager.setup_build(testcase.crash_revision)

  # Check if we have an application path. If not, our build failed to
  # set up correctly.
  app_path = environment.get_value('APP_PATH')
  if not app_path:
    data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                         'Build setup failed')

    if data_handler.is_first_retry_for_task(testcase):
      build_fail_wait = environment.get_value('FAIL_WAIT')
      tasks.add_task(
          'analyze', testcase_id, job_type, wait_time=build_fail_wait)
    else:
      close_invalid_testcase_and_update_status(testcase, metadata,
                                               'Build setup failed')
    return

  # Update initial testcase information.
  testcase.absolute_path = testcase_file_path
  testcase.job_type = job_type
  testcase.binary_flag = utils.is_binary_file(testcase_file_path)
  testcase.queue = tasks.default_queue()
  testcase.crash_state = ''

  # Set initial testcase metadata fields (e.g. build url, etc).
  data_handler.set_initial_testcase_metadata(testcase)

  # Update minimized arguments and use ones provided during user upload.
  if not testcase.minimized_arguments:
    minimized_arguments = environment.get_value('APP_ARGS') or ''
    additional_command_line_flags = testcase.get_metadata(
        'uploaded_additional_args')
    if additional_command_line_flags:
      minimized_arguments += ' %s' % additional_command_line_flags
    environment.set_value('APP_ARGS', minimized_arguments)
    testcase.minimized_arguments = minimized_arguments

  # Update other fields not set at upload time.
  testcase.crash_revision = environment.get_value('APP_REVISION')
  data_handler.set_initial_testcase_metadata(testcase)
  testcase.put()

  # Initialize some variables.
  gestures = testcase.gestures
  http_flag = testcase.http_flag
  test_timeout = environment.get_value('TEST_TIMEOUT')

  # Get the crash output.
  result = testcase_manager.test_for_crash_with_retries(
      testcase,
      testcase_file_path,
      test_timeout,
      http_flag=http_flag,
      compare_crash=False)

  # If we don't get a crash, try enabling http to see if we can get a crash.
  # Skip engine fuzzer jobs (e.g. libFuzzer, AFL) for which http testcase paths
  # are not applicable.
  if (not result.is_crash() and not http_flag and
      not environment.is_engine_fuzzer_job()):
    result_with_http = testcase_manager.test_for_crash_with_retries(
        testcase,
        testcase_file_path,
        test_timeout,
        http_flag=True,
        compare_crash=False)
    if result_with_http.is_crash():
      logs.log('Testcase needs http flag for crash.')
      http_flag = True
      result = result_with_http

  # Refresh our object.
  testcase = data_handler.get_testcase_by_id(testcase_id)
  if not testcase:
    return

  # Set application command line with the correct http flag.
  application_command_line = (
      testcase_manager.get_command_line_for_application(
          testcase_file_path, needs_http=http_flag))

  # Get the crash data.
  crashed = result.is_crash()
  crash_time = result.get_crash_time()
  state = result.get_symbolized_data()
  unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)

  # Get crash info object with minidump info. Also, re-generate unsymbolized
  # stacktrace if needed.
  crash_info, _ = (
      crash_uploader.get_crash_info_and_stacktrace(
          application_command_line, state.crash_stacktrace, gestures))
  if crash_info:
    testcase.minidump_keys = crash_info.store_minidump()

  if not crashed:
    # Could not reproduce the crash.
    log_message = (
        'Testcase didn\'t crash in %d seconds (with retries)' % test_timeout)
    data_handler.update_testcase_comment(
        testcase, data_types.TaskState.FINISHED, log_message)

    # For an unreproducible testcase, retry once on another bot to confirm
    # our results and in case this bot is in a bad state which we didn't catch
    # through our usual means.
    if data_handler.is_first_retry_for_task(testcase):
      testcase.status = 'Unreproducible, retrying'
      testcase.put()

      tasks.add_task('analyze', testcase_id, job_type)
      return

    # In the general case, we will not attempt to symbolize if we do not detect
    # a crash. For user uploads, we should symbolize anyway to provide more
    # information about what might be happening.
    crash_stacktrace_output = utils.get_crash_stacktrace_output(
        application_command_line, state.crash_stacktrace,
        unsymbolized_crash_stacktrace)
    testcase.crash_stacktrace = data_handler.filter_stacktrace(
        crash_stacktrace_output)
    close_invalid_testcase_and_update_status(testcase, metadata,
                                             'Unreproducible')

    # A non-reproducing testcase might still impact production branches.
    # Add the impact task to get that information.
    task_creation.create_impact_task_if_needed(testcase)
    return

  # Update http flag and re-run testcase to store dependencies (for bundled
  # archives only).
  testcase.http_flag = http_flag
  if not store_testcase_dependencies_from_bundled_testcase_archive(
      metadata, testcase, testcase_file_path):
    return

  # Update testcase crash parameters.
  testcase.crash_type = state.crash_type
  testcase.crash_address = state.crash_address
  testcase.crash_state = state.crash_state

  # Try to guess if the bug is security or not.
  security_flag = crash_analyzer.is_security_issue(
      state.crash_stacktrace, state.crash_type, state.crash_address)
  testcase.security_flag = security_flag

  # If it is, guess the severity.
  if security_flag:
    testcase.security_severity = severity_analyzer.get_security_severity(
        state.crash_type, state.crash_stacktrace, job_type, bool(gestures))

  log_message = ('Testcase crashed in %d seconds (r%d)' %
                 (crash_time, testcase.crash_revision))
  data_handler.update_testcase_comment(testcase, data_types.TaskState.FINISHED,
                                       log_message)

  # See if we have to ignore this crash.
  if crash_analyzer.ignore_stacktrace(state.crash_stacktrace):
    close_invalid_testcase_and_update_status(testcase, metadata, 'Irrelevant')
    return

  # Test for reproducibility.
  one_time_crasher_flag = not testcase_manager.test_for_reproducibility(
      testcase_file_path, state.crash_state, security_flag, test_timeout,
      http_flag, gestures)
  testcase.one_time_crasher_flag = one_time_crasher_flag

  # Check to see if this is a duplicate.
  project_name = data_handler.get_project_name(job_type)
  existing_testcase = data_handler.find_testcase(
      project_name, state.crash_type, state.crash_state, security_flag)
  if existing_testcase:
    # If the existing test case is unreproducible and ours is reproducible,
    # replace the existing test case with this one.
    if existing_testcase.one_time_crasher_flag and not one_time_crasher_flag:
      duplicate_testcase = existing_testcase
      original_testcase = testcase
    else:
      duplicate_testcase = testcase
      original_testcase = existing_testcase
      metadata.status = 'Duplicate'
      metadata.duplicate_of = existing_testcase.key.id()

    duplicate_testcase.status = 'Duplicate'
    duplicate_testcase.duplicate_of = original_testcase.key.id()
    duplicate_testcase.put()

  # Set testcase and metadata status if not set already.
  if testcase.status != 'Duplicate':
    testcase.status = 'Processed'
    metadata.status = 'Confirmed'

    # Add new leaks to global blacklist to avoid detecting duplicates.
    # Only add if testcase has a direct leak crash and if it's reproducible.
    if is_lsan_enabled:
      leak_blacklist.add_crash_to_global_blacklist_if_needed(testcase)

  # Add application-specific information to the trace.
  crash_stacktrace_output = utils.get_crash_stacktrace_output(
      application_command_line, state.crash_stacktrace,
      unsymbolized_crash_stacktrace)
  testcase.crash_stacktrace = data_handler.filter_stacktrace(
      crash_stacktrace_output)

  # Update the testcase values.
  testcase.put()

  # Update the upload metadata.
  metadata.security_flag = security_flag
  metadata.put()

  # Create tasks to
  # 1. Minimize testcase (minimize).
  # 2. Find regression range (regression).
  # 3. Find testcase impact on production branches (impact).
  # 4. Check whether testcase is fixed (progression).
  # 5. Get second stacktrace from another job in case of
  #    one-time crashers (stack).
  task_creation.create_tasks(testcase)
Example #22
def setup_testcase(testcase):
    """Sets up the testcase and needed dependencies like fuzzer,
  data bundle, etc."""
    fuzzer_name = testcase.fuzzer_name
    job_type = testcase.job_type
    task_name = environment.get_value('TASK_NAME')
    testcase_fail_wait = environment.get_value('FAIL_WAIT')
    testcase_id = testcase.key.id()

    # Clear testcase directories.
    shell.clear_testcase_directories()

    # Setup memory debugging tool environment.
    environment.reset_current_memory_tool_options(
        redzone_size=testcase.redzone)

    # Adjust the test timeout value if this is coming from a user-uploaded
    # testcase.
    _set_timeout_value_from_user_upload(testcase_id)

    if task_name == 'minimize':
        # Allow minimizing with a different fuzzer set up.
        minimize_fuzzer_override = environment.get_value(
            'MINIMIZE_FUZZER_OVERRIDE')
        fuzzer_name = minimize_fuzzer_override or fuzzer_name

    # Update the fuzzer if necessary in order to get the updated data bundle.
    if fuzzer_name:
        try:
            update_successful = update_fuzzer_and_data_bundles(fuzzer_name)
        except errors.InvalidFuzzerError:
            # Close testcase and don't recreate tasks if this fuzzer is invalid.
            testcase.open = False
            testcase.fixed = 'NA'
            testcase.set_metadata('fuzzer_was_deleted', True)
            logs.log_error('Closed testcase %d with invalid fuzzer %s.' %
                           (testcase_id, fuzzer_name))

            error_message = 'Fuzzer %s no longer exists.' % fuzzer_name
            data_handler.update_testcase_comment(testcase,
                                                 data_types.TaskState.ERROR,
                                                 error_message)
            return None, None, None

        if not update_successful:
            error_message = 'Unable to setup fuzzer %s.' % fuzzer_name
            data_handler.update_testcase_comment(testcase,
                                                 data_types.TaskState.ERROR,
                                                 error_message)
            tasks.add_task(task_name,
                           testcase_id,
                           job_type,
                           wait_time=testcase_fail_wait)
            return None, None, None

    # Extract the testcase and any of its resources to the input directory.
    file_list, input_directory, testcase_file_path = unpack_testcase(testcase)
    if not file_list:
        error_message = 'Unable to setup testcase %s.' % testcase_file_path
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             error_message)
        tasks.add_task(task_name,
                       testcase_id,
                       job_type,
                       wait_time=testcase_fail_wait)
        return None, None, None

    # For Android/Fuchsia, we need to sync our local testcases directory with the
    # one on the device.
    if environment.platform() == 'ANDROID':
        _copy_testcase_to_device_and_setup_environment(testcase,
                                                       testcase_file_path)

    if environment.platform() == 'FUCHSIA':
        fuchsia.device.copy_testcase_to_device(testcase_file_path)

    # Push testcases to worker.
    if environment.is_trusted_host():
        from bot.untrusted_runner import file_host
        file_host.push_testcases_to_worker()

    # Copy global blacklist into local blacklist.
    is_lsan_enabled = environment.get_value('LSAN')
    if is_lsan_enabled:
        # Get local blacklist without this testcase's entry.
        leak_blacklist.copy_global_to_local_blacklist(
            excluded_testcase=testcase)

    # Setup environment variable for window size and location properties.
    # Explicitly use empty string to indicate use of default window properties.
    if hasattr(testcase, 'window_argument'):
        environment.set_value('WINDOW_ARG', testcase.window_argument)

    # Adjust timeout based on the stored multiplier (if available).
    if hasattr(testcase, 'timeout_multiplier') and testcase.timeout_multiplier:
        test_timeout = environment.get_value('TEST_TIMEOUT')
        environment.set_value('TEST_TIMEOUT',
                              int(test_timeout * testcase.timeout_multiplier))

    # Override APP_ARGS with minimized arguments (if available).
    if (hasattr(testcase, 'minimized_arguments')
            and testcase.minimized_arguments):
        environment.set_value('APP_ARGS', testcase.minimized_arguments)

    # Add FUZZ_TARGET to environment if this is a fuzz target testcase.
    fuzz_target = testcase.get_metadata('fuzzer_binary_name')
    if fuzz_target:
        environment.set_value('FUZZ_TARGET', fuzz_target)

    return file_list, input_directory, testcase_file_path
Example #23
def execute_task(testcase_id, job_type):
  """Execute a symbolize command."""
  # Locate the testcase associated with the id.
  testcase = data_handler.get_testcase_by_id(testcase_id)

  # We should at least have a symbolized debug or release build.
  if not build_manager.has_symbolized_builds():
    return

  data_handler.update_testcase_comment(testcase, data_types.TaskState.STARTED)

  # Setup testcase and its dependencies.
  file_list, _, testcase_file_path = setup.setup_testcase(testcase, job_type)
  if not file_list:
    return

  # Initialize variables.
  build_fail_wait = environment.get_value("FAIL_WAIT")

  old_crash_stacktrace = data_handler.get_stacktrace(testcase)
  sym_crash_type = testcase.crash_type
  sym_crash_address = testcase.crash_address
  sym_crash_state = testcase.crash_state
  sym_redzone = DEFAULT_REDZONE
  warmup_timeout = environment.get_value("WARMUP_TIMEOUT")

  # Decide which build revision to use.
  if testcase.crash_stacktrace == "Pending":
    # This usually happens when someone clicked the 'Update stacktrace from
    # trunk' button on the testcase details page. In this case, we are forced
    # to use trunk. No revision -> trunk build.
    build_revision = None
  else:
    build_revision = testcase.crash_revision

  # Set up a custom or regular build based on revision.
  build_manager.setup_build(build_revision)

  # Get crash revision used in setting up build.
  crash_revision = environment.get_value("APP_REVISION")

  if not build_manager.check_app_path():
    testcase = data_handler.get_testcase_by_id(testcase_id)
    data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                         "Build setup failed")
    tasks.add_task(
        "symbolize", testcase_id, job_type, wait_time=build_fail_wait)
    return

  # ASAN tool settings (if the tool is used).
  # See if we can get better stacks with higher redzone sizes.
  # A UAF might actually turn out to be OOB read/write with a bigger redzone.
  if environment.tool_matches("ASAN", job_type) and testcase.security_flag:
    redzone = MAX_REDZONE
    while redzone >= MIN_REDZONE:
      environment.reset_current_memory_tool_options(
          redzone_size=redzone, disable_ubsan=testcase.disable_ubsan)

      process_handler.terminate_stale_application_instances()
      command = testcase_manager.get_command_line_for_application(
          testcase_file_path, needs_http=testcase.http_flag)
      return_code, crash_time, output = process_handler.run_process(
          command, timeout=warmup_timeout, gestures=testcase.gestures)
      crash_result = CrashResult(return_code, crash_time, output)

      if crash_result.is_crash() and "AddressSanitizer" in output:
        state = crash_result.get_symbolized_data()
        security_flag = crash_result.is_security_issue()

        if (not crash_analyzer.ignore_stacktrace(state.crash_stacktrace) and
            security_flag == testcase.security_flag and
            state.crash_type == testcase.crash_type and
            (state.crash_type != sym_crash_type or
             state.crash_state != sym_crash_state)):
          logs.log("Changing crash parameters.\nOld : %s, %s, %s" %
                   (sym_crash_type, sym_crash_address, sym_crash_state))

          sym_crash_type = state.crash_type
          sym_crash_address = state.crash_address
          sym_crash_state = state.crash_state
          sym_redzone = redzone
          old_crash_stacktrace = state.crash_stacktrace

          logs.log("\nNew : %s, %s, %s" %
                   (sym_crash_type, sym_crash_address, sym_crash_state))
          break

      redzone //= 2  # Integer division keeps the redzone size an int.

  # We should have at least a symbolized debug or a release build.
  symbolized_builds = build_manager.setup_symbolized_builds(crash_revision)
  if not symbolized_builds or (
      not build_manager.check_app_path() and
      not build_manager.check_app_path("APP_PATH_DEBUG")):
    testcase = data_handler.get_testcase_by_id(testcase_id)
    data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                         "Build setup failed")
    tasks.add_task(
        "symbolize", testcase_id, job_type, wait_time=build_fail_wait)
    return

  # Increase malloc_context_size to get all stack frames. Default is 30.
  environment.reset_current_memory_tool_options(
      redzone_size=sym_redzone,
      malloc_context_size=STACK_FRAME_COUNT,
      symbolize_inline_frames=True,
      disable_ubsan=testcase.disable_ubsan,
  )

  # TSAN tool settings (if the tool is used).
  if environment.tool_matches("TSAN", job_type):
    environment.set_tsan_max_history_size()

  # Do the symbolization if supported by this application.
  result, sym_crash_stacktrace = get_symbolized_stacktraces(
      testcase_file_path, testcase, old_crash_stacktrace, sym_crash_state)

  # Update crash parameters.
  testcase = data_handler.get_testcase_by_id(testcase_id)
  testcase.crash_type = sym_crash_type
  testcase.crash_address = sym_crash_address
  testcase.crash_state = sym_crash_state
  testcase.crash_stacktrace = data_handler.filter_stacktrace(
      sym_crash_stacktrace)

  if not result:
    data_handler.update_testcase_comment(
        testcase,
        data_types.TaskState.ERROR,
        "Unable to reproduce crash, skipping "
        "stacktrace update",
    )
  else:
    # Switch build url to use the less-optimized symbolized build with better
    # stacktrace.
    build_url = environment.get_value("BUILD_URL")
    if build_url:
      testcase.set_metadata("build_url", build_url, update_testcase=False)

    data_handler.update_testcase_comment(testcase,
                                         data_types.TaskState.FINISHED)

  testcase.symbolized = True
  testcase.crash_revision = crash_revision
  testcase.put()

  # We might have updated the crash state. See if it needs to be marked as a
  # duplicate based on other testcases.
  data_handler.handle_duplicate_entry(testcase)

  task_creation.create_blame_task_if_needed(testcase)

  # Switch the current directory before cleaning up builds.
  root_directory = environment.get_value("ROOT_DIR")
  os.chdir(root_directory)

  # Cleanup symbolized builds which are space-heavy.
  symbolized_builds.delete()
Example #24
def check_for_bad_build(job_type, crash_revision):
    """Return true if the build is bad, i.e. crashes on startup."""
    # Check the bad build check flag to see if we want to do this.
    if not environment.get_value('BAD_BUILD_CHECK'):
        return False

    # Do not detect leaks while checking for bad builds.
    environment.reset_current_memory_tool_options(leaks=False)

    # Create a blank command line with no file to run and no http.
    command = get_command_line_for_application(file_to_run='',
                                               needs_http=False)

    # When checking for bad builds, we use the default window size.
    # We don't want to pick a custom size since it can potentially cause a
    # startup crash and cause a build to be detected incorrectly as bad.
    default_window_argument = environment.get_value('WINDOW_ARG', '')
    if default_window_argument:
        command = command.replace(' %s' % default_window_argument, '')

    # Warmup timeout.
    fast_warmup_timeout = environment.get_value('FAST_WARMUP_TIMEOUT')

    # TSAN is slow, and boots slowly on first startup. Increase the warmup
    # timeout for this case.
    if environment.tool_matches('TSAN', job_type):
        fast_warmup_timeout = environment.get_value('WARMUP_TIMEOUT')

    # Initialize helper variables.
    is_bad_build = False
    build_run_console_output = ''
    output = ''
    app_directory = environment.get_value('APP_DIR')

    # Check if the build is bad.
    process_handler.terminate_stale_application_instances()
    exit_code, _, output = process_handler.run_process(
        command,
        timeout=fast_warmup_timeout,
        current_working_directory=app_directory)
    output = utils.decode_to_unicode(output)
    if crash_analyzer.is_crash(exit_code, output):
        is_bad_build = True
        build_run_console_output = (
            '%s\n\n%s\n\n%s' %
            (command, stack_symbolizer.symbolize_stacktrace(output), output))
        logs.log('Bad build for %s detected at r%d.' %
                 (job_type, crash_revision),
                 output=build_run_console_output)

    # Exit all running instances.
    process_handler.terminate_stale_application_instances()

    # Any of the conditions below indicate that the bot is in a bad state and
    # that it is not caused by the build itself. In that case, just exit.
    build_state = data_handler.get_build_state(job_type, crash_revision)
    if (is_bad_build and ('cannot open display' in output
                          or 'logging service has stopped' in output
                          or 'Maximum number of clients reached' in output)):
        logs.log_fatal_and_exit('Bad bot environment detected, exiting.',
                                output=build_run_console_output)

    # If none of the other bots have added information about this build,
    # then add it now.
    if build_state == data_types.BuildState.UNMARKED:
        data_handler.add_build_metadata(job_type, crash_revision, is_bad_build,
                                        build_run_console_output)

    # Reset memory tool options.
    environment.reset_current_memory_tool_options()

    return is_bad_build