def test_parse_log_stats(self):
    """Test pure stats parsing without applying of stat_overrides."""
    log_lines = self._read_test_data('no_crash.txt')
    parsed_stats = launcher.parse_log_stats(log_lines)
    expected_stats = {
        'average_exec_per_sec': 97,
        'new_units_added': 55,
        'new_units_generated': 55,
        'number_of_executed_units': 258724,
        'peak_rss_mb': 103,
        'slowest_unit_time_sec': 0
    }

    self.assertEqual(parsed_stats, expected_stats)
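
The expected values above line up with libFuzzer's final-stats report (printed when the fuzzer runs with -print_final_stats=1), which emits one `stat::<name>: <value>` line per counter. The following is a rough sketch of that kind of parsing, assuming that line format; it is not the actual launcher.parse_log_stats implementation, and fields such as new_units_generated may be derived differently there.

import re

# Matches lines of the form "stat::<name>: <integer>" (assumed format).
STAT_LINE_REGEX = re.compile(r'^stat::(?P<key>\S+):\s+(?P<value>\d+)')

def parse_final_stats(log_lines):
    """Collect integer values from libFuzzer 'stat::<name>: <value>' lines."""
    stats = {}
    for line in log_lines:
        match = STAT_LINE_REGEX.match(line)
        if match:
            stats[match.group('key')] = int(match.group('value'))
    return stats

# Example final-stats lines as printed with -print_final_stats=1:
sample = [
    'stat::number_of_executed_units: 258724',
    'stat::average_exec_per_sec:     97',
    'stat::new_units_added:          55',
    'stat::slowest_unit_time_sec:    0',
    'stat::peak_rss_mb:              103',
]
assert parse_final_stats(sample)['peak_rss_mb'] == 103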
Example 2
def _get_stats_from_log(log_path,
                        strategies=None,
                        arguments=None,
                        stats_overrides=None):
    """Calculate stats for the given log the same way as the launcher does."""
    if strategies is None:
        strategies = []
    if arguments is None:
        arguments = []

    log_lines = utils.read_data_from_file(log_path,
                                          eval_data=False).splitlines()
    stats = launcher.parse_log_stats(log_lines)
    stats.update(
        performance_stats.parse_performance_features(log_lines, strategies,
                                                     arguments))
    if stats_overrides:
        stats.update(stats_overrides)

    return stats
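
A hypothetical call to the helper above; the log path, arguments, and override value below are made up for illustration:

# Hypothetical usage: the path, arguments, and override are illustrative only.
stats = _get_stats_from_log(
    '/tmp/libfuzzer_run.log',
    strategies=[],
    arguments=['-timeout=25', '-rss_limit_mb=2560'],
    stats_overrides={'fuzzing_time_percent': 100.0})
print(stats.get('average_exec_per_sec'))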
Example 3
    def fuzz(self, target_path, options, reproducers_dir, max_time):
        """Run a fuzz session.

    Args:
      target_path: Path to the target.
      options: The FuzzOptions object returned by prepare().
      reproducers_dir: The directory to put reproducers in when crashes
          are found.
      max_time: Maximum allowed time for the fuzzing to run.

    Returns:
      A Result object.
    """
        profiler.start_if_needed('libfuzzer_fuzz')
        runner = libfuzzer.get_runner(target_path)
        launcher.set_sanitizer_options(target_path)

        artifact_prefix = self._artifact_prefix(
            os.path.abspath(reproducers_dir))

        # Directory to place new units.
        new_corpus_dir = self._create_temp_corpus_dir('new')

        corpus_directories = [new_corpus_dir] + options.fuzz_corpus_dirs
        fuzz_timeout = launcher.get_fuzz_timeout(options.is_mutations_run,
                                                 total_timeout=max_time)
        fuzz_result = runner.fuzz(corpus_directories,
                                  fuzz_timeout=fuzz_timeout,
                                  additional_args=options.arguments +
                                  [artifact_prefix],
                                  extra_env=options.extra_env)

        log_lines = fuzz_result.output.splitlines()
        # Output can be large, so save some memory by removing reference to the
        # original output which is no longer needed.
        fuzz_result.output = None

        # Check if we crashed, and get the crash testcase path.
        crash_testcase_file_path = None
        for line in log_lines:
            match = re.match(launcher.CRASH_TESTCASE_REGEX, line)
            if match:
                crash_testcase_file_path = match.group(1)
                break

        # Parse stats information based on libFuzzer output.
        parsed_stats = launcher.parse_log_stats(log_lines)

        # Extend parsed stats by additional performance features.
        parsed_stats.update(
            stats.parse_performance_features(log_lines,
                                             options.strategies,
                                             options.arguments,
                                             include_strategies=False))

        # Set some initial stat overrides.
        timeout_limit = fuzzer_utils.extract_argument(options.arguments,
                                                      constants.TIMEOUT_FLAG,
                                                      remove=False)

        expected_duration = runner.get_max_total_time(fuzz_timeout)
        actual_duration = int(fuzz_result.time_executed)
        fuzzing_time_percent = 100 * actual_duration / float(expected_duration)
        parsed_stats.update({
            'timeout_limit': int(timeout_limit),
            'expected_duration': expected_duration,
            'actual_duration': actual_duration,
            'fuzzing_time_percent': fuzzing_time_percent,
        })

        # Remove fuzzing arguments before merge and dictionary analysis step.
        arguments = options.arguments[:]
        launcher.remove_fuzzing_arguments(arguments)

        self._merge_new_units(target_path, options.corpus_dir, new_corpus_dir,
                              options.fuzz_corpus_dirs, arguments,
                              parsed_stats)

        # Add custom crash state based on fuzzer name (if needed).
        project_qualified_fuzzer_name = (
            data_types.fuzz_target_project_qualified_name(
                utils.current_project(), os.path.basename(target_path)))
        launcher.add_custom_crash_state_if_needed(
            project_qualified_fuzzer_name, log_lines, parsed_stats)

        fuzz_logs = '\n'.join(log_lines)
        crashes = []
        if crash_testcase_file_path:
            # Record the crash so the caller can process the reproducer
            # testcase.
            crashes.append(
                engine.Crash(crash_testcase_file_path, fuzz_logs, arguments,
                             actual_duration))

        launcher.analyze_and_update_recommended_dictionary(
            runner, project_qualified_fuzzer_name, log_lines,
            options.corpus_dir, arguments)

        return engine.Result(fuzz_logs, fuzz_result.command, crashes,
                             parsed_stats, fuzz_result.time_executed)
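
The timing overrides above are plain arithmetic: expected_duration is the time budget the runner derived from fuzz_timeout (roughly the -max_total_time budget given to libFuzzer), actual_duration is how long the run really took, and fuzzing_time_percent is their ratio. A worked example with assumed numbers:

# Assumed numbers, purely to illustrate the stat computation above.
expected_duration = 2650                # seconds budgeted for fuzzing
actual_duration = 2597                  # int(fuzz_result.time_executed)
fuzzing_time_percent = 100 * actual_duration / float(expected_duration)
print(fuzzing_time_percent)             # 98.0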
Example 4
    def fuzz(self, target_path, options, reproducers_dir, max_time):
        """Run a fuzz session.

    Args:
      target_path: Path to the target.
      options: The FuzzOptions object returned by prepare().
      reproducers_dir: The directory to put reproducers in when crashes
          are found.
      max_time: Maximum allowed time for the fuzzing to run.

    Returns:
      A FuzzResult object.
    """
        profiler.start_if_needed('libfuzzer_fuzz')
        runner = libfuzzer.get_runner(target_path)
        launcher.set_sanitizer_options(target_path)

        # Directory to place new units.
        new_corpus_dir = self._create_temp_corpus_dir('new')

        corpus_directories = [new_corpus_dir] + options.fuzz_corpus_dirs
        fuzz_timeout = launcher.get_fuzz_timeout(options.is_mutations_run,
                                                 total_timeout=max_time)
        fuzz_result = runner.fuzz(corpus_directories,
                                  fuzz_timeout=fuzz_timeout,
                                  additional_args=options.arguments,
                                  artifact_prefix=reproducers_dir,
                                  extra_env=options.extra_env)

        if (not environment.get_value('USE_MINIJAIL') and
                fuzz_result.return_code == constants.LIBFUZZER_ERROR_EXITCODE):
            # Minijail returns 1 if the exit code is nonzero.
            # Otherwise, we can assume that a return code of 1 means that
            # libFuzzer itself ran into an error.
            logs.log_error(ENGINE_ERROR_MESSAGE,
                           engine_output=fuzz_result.output)

        log_lines = utils.decode_to_unicode(fuzz_result.output).splitlines()
        # Output can be large, so save some memory by removing reference to the
        # original output which is no longer needed.
        fuzz_result.output = None

        # Check if we crashed, and get the crash testcase path.
        crash_testcase_file_path = runner.get_testcase_path(log_lines)

        # Parse stats information based on libFuzzer output.
        parsed_stats = launcher.parse_log_stats(log_lines)

        # Extend parsed stats by additional performance features.
        parsed_stats.update(
            stats.parse_performance_features(log_lines,
                                             options.strategies,
                                             options.arguments,
                                             include_strategies=False))

        # Set some initial stat overrides.
        timeout_limit = fuzzer_utils.extract_argument(options.arguments,
                                                      constants.TIMEOUT_FLAG,
                                                      remove=False)

        expected_duration = runner.get_max_total_time(fuzz_timeout)
        actual_duration = int(fuzz_result.time_executed)
        fuzzing_time_percent = 100 * actual_duration / float(expected_duration)
        parsed_stats.update({
            'timeout_limit': int(timeout_limit),
            'expected_duration': expected_duration,
            'actual_duration': actual_duration,
            'fuzzing_time_percent': fuzzing_time_percent,
        })

        # Remove fuzzing arguments before merge and dictionary analysis step.
        arguments = options.arguments[:]
        launcher.remove_fuzzing_arguments(arguments)

        self._merge_new_units(target_path, options.corpus_dir, new_corpus_dir,
                              options.fuzz_corpus_dirs, arguments,
                              parsed_stats)

        fuzz_logs = '\n'.join(log_lines)
        crashes = []
        if crash_testcase_file_path:
            # Record the crash so the caller can process the reproducer
            # testcase.
            crashes.append(
                engine.Crash(crash_testcase_file_path, fuzz_logs, arguments,
                             actual_duration))

        project_qualified_fuzzer_name = (
            data_types.fuzz_target_project_qualified_name(
                utils.current_project(), os.path.basename(target_path)))
        launcher.analyze_and_update_recommended_dictionary(
            runner, project_qualified_fuzzer_name, log_lines,
            options.corpus_dir, arguments)

        return engine.FuzzResult(fuzz_logs, fuzz_result.command, crashes,
                                 parsed_stats, fuzz_result.time_executed)
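
One difference between the two fuzz() versions is how the crash testcase path is recovered: Example 3 matches launcher.CRASH_TESTCASE_REGEX against each log line directly, while this version delegates to runner.get_testcase_path(log_lines). libFuzzer reports a saved artifact with a line like "Test unit written to ./crash-<hash>"; below is a minimal sketch of that extraction, using an assumed pattern rather than the production regex.

import re

# Assumed pattern; the production regex is launcher.CRASH_TESTCASE_REGEX.
TESTCASE_REGEX = re.compile(r'.*Test unit written to\s*(.+)$')

def find_crash_testcase(log_lines):
    """Return the first reported artifact path, or None if no crash."""
    for line in log_lines:
        match = TESTCASE_REGEX.match(line)
        if match:
            return match.group(1)
    return None

sample_line = "artifact_prefix='./'; Test unit written to ./crash-0a1b2c"
assert find_crash_testcase([sample_line]) == './crash-0a1b2c'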
Example 5
def test_parse_log_and_stats_from_corrupted_output(self):
    """Test stats parsing from a log with corrupted libFuzzer stats."""
    log_lines = self._read_test_data('corrupted_stats.txt')
    parsed_stats = launcher.parse_log_stats(log_lines)
    self.assertNotIn('peak_rss_mb', parsed_stats)
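
This test relies on the parser dropping values it cannot interpret, so a stat whose numeric field is garbled never makes it into the result. A defensive loop in that spirit, as an illustrative sketch rather than the ClusterFuzz implementation:

def parse_stats_defensively(log_lines):
    """Keep only stat:: entries whose values parse as integers."""
    stats = {}
    for line in log_lines:
        if not line.startswith('stat::'):
            continue
        try:
            key, value = line[len('stat::'):].split(':', 1)
            stats[key.strip()] = int(value.strip())
        except ValueError:
            # Corrupted value (e.g. truncated output); drop the entry.
            continue
    return stats

assert 'peak_rss_mb' not in parse_stats_defensively(
    ['stat::peak_rss_mb: 1o3'])  # corrupted digit is skipped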