def test_parse_log_and_stats_timeout(self):
    """Test stats parsing and extraction of additional performance features
    without applying stat_overrides."""
    log_lines = self._read_test_data('timeout.txt')
    parsed_stats = stats.parse_performance_features(log_lines, [], [])
    self.assertEqual(0, parsed_stats['oom_count'])
    self.assertEqual(1, parsed_stats['timeout_count'])
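
# For reference, the timeout.txt fixture contains libFuzzer output with a
# line of roughly this shape, which the parser tallies into timeout_count
# (the PID and the timeout value shown here are illustrative):
#   ==12345== ERROR: libFuzzer: timeout after 25 seconds
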
def test_parse_log_and_stats_startup_crash(self):
    """Test stats parsing and extraction of additional performance features
    without applying stat_overrides."""
    log_lines = self._read_test_data('startup_crash.txt')
    parsed_stats = stats.parse_performance_features(
        log_lines, [], ['-max_len=1337'])
    self.assertEqual(0, parsed_stats['crash_count'])
    self.assertEqual(0, parsed_stats['corpus_crash_count'])
    self.assertEqual(1, parsed_stats['startup_crash_count'])

def test_parse_log(self):
    """Test stats parsing and extraction of additional performance features
    without applying stat_overrides."""
    log_lines = self._read_test_data('no_crash_with_strategies.txt')
    parsed_stats = stats.parse_performance_features(log_lines, [], [])
    expected_stats = {
        'bad_instrumentation': 0,
        'corpus_crash_count': 0,
        'crash_count': 0,
        'corpus_size': 0,
        'dict_used': 1,
        'edge_coverage': 0,
        'edges_total': 398408,
        'feature_coverage': 0,
        'initial_edge_coverage': 0,
        'initial_feature_coverage': 0,
        'leak_count': 0,
        'log_lines_from_engine': 65,
        'log_lines_ignored': 8,
        'log_lines_unwanted': 0,
        'manual_dict_size': 0,
        'max_len': 741802,
        'merge_edge_coverage': 0,
        'new_edges': 0,
        'new_features': 0,
        'oom_count': 0,
        'recommended_dict_size': 0,
        'slow_unit_count': 0,
        'slow_units_count': 0,
        'startup_crash_count': 0,
        'strategy_dataflow_tracing': 0,
        'strategy_corpus_mutations_radamsa': 1,
        'strategy_corpus_mutations_ml_rnn': 0,
        'strategy_corpus_subset': 50,
        'strategy_fork': 1,
        'strategy_mutator_plugin_radamsa': 0,
        'strategy_peach_grammar_mutation': '',
        'strategy_mutator_plugin': 1,
        'strategy_random_max_len': 1,
        'strategy_recommended_dict': 0,
        'strategy_selection_method': 'default',
        'strategy_value_profile': 0,
        'timeout_count': 0
    }

    self.assertDictEqual(expected_stats, parsed_stats)
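
# The tests above rely on a _read_test_data() helper that is not shown in
# this snippet. A minimal sketch of such a helper on the test class, assuming
# the log fixtures live in a sibling directory (the TEST_DATA_DIR layout and
# the method body are assumptions, not part of the original code):
import os

TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'libfuzzer_test_data')


def _read_test_data(self, name):
    """Return the lines of a log fixture as a list of strings (sketch)."""
    with open(os.path.join(TEST_DATA_DIR, name)) as file_handle:
        return file_handle.read().splitlines()
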
def _get_stats_from_log(log_path,
                        strategies=None,
                        arguments=None,
                        stats_overrides=None):
    """Calculate stats for the given log the same way as the engine does."""
    if strategies is None:
        strategies = []
    if arguments is None:
        arguments = []

    log_lines = utils.decode_to_unicode(
        utils.read_data_from_file(log_path, eval_data=False)).splitlines()
    stats = libfuzzer.parse_log_stats(log_lines)
    stats.update(
        performance_stats.parse_performance_features(log_lines, strategies,
                                                     arguments))
    if stats_overrides:
        stats.update(stats_overrides)

    return stats
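
# A quick usage sketch of the helper above; the log path is illustrative, and
# the empty strategy/argument defaults mirror the tests in this file:
if __name__ == '__main__':
    example_stats = _get_stats_from_log('/tmp/fuzz-0.log')
    print(example_stats['crash_count'], example_stats['timeout_count'])
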
    def fuzz(self, target_path, options, reproducers_dir, max_time):
        """Run a fuzz session.

    Args:
      target_path: Path to the target.
      options: The FuzzOptions object returned by prepare().
      reproducers_dir: The directory to put reproducers in when crashes
          are found.
      max_time: Maximum allowed time for the fuzzing to run.

    Returns:
      A FuzzResult object.
    """
        profiler.start_if_needed('libfuzzer_fuzz')
        runner = libfuzzer.get_runner(target_path)
        libfuzzer.set_sanitizer_options(target_path, fuzz_options=options)

        # Directory to place new units.
        if options.merge_back_new_testcases:
            new_corpus_dir = self._create_temp_corpus_dir('new')
            corpus_directories = [new_corpus_dir] + options.fuzz_corpus_dirs
        else:
            corpus_directories = options.fuzz_corpus_dirs

        fuzz_result = runner.fuzz(corpus_directories,
                                  fuzz_timeout=max_time,
                                  additional_args=options.arguments,
                                  artifact_prefix=reproducers_dir,
                                  extra_env=options.extra_env)

        project_qualified_fuzzer_name = (
            engine_common.get_project_qualified_fuzzer_name(target_path))
        dict_error_match = DICT_PARSING_FAILED_REGEX.search(fuzz_result.output)
        if dict_error_match:
            logs.log_error(
                'Dictionary parsing failed '
                f'(target={project_qualified_fuzzer_name}, '
                f'line={dict_error_match.group(1)}).',
                engine_output=fuzz_result.output)
        elif (not environment.get_value('USE_MINIJAIL') and
              fuzz_result.return_code == constants.LIBFUZZER_ERROR_EXITCODE):
            # Minijail returns 1 if the exit code is nonzero. Otherwise, we
            # can assume that a return code of 1 means that libFuzzer itself
            # ran into an error.
            logs.log_error(ENGINE_ERROR_MESSAGE +
                           f' (target={project_qualified_fuzzer_name}).',
                           engine_output=fuzz_result.output)

        log_lines = fuzz_result.output.splitlines()
        # Output can be large, so save some memory by removing reference to the
        # original output which is no longer needed.
        fuzz_result.output = None

        # Check if we crashed, and get the crash testcase path.
        crash_testcase_file_path = runner.get_testcase_path(log_lines)

        # If we exited with a non-zero return code with no crash file in the
        # libFuzzer output, this is most likely a startup crash. Use an empty
        # testcase to store it as a crash.
        if (not crash_testcase_file_path and fuzz_result.return_code
                not in constants.NONCRASH_RETURN_CODES):
            crash_testcase_file_path = self._create_empty_testcase_file(
                reproducers_dir)

        # Parse stats information based on libFuzzer output.
        parsed_stats = libfuzzer.parse_log_stats(log_lines)

        # Extend parsed stats by additional performance features.
        parsed_stats.update(
            stats.parse_performance_features(log_lines, options.strategies,
                                             options.arguments))

        # Set some initial stat overrides.
        timeout_limit = fuzzer_utils.extract_argument(options.arguments,
                                                      constants.TIMEOUT_FLAG,
                                                      remove=False)

        actual_duration = int(fuzz_result.time_executed)
        fuzzing_time_percent = 100 * actual_duration / float(max_time)
        parsed_stats.update({
            'timeout_limit': int(timeout_limit),
            'expected_duration': int(max_time),
            'actual_duration': actual_duration,
            'fuzzing_time_percent': fuzzing_time_percent,
        })

        # Remove fuzzing arguments before merge and dictionary analysis step.
        non_fuzz_arguments = options.arguments.copy()
        libfuzzer.remove_fuzzing_arguments(non_fuzz_arguments, is_merge=True)

        if options.merge_back_new_testcases:
            self._merge_new_units(target_path, options.corpus_dir,
                                  new_corpus_dir, options.fuzz_corpus_dirs,
                                  non_fuzz_arguments, parsed_stats)

        fuzz_logs = '\n'.join(log_lines)
        crashes = []
        if crash_testcase_file_path:
            reproduce_arguments = options.arguments[:]
            libfuzzer.remove_fuzzing_arguments(reproduce_arguments)

            # Use higher timeout for reproduction.
            libfuzzer.fix_timeout_argument_for_reproduction(
                reproduce_arguments)

            # Write the new testcase.
            # Copy crash testcase contents into the main testcase path.
            crashes.append(
                engine.Crash(crash_testcase_file_path, fuzz_logs,
                             reproduce_arguments, actual_duration))

        if options.analyze_dictionary:
            libfuzzer.analyze_and_update_recommended_dictionary(
                runner, project_qualified_fuzzer_name, log_lines,
                options.corpus_dir, non_fuzz_arguments)

        return engine.FuzzResult(fuzz_logs, fuzz_result.command, crashes,
                                 parsed_stats, fuzz_result.time_executed)
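
# A minimal driver sketch for the fuzz() method above, following the generic
# engine interface visible in this snippet. The helper name, the prepare()
# call order, and the result attributes are assumptions based on the
# surrounding code, not a confirmed API:
def run_one_fuzz_session(engine_impl, target_path, build_dir, corpus_dir,
                         reproducers_dir, max_time):
    """Prepare options, run one session, and return the FuzzResult (sketch)."""
    options = engine_impl.prepare(corpus_dir, target_path, build_dir)
    result = engine_impl.fuzz(target_path, options, reproducers_dir, max_time)
    # result.stats carries the parsed stats dict built above; result.crashes
    # holds engine.Crash objects whose first field is the testcase path.
    return result
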
def test_parse_log_and_stats_corpus_crash_with_corpus_subset(self):
    """Test stats parsing and extraction of additional performance features
    without applying stat_overrides."""
    log_lines = self._read_test_data('corpus_crash_with_corpus_subset.txt')
    parsed_stats = stats.parse_performance_features(log_lines, [], [])
    self.assertEqual(1, parsed_stats['strategy_corpus_subset'])