Example #1
0
    def run(self, timeout):
        """Merge testcases from corpus from other fuzz targets.

        Skips silently when the shared corpus directory is empty. Timeouts are
        logged as errors; any other engine failure is re-raised as a
        CorpusPruningException.
        """
        shared_corpus = self.context.shared_corpus_path
        if not shell.get_directory_file_count(shared_corpus):
            logs.log("No files found in shared corpus, skip merge.")
            return

        # Prune against the shared corpus and surface the outcome in the logs.
        logs.log("Merging shared corpus...")
        environment.reset_current_memory_tool_options(
            redzone_size=DEFAULT_REDZONE)
        self.runner.process_sanitizer_options()

        libfuzzer_flags = self.runner.get_libfuzzer_flags()

        try:
            merge_result = self.runner.minimize_corpus(
                libfuzzer_flags, [shared_corpus],
                self.context.minimized_corpus_path,
                self.context.bad_units_path, timeout)
            symbolized = stack_symbolizer.symbolize_stacktrace(
                merge_result.logs)
            logs.log("Shared corpus merge finished successfully.",
                     output=symbolized)
        except engine.TimeoutError as error:
            # A timeout here is logged but not fatal for the overall task.
            logs.log_error(
                "Corpus pruning timed out while merging shared corpus: " +
                error.message)
        except engine.Error as error:
            raise CorpusPruningException(
                "Corpus pruning failed to merge shared corpus\n" +
                error.message)
Example #2
0
    def run(self, timeout):
        """Merge testcases from corpus from other fuzz targets.

        Skips silently when the shared corpus directory is empty. Merge
        timeouts and non-zero exit codes are logged as errors; success is
        logged with the symbolized merge output attached.
        """
        context = self.context
        if not shell.get_directory_file_count(context.shared_corpus_path):
            logs.log('No files found in shared corpus, skip merge.')
            return

        # Prune against the shared corpus and record how the merge went.
        logs.log('Merging shared corpus...')
        environment.reset_current_memory_tool_options(
            redzone_size=DEFAULT_REDZONE)
        self.runner.process_sanitizer_options()

        libfuzzer_flags = self.runner.get_libfuzzer_flags()

        merge_result = self.runner.merge(
            [context.minimized_corpus_path, context.shared_corpus_path],
            timeout,
            artifact_prefix=context.bad_units_path,
            tmp_dir=context.merge_tmp_dir,
            additional_args=libfuzzer_flags)

        symbolized = stack_symbolizer.symbolize_stacktrace(
            merge_result.output)
        if merge_result.timed_out:
            logs.log_error(
                'Corpus pruning timed out while merging shared corpus: %s.' %
                symbolized)
        elif merge_result.return_code:
            logs.log_error(
                'Corpus pruning failed to merge shared corpus: %s.' %
                symbolized)
        else:
            logs.log('Shared corpus merge finished successfully.',
                     output=symbolized)
Example #3
0
def symbolize_stacktrace(request):
    """Symbolize the stacktrace carried by |request| and return it wrapped in
    a SymbolizeStacktraceResponse proto."""
    symbolized = stack_symbolizer.symbolize_stacktrace(
        request.unsymbolized_crash_stacktrace, request.enable_inline_frames)
    return untrusted_runner_pb2.SymbolizeStacktraceResponse(
        symbolized_stacktrace=symbolized)
Example #4
0
    def run(self, initial_corpus_path, minimized_corpus_path, bad_units_path):
        """Run corpus pruning. Output result to directory.

        Raises CorpusPruningException on timeout, engine failure, or when the
        minimized corpus ends up empty after the merge.
        """
        if not shell.get_directory_file_count(initial_corpus_path):
            # Empty corpus, nothing to do.
            return

        # Prepare memory tool options and fuzzer arguments for pruning.
        engine_common.unpack_seed_corpus_if_needed(self.runner.target_path,
                                                   initial_corpus_path,
                                                   force_unpack=True)

        environment.reset_current_memory_tool_options(redzone_size=MIN_REDZONE,
                                                      leaks=True)
        self.runner.process_sanitizer_options()
        libfuzzer_flags = self.runner.get_libfuzzer_flags()

        # Run the fuzzer in corpus-minimization mode.
        logs.log("Running merge...")
        try:
            merge_result = self.runner.minimize_corpus(
                libfuzzer_flags, [initial_corpus_path], minimized_corpus_path,
                bad_units_path, CORPUS_PRUNING_TIMEOUT)
        except engine.TimeoutError as error:
            raise CorpusPruningException(
                "Corpus pruning timed out while minimizing corpus\n" +
                error.message)
        except engine.Error as error:
            raise CorpusPruningException(
                "Corpus pruning failed to minimize corpus\n" + error.message)

        symbolized = stack_symbolizer.symbolize_stacktrace(merge_result.logs)

        # The merge must leave at least one unit behind, otherwise it failed.
        if not shell.get_directory_file_count(minimized_corpus_path):
            raise CorpusPruningException(
                "Corpus pruning failed to minimize corpus\n" + symbolized)

        logs.log("Corpus merge finished successfully.",
                 output=symbolized)
Example #5
0
    def run(self, initial_corpus_path, minimized_corpus_path, bad_units_path):
        """Run corpus pruning. Output result to directory.

        Raises CorpusPruningException when the merge times out, exits with a
        non-zero code, or leaves the minimized corpus empty.
        """
        if not shell.get_directory_file_count(initial_corpus_path):
            # Empty corpus, nothing to do.
            return

        # Prepare memory tool options and fuzzer arguments for pruning.
        engine_common.unpack_seed_corpus_if_needed(self.runner.fuzzer_path,
                                                   initial_corpus_path,
                                                   force_unpack=True)

        environment.reset_current_memory_tool_options(redzone_size=MIN_REDZONE,
                                                      leaks=True)
        self.runner.process_sanitizer_options()
        libfuzzer_flags = self.runner.get_libfuzzer_flags()

        # Execute the fuzzer's merge mode to prune the corpus.
        logs.log('Running merge...')
        merge_result = self.runner.merge(
            [minimized_corpus_path, initial_corpus_path],
            CORPUS_PRUNING_TIMEOUT,
            artifact_prefix=bad_units_path,
            tmp_dir=self.context.merge_tmp_dir,
            additional_args=libfuzzer_flags)

        symbolized = stack_symbolizer.symbolize_stacktrace(
            merge_result.output)

        # A timeout means pruning did not finish.
        if merge_result.timed_out:
            raise CorpusPruningException(
                'Corpus pruning timed out while merging corpus: %s.' %
                symbolized)
        # A bad exit code, or an empty minimized corpus after merging, means
        # the merge failed.
        if (merge_result.return_code
                or not shell.get_directory_file_count(minimized_corpus_path)):
            raise CorpusPruningException(
                'Corpus pruning failed to merge corpus: %s.' % symbolized)

        logs.log('Corpus merge finished successfully.',
                 output=symbolized)
Example #6
0
    def run(self, timeout):
        """Merge testcases from corpus from other fuzz targets.

        Returns the merge statistics on success, or None when the shared
        corpus is empty or the merge fails (timeouts and engine errors are
        expected with cross-pollinated corpora, so they are only warned on).
        """
        context = self.context
        if not shell.get_directory_file_count(context.shared_corpus_path):
            logs.log('No files found in shared corpus, skip merge.')
            return None

        # Prune against the shared corpus and record how the merge went.
        logs.log('Merging shared corpus...')
        environment.reset_current_memory_tool_options(
            redzone_size=DEFAULT_REDZONE)
        self.runner.process_sanitizer_options()

        libfuzzer_flags = self.runner.get_libfuzzer_flags()

        try:
            merge_result = self.runner.minimize_corpus(
                libfuzzer_flags, [context.shared_corpus_path],
                context.minimized_corpus_path, context.bad_units_path, timeout)
            symbolized = stack_symbolizer.symbolize_stacktrace(
                merge_result.logs)
            logs.log('Shared corpus merge finished successfully.',
                     output=symbolized)
        except engine.TimeoutError as error:
            # Other cross pollinated fuzzer corpuses can have unexpected test
            # cases that time us out. This is expected, so bail out.
            logs.log_warn(
                'Corpus pruning timed out while merging shared corpus\n' +
                repr(error))
            return None
        except engine.Error as error:
            # Other cross pollinated fuzzer corpuses can be large, so we can
            # run out of disk space and exception out. This is expected, so
            # bail out.
            logs.log_warn('Corpus pruning failed to merge shared corpus\n' +
                          repr(error))
            return None

        return merge_result.stats
Example #7
0
def check_for_bad_build(job_type, crash_revision):
    """Return true if the build is bad, i.e. crashes on startup."""
    # Respect the flag that disables bad build detection entirely.
    if not environment.get_value('BAD_BUILD_CHECK'):
        return False

    # Leak detection would misclassify startup behavior; turn it off here.
    environment.reset_current_memory_tool_options(leaks=False)

    # Build a bare command line: no testcase file, no http server.
    command = get_command_line_for_application(file_to_run='',
                                               needs_http=False)

    # Strip any custom window size argument. A non-default window size can
    # itself trigger a startup crash and make a good build look bad.
    window_argument = environment.get_value('WINDOW_ARG', '')
    if window_argument:
        command = command.replace(' %s' % window_argument, '')

    # Pick the warmup timeout. TSAN is slow and boots slowly on first
    # startup, so it gets the full (longer) warmup timeout.
    if environment.tool_matches('TSAN', job_type):
        warmup_timeout = environment.get_value('WARMUP_TIMEOUT')
    else:
        warmup_timeout = environment.get_value('FAST_WARMUP_TIMEOUT')

    is_bad_build = False
    build_run_console_output = ''
    app_directory = environment.get_value('APP_DIR')

    # Launch the application and check whether it crashes on startup.
    process_handler.terminate_stale_application_instances()
    exit_code, _, output = process_handler.run_process(
        command,
        timeout=warmup_timeout,
        current_working_directory=app_directory)
    output = utils.decode_to_unicode(output)
    if crash_analyzer.is_crash(exit_code, output):
        is_bad_build = True
        build_run_console_output = (
            '%s\n\n%s\n\n%s' %
            (command, stack_symbolizer.symbolize_stacktrace(output), output))
        logs.log('Bad build for %s detected at r%d.' %
                 (job_type, crash_revision),
                 output=build_run_console_output)

    # Exit all running instances.
    process_handler.terminate_stale_application_instances()

    # These messages indicate a broken bot environment rather than a bad
    # build itself; bail out without recording build metadata.
    build_state = data_handler.get_build_state(job_type, crash_revision)
    bot_environment_errors = ('cannot open display',
                              'logging service has stopped',
                              'Maximum number of clients reached')
    if is_bad_build and any(error in output
                            for error in bot_environment_errors):
        logs.log_fatal_and_exit('Bad bot environment detected, exiting.',
                                output=build_run_console_output)

    # Record the verdict only if no other bot has reported this build yet.
    if build_state == data_types.BuildState.UNMARKED:
        data_handler.add_build_metadata(job_type, crash_revision, is_bad_build,
                                        build_run_console_output)

    # Restore the default memory tool options.
    environment.reset_current_memory_tool_options()

    return is_bad_build
Example #8
0
def get_crash_data(crash_data,
                   symbolize_flag=True,
                   fuzz_target=None,
                   already_symbolized=False,
                   detect_ooms_and_hangs=None):
    """Get crash parameters from crash data.

  Crash parameters include crash type, address, state and stacktrace. Unless
  |symbolize_flag| is False, the stacktrace is symbolized first. The
  symbolized stacktrace keeps inline frames, but those are excluded when
  generating the crash state (which helps testcase deduplication)."""
    if symbolize_flag:
        # Defer imports since stack_symbolizer pulls in a lot of things.
        from crash_analysis.stack_parsing import stack_symbolizer
        stacktrace_with_inlines = stack_symbolizer.symbolize_stacktrace(
            crash_data, enable_inline_frames=True)
        stacktrace_without_inlines = stack_symbolizer.symbolize_stacktrace(
            crash_data, enable_inline_frames=False)
    else:
        # Caller asked us not to symbolize. There is no inline/non-inline
        # distinction for an unsymbolized stacktrace.
        stacktrace_with_inlines = crash_data
        stacktrace_without_inlines = crash_data

    # Additional stack frame ignore regexes from the project config.
    custom_stack_frame_ignore_regexes = local_config.ProjectConfig().get(
        'stacktrace.stack_frame_ignore_regexes', [])

    # The analyze task always detects V8 runtime errors; everyone else obeys
    # the environment setting.
    if environment.get_value('TASK_NAME') == 'analyze':
        detect_v8_runtime_errors = True
    else:
        detect_v8_runtime_errors = environment.get_value(
            'DETECT_V8_RUNTIME_ERRORS', False)

    fuzz_target = fuzz_target or environment.get_value('FUZZ_TARGET')
    redzone_size = environment.get_value('REDZONE')
    if detect_ooms_and_hangs is None:
        # Large redzones inflate memory usage, so OOM/hang detection is only
        # enabled below the configured redzone-size ceiling.
        detect_ooms_and_hangs = (
            environment.get_value('REPORT_OOMS_AND_HANGS')
            and (not redzone_size
                 or redzone_size <= MAX_REDZONE_SIZE_FOR_OOMS_AND_HANGS))

    ubsan_options = environment.get_value('UBSAN_OPTIONS', '')
    include_ubsan = 'halt_on_error=0' not in ubsan_options

    stack_parser = stacktraces.StackParser(
        symbolized=symbolize_flag or already_symbolized,
        detect_ooms_and_hangs=detect_ooms_and_hangs,
        detect_v8_runtime_errors=detect_v8_runtime_errors,
        custom_stack_frame_ignore_regexes=custom_stack_frame_ignore_regexes,
        fuzz_target=fuzz_target,
        include_ubsan=include_ubsan)
    result = stack_parser.parse(stacktrace_without_inlines)

    # Prefer the inline-frame stacktrace for display in the result.
    if result.crash_stacktrace:
        result.crash_stacktrace = stacktrace_with_inlines

    # Linkify Android kernel / KASan stacktraces.
    if environment.is_android() and (result.found_android_kernel_crash
                                     or result.is_kasan):
        linkify_android_stacktrace(result)

    return result