Example #1
def main():
    """Prepare the configuration options and start requesting tasks."""
    logs.configure('run_bot')

    root_directory = environment.get_value('ROOT_DIR')
    if not root_directory:
        print(
            'Please set ROOT_DIR environment variable to the root of the source '
            'checkout before running. Exiting.')
        print('For an example, check init.bash in the local directory.')
        return

    dates.initialize_timezone_from_environment()
    environment.set_bot_environment()
    monitor.initialize()

    if environment.is_trusted_host(ensure_connected=False):
        from bot.untrusted_runner import host
        host.init()

    if environment.is_untrusted_worker():
        # Track revision since we won't go into the task_loop.
        update_task.track_revision()

        from bot.untrusted_runner import untrusted as untrusted_worker
        untrusted_worker.start_server()
        assert False, 'Unreachable code'

    while True:
        # task_loop should be an infinite loop,
        # unless we run into an exception.
        error_stacktrace, clean_exit, task_payload = task_loop()

        # Print the error trace to the console.
        if not clean_exit:
            print('Exception occurred while running "%s".' % task_payload)
            print('-' * 80)
            print(error_stacktrace)
            print('-' * 80)

        should_terminate = (clean_exit or errors.error_in_list(
            error_stacktrace, errors.BOT_ERROR_TERMINATION_LIST))
        if should_terminate:
            return

        logs.log_error('Task exited with exception.',
                       error_stacktrace=error_stacktrace,
                       task_payload=task_payload)

        should_hang = errors.error_in_list(error_stacktrace,
                                           errors.BOT_ERROR_HANG_LIST)
        if should_hang:
            logs.log('Start hanging forever.')
            while True:
                # Sleep to avoid consuming 100% of CPU.
                time.sleep(60)

        # See if our run timed out; if so, bail out.
        if data_handler.bot_run_timed_out():
            return

        # On Python 2, sys.exc_clear() was called here to clear the current
        # exception; Python 3 clears it automatically.
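
The termination and hang decisions above both reduce to scanning the stacktrace for known error signatures. A minimal sketch of what such an error_in_list helper could look like (the real errors module is not part of this example, so this shape is an assumption):

def error_in_list(stacktrace, error_list):
    """Return True if any known error signature appears in the stacktrace."""
    if not stacktrace:
        return False
    return any(error in stacktrace for error in error_list)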
Example #2
                # Build success. Now, copy it to google cloud storage and make it
                # public.
                os.system('gsutil cp %s %s' %
                          (archive_path_local, archive_path_remote))
                os.system('gsutil acl set public-read %s' %
                          archive_path_remote)
                logs.log('Build succeeded, created %s.' % archive_filename)
            else:
                LAST_BUILD[tool_and_build_type] = ''
                logs.log_error('Build failed, unable to create %s.' %
                               archive_filename)

    logs.log('Completed cycle, waiting for %d secs.' % wait_time)
    time.sleep(wait_time)


if __name__ == '__main__':
    # Make sure environment is correctly configured.
    logs.configure('run_bot')
    environment.set_bot_environment()

    fail_wait = environment.get_value('FAIL_WAIT')

    # Continue this forever.
    while True:
        try:
            main()
        except Exception:
            logs.log_error('Failed to create build.')
            time.sleep(fail_wait)
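
The gsutil calls in this example go through os.system, which silently discards the exit status. A sketch of an equivalent using subprocess, which raises on failure (paths here are placeholders):

import subprocess

def copy_to_gcs_public(archive_path_local, archive_path_remote):
    """Copy a build archive to GCS and make it world-readable."""
    # check=True raises CalledProcessError if gsutil exits nonzero, so a
    # failed upload is no longer mistaken for success.
    subprocess.run(['gsutil', 'cp', archive_path_local, archive_path_remote],
                   check=True)
    subprocess.run(['gsutil', 'acl', 'set', 'public-read',
                    archive_path_remote], check=True)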
Example #3
def run_testcase_and_return_result_in_queue(crash_queue,
                                            thread_index,
                                            file_path,
                                            gestures,
                                            env_copy,
                                            upload_output=False):
    """Run a single testcase and return crash results in the crash queue."""

    # Since this is running in its own process, initialize the log handler again.
    # This is needed for Windows where instances are not shared across child
    # processes. See:
    # https://stackoverflow.com/questions/34724643/python-logging-with-multiprocessing-root-logger-different-in-windows
    logs.configure('run_testcase', {
        'testcase_path': file_path,
    })

    try:
        # Run testcase and check whether a crash occurred or not.
        return_code, crash_time, output = run_testcase(thread_index, file_path,
                                                       gestures, env_copy)

        # Pull testcase directory to host to get any stats files.
        if environment.is_trusted_host():
            from bot.untrusted_runner import file_host
            file_host.pull_testcases_from_worker()

        # Analyze the crash.
        crash_output = _get_crash_output(output)
        crash_result = CrashResult(return_code, crash_time, crash_output)

        # To provide consistency between stats and logs, we use the timestamp
        # taken from stats when uploading logs and the testcase.
        if upload_output:
            log_time = _get_testcase_time(file_path)

        if crash_result.is_crash():
            # Initialize resource list with the testcase path.
            resource_list = [file_path]
            resource_list += get_resource_paths(crash_output)

            # Store the crash stack file in the crash stacktrace directory
            # with filename as the hash of the testcase path.
            crash_stacks_directory = environment.get_value(
                'CRASH_STACKTRACES_DIR')
            stack_file_path = os.path.join(crash_stacks_directory,
                                           utils.string_hash(file_path))
            utils.write_data_to_file(crash_output, stack_file_path)

            # Put crash/no-crash results in the crash queue.
            crash_queue.put(
                Crash(file_path=file_path,
                      crash_time=crash_time,
                      return_code=return_code,
                      resource_list=resource_list,
                      gestures=gestures,
                      stack_file_path=stack_file_path))

            # Don't upload uninteresting testcases (no crash), or ones with no
            # log to correlate them with (not upload_output).
            if upload_output:
                upload_testcase(file_path, log_time)

        if upload_output:
            # Include full output for uploaded logs (crash output, merge output, etc).
            crash_result_full = CrashResult(return_code, crash_time, output)
            log = prepare_log_for_upload(crash_result_full.get_stacktrace(),
                                         return_code)
            upload_log(log, log_time)
    except Exception:
        logs.log_error('Exception occurred while running '
                       'run_testcase_and_return_result_in_queue.')
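
The comment at the top of this function applies to any multiprocessing code on Windows: with the spawn start method, child processes do not inherit the parent's logging configuration, so each worker must configure logging itself. A standalone sketch of the same pattern using only the standard library:

import logging
import multiprocessing

def worker(task_queue):
    # Configured inside the child on purpose: handlers set up in the parent
    # are not shared with spawned processes on Windows.
    logging.basicConfig(level=logging.INFO,
                        format='%(processName)s: %(message)s')
    logging.info('processing %s', task_queue.get())

if __name__ == '__main__':
    queue = multiprocessing.Queue()
    queue.put('testcase-1')
    process = multiprocessing.Process(target=worker, args=(queue,))
    process.start()
    process.join()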
Example #4
def main(argv):
  """Run libFuzzer as specified by argv."""
  atexit.register(fuzzer_utils.cleanup)

  # Initialize variables.
  arguments = argv[1:]
  testcase_file_path = arguments.pop(0)
  target_name = arguments.pop(0)
  fuzzer_name = data_types.fuzz_target_project_qualified_name(
      utils.current_project(), target_name)

  # Initialize log handler.
  logs.configure(
      'run_fuzzer', {
          'fuzzer': fuzzer_name,
          'engine': 'libFuzzer',
          'job_name': environment.get_value('JOB_NAME')
      })

  profiler.start_if_needed('libfuzzer_launcher')

  # Make sure that the fuzzer binary exists.
  build_directory = environment.get_value('BUILD_DIR')
  fuzzer_path = engine_common.find_fuzzer_path(build_directory, target_name)
  if not fuzzer_path:
    return

  # Install signal handler.
  signal.signal(signal.SIGTERM, engine_common.signal_term_handler)

  # Set up temp dir.
  engine_common.recreate_directory(fuzzer_utils.get_temp_dir())

  # Setup minijail if needed.
  use_minijail = environment.get_value('USE_MINIJAIL')
  runner = libfuzzer.get_runner(
      fuzzer_path, temp_dir=fuzzer_utils.get_temp_dir())

  if use_minijail:
    minijail_chroot = runner.chroot
  else:
    minijail_chroot = None

  # Get corpus directory.
  corpus_directory = environment.get_value('FUZZ_CORPUS_DIR')

  # Add common arguments that are necessary for every run.
  arguments = expand_with_common_arguments(arguments)

  # Add sanitizer options to environment that were specified in the .options
  # file and options that this script requires.
  set_sanitizer_options(fuzzer_path)

  # Minimize test argument.
  minimize_to = fuzzer_utils.extract_argument(arguments, MINIMIZE_TO_ARGUMENT)
  minimize_timeout = fuzzer_utils.extract_argument(arguments,
                                                   MINIMIZE_TIMEOUT_ARGUMENT)

  if minimize_to and minimize_timeout:
    minimize_testcase(runner, testcase_file_path, minimize_to,
                      int(minimize_timeout), arguments, use_minijail)
    return

  # Cleanse argument.
  cleanse_to = fuzzer_utils.extract_argument(arguments, CLEANSE_TO_ARGUMENT)
  cleanse_timeout = fuzzer_utils.extract_argument(arguments,
                                                  CLEANSE_TIMEOUT_ARGUMENT)

  if cleanse_to and cleanse_timeout:
    cleanse_testcase(runner, testcase_file_path, cleanse_to,
                     int(cleanse_timeout), arguments, use_minijail)
    return

  # If we don't have a corpus, then that means this is not a fuzzing run.
  # TODO(flowerhack): Implement this to properly load past testcases.
  if not corpus_directory and environment.platform() != 'FUCHSIA':
    load_testcase_if_exists(runner, testcase_file_path, fuzzer_name,
                            use_minijail, arguments)
    return

  # We don't have a crash testcase; fuzz.

  # Check dict argument to make sure that it's valid.
  dict_argument = fuzzer_utils.extract_argument(
      arguments, constants.DICT_FLAG, remove=False)
  if dict_argument and not os.path.exists(dict_argument):
    logs.log_error('Invalid dict %s for %s.' % (dict_argument, fuzzer_name))
    fuzzer_utils.extract_argument(arguments, constants.DICT_FLAG)

  # If there's no dict argument, check for %target_binary_name%.dict file.
  if (not fuzzer_utils.extract_argument(
      arguments, constants.DICT_FLAG, remove=False)):
    default_dict_path = dictionary_manager.get_default_dictionary_path(
        fuzzer_path)
    if os.path.exists(default_dict_path):
      arguments.append(constants.DICT_FLAG + default_dict_path)

  # Set up scratch directory for writing new units.
  new_testcases_directory = create_corpus_directory('new')

  # Strategy pool is the list of strategies that we attempt to enable, whereas
  # fuzzing strategies is the list of strategies that are enabled. (e.g. if
  # mutator is selected in the pool, but not available for a given target, it
  # would not be added to fuzzing strategies.)
  strategy_pool = strategy_selection.generate_weighted_strategy_pool(
      strategy_list=strategy.LIBFUZZER_STRATEGY_LIST,
      use_generator=True,
      engine_name='libFuzzer')
  strategy_info = pick_strategies(
      strategy_pool,
      fuzzer_path,
      corpus_directory,
      arguments,
      minijail_chroot=minijail_chroot)
  arguments.extend(strategy_info.arguments)

  # Timeout for fuzzer run.
  fuzz_timeout = get_fuzz_timeout(strategy_info.is_mutations_run)

  # Get list of corpus directories.
  # TODO(flowerhack): Implement this to handle corpus sync'ing.
  if environment.platform() == 'FUCHSIA':
    corpus_directories = []
  else:
    corpus_directories = get_corpus_directories(
        corpus_directory,
        new_testcases_directory,
        fuzzer_path,
        strategy_info.fuzzing_strategies,
        strategy_pool,
        minijail_chroot=minijail_chroot,
        allow_corpus_subset=not strategy_info.use_dataflow_tracing)

  corpus_directories.extend(strategy_info.additional_corpus_dirs)

  # Bind corpus directories in minijail.
  if use_minijail:
    artifact_prefix = constants.ARTIFACT_PREFIX_FLAG + '/'
  else:
    artifact_prefix = '%s%s/' % (constants.ARTIFACT_PREFIX_FLAG,
                                 os.path.abspath(
                                     os.path.dirname(testcase_file_path)))
  # Execute the fuzzer binary with original arguments.
  fuzz_result = runner.fuzz(
      corpus_directories,
      fuzz_timeout=fuzz_timeout,
      additional_args=arguments + [artifact_prefix],
      extra_env=strategy_info.extra_env)

  if (not use_minijail and
      fuzz_result.return_code == constants.LIBFUZZER_ERROR_EXITCODE):
    # Minijail returns 1 for any nonzero exit code, so this check only applies
    # outside minijail, where a return code of 1 means that libFuzzer itself
    # ran into an error.
    logs.log_error(ENGINE_ERROR_MESSAGE, engine_output=fuzz_result.output)

  log_lines = fuzz_result.output.splitlines()
  # Output can be large, so save some memory by removing reference to the
  # original output which is no longer needed.
  fuzz_result.output = None

  # Check if we crashed, and get the crash testcase path.
  crash_testcase_file_path = None
  for line in log_lines:
    match = re.match(CRASH_TESTCASE_REGEX, line)
    if match:
      crash_testcase_file_path = match.group(1)
      break

  if crash_testcase_file_path:
    # Write the new testcase.
    if use_minijail:
      # Convert chroot relative path to host path. Remove the leading '/' before
      # joining.
      crash_testcase_file_path = os.path.join(minijail_chroot.directory,
                                              crash_testcase_file_path[1:])

    # Copy crash testcase contents into the main testcase path.
    shutil.move(crash_testcase_file_path, testcase_file_path)

  # Print the command output.
  bot_name = environment.get_value('BOT_NAME', '')
  command = fuzz_result.command
  if use_minijail:
    # Remove minijail prefix.
    command = engine_common.strip_minijail_command(command, fuzzer_path)
  print(engine_common.get_log_header(command, bot_name,
                                     fuzz_result.time_executed))

  # Parse stats information based on libFuzzer output.
  parsed_stats = parse_log_stats(log_lines)

  # Extend parsed stats by additional performance features.
  parsed_stats.update(
      stats.parse_performance_features(
          log_lines, strategy_info.fuzzing_strategies, arguments))

  # Set some initial stat overrides.
  timeout_limit = fuzzer_utils.extract_argument(
      arguments, constants.TIMEOUT_FLAG, remove=False)

  expected_duration = runner.get_max_total_time(fuzz_timeout)
  actual_duration = int(fuzz_result.time_executed)
  fuzzing_time_percent = 100 * actual_duration / float(expected_duration)
  stat_overrides = {
      'timeout_limit': int(timeout_limit),
      'expected_duration': expected_duration,
      'actual_duration': actual_duration,
      'fuzzing_time_percent': fuzzing_time_percent,
  }

  # Remove fuzzing arguments before merge and dictionary analysis step.
  remove_fuzzing_arguments(arguments)

  # Make a decision on whether merge step is needed at all. If there are no
  # new units added by libFuzzer run, then no need to do merge at all.
  new_units_added = shell.get_directory_file_count(new_testcases_directory)
  merge_error = None
  if new_units_added:
    # Merge the new units with the initial corpus.
    if corpus_directory not in corpus_directories:
      corpus_directories.append(corpus_directory)

    # If this times out, it's possible that we will miss some units. However, if
    # we're taking >10 minutes to load/merge the corpus something is going very
    # wrong and we probably don't want to make things worse by adding units
    # anyway.

    merge_tmp_dir = None
    if not use_minijail:
      merge_tmp_dir = os.path.join(fuzzer_utils.get_temp_dir(), 'merge_workdir')
      engine_common.recreate_directory(merge_tmp_dir)

    old_corpus_len = shell.get_directory_file_count(corpus_directory)
    merge_directory = create_merge_directory()
    corpus_directories.insert(0, merge_directory)

    if use_minijail:
      bind_corpus_dirs(minijail_chroot, [merge_directory])

    merge_result = runner.merge(
        corpus_directories,
        merge_timeout=engine_common.get_merge_timeout(DEFAULT_MERGE_TIMEOUT),
        tmp_dir=merge_tmp_dir,
        additional_args=arguments)

    move_mergeable_units(merge_directory, corpus_directory)
    new_corpus_len = shell.get_directory_file_count(corpus_directory)
    new_units_added = 0

    merge_error = None
    if merge_result.timed_out:
      merge_error = 'Merging new testcases timed out:'
    elif merge_result.return_code != 0:
      merge_error = 'Merging new testcases failed:'
    else:
      new_units_added = new_corpus_len - old_corpus_len

    stat_overrides['new_units_added'] = new_units_added

    if merge_result.output:
      stat_overrides.update(
          stats.parse_stats_from_merge_log(merge_result.output.splitlines()))
  else:
    stat_overrides['new_units_added'] = 0
    logs.log('Skipped corpus merge since no new units added by fuzzing.')

  # Get corpus size after merge. This removes the duplicate units that were
  # created during this fuzzing session.
  # TODO(flowerhack): Remove this workaround once we can handle corpus sync.
  if environment.platform() != 'FUCHSIA':
    stat_overrides['corpus_size'] = shell.get_directory_file_count(
        corpus_directory)

  # Delete all corpus directories except for the main one. These were temporary
  # directories to store new testcase mutations and have already been merged to
  # main corpus directory.
  if corpus_directory in corpus_directories:
    corpus_directories.remove(corpus_directory)
  for directory in corpus_directories:
    shutil.rmtree(directory, ignore_errors=True)

  if use_minijail:
    unbind_corpus_dirs(minijail_chroot, corpus_directories)

  # Apply overridden stats to the parsed stats prior to dumping.
  parsed_stats.update(stat_overrides)

  # Dump stats data for further uploading to BigQuery.
  engine_common.dump_big_query_data(parsed_stats, testcase_file_path, command)

  # Add custom crash state based on fuzzer name (if needed).
  add_custom_crash_state_if_needed(fuzzer_name, log_lines, parsed_stats)
  for line in log_lines:
    print(line)

  # Add fuzzing strategies used.
  print(engine_common.format_fuzzing_strategies(
      strategy_info.fuzzing_strategies))

  # Add merge error (if any).
  if merge_error:
    print(data_types.CRASH_STACKTRACE_END_MARKER)
    print(merge_error)
    print('Command:',
          get_printable_command(merge_result.command, fuzzer_path,
                                use_minijail))
    print(merge_result.output)

  analyze_and_update_recommended_dictionary(runner, fuzzer_name, log_lines,
                                            corpus_directory, arguments)

  # Close minijail chroot.
  if use_minijail:
    minijail_chroot.close()

  # Record the stats to make them easily searchable in stackdriver.
  if new_units_added:
    logs.log(
        'New units added to corpus: %d.' % new_units_added, stats=parsed_stats)
  else:
    logs.log('No new units found.', stats=parsed_stats)
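
The pool-versus-enabled distinction described before pick_strategies is called is the key idea here: strategies are first drawn into a pool by weighted random choice, and only those actually supported by the target end up in fuzzing_strategies. A toy sketch of that two-step selection (names and weights are invented for illustration):

import random

STRATEGY_WEIGHTS = {
    'value_profile': 0.33,
    'fork': 0.5,
    'dataflow_tracing': 0.25,
}

def generate_strategy_pool(weights):
    """Independently include each strategy with its configured probability."""
    return [name for name, probability in weights.items()
            if random.random() < probability]

def enabled_strategies(pool, supported_by_target):
    """A pooled strategy is only enabled if the target supports it."""
    return [name for name in pool if name in supported_by_target]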
Example #5
def main(argv):
  """Run libFuzzer as specified by argv."""
  atexit.register(fuzzer_utils.cleanup)

  # Initialize variables.
  arguments = argv[1:]
  testcase_file_path = arguments.pop(0)
  target_name = arguments.pop(0)
  fuzzer_name = data_types.fuzz_target_project_qualified_name(
      utils.current_project(), target_name)

  # Initialize log handler.
  logs.configure(
      'run_fuzzer', {
          'fuzzer': fuzzer_name,
          'engine': 'libFuzzer',
          'job_name': environment.get_value('JOB_NAME')
      })

  profiler.start_if_needed('libfuzzer_launcher')

  # Make sure that the fuzzer binary exists.
  build_directory = environment.get_value('BUILD_DIR')
  fuzzer_path = engine_common.find_fuzzer_path(build_directory, target_name)
  if not fuzzer_path:
    # This is an expected case when doing regression testing with old builds
    # that do not have that fuzz target. It can also happen when a host sends a
    # message to an untrusted worker that just restarted and lost information on
    # build directory.
    logs.log_warn('Could not find fuzz target %s.' % target_name)
    return

  # Install signal handler.
  signal.signal(signal.SIGTERM, engine_common.signal_term_handler)

  # Set up temp dir.
  engine_common.recreate_directory(fuzzer_utils.get_temp_dir())

  # Setup minijail if needed.
  use_minijail = environment.get_value('USE_MINIJAIL')
  runner = libfuzzer.get_runner(
      fuzzer_path, temp_dir=fuzzer_utils.get_temp_dir())

  if use_minijail:
    minijail_chroot = runner.chroot
  else:
    minijail_chroot = None

  # Get corpus directory.
  corpus_directory = environment.get_value('FUZZ_CORPUS_DIR')

  # Add common arguments that are necessary for every run.
  arguments = expand_with_common_arguments(arguments)

  # Add sanitizer options to environment that were specified in the .options
  # file and options that this script requires.
  set_sanitizer_options(fuzzer_path)

  # Minimize test argument.
  minimize_to = fuzzer_utils.extract_argument(arguments, MINIMIZE_TO_ARGUMENT)
  minimize_timeout = fuzzer_utils.extract_argument(arguments,
                                                   MINIMIZE_TIMEOUT_ARGUMENT)

  if minimize_to and minimize_timeout:
    minimize_testcase(runner, testcase_file_path, minimize_to,
                      int(minimize_timeout), arguments, use_minijail)
    return

  # Cleanse argument.
  cleanse_to = fuzzer_utils.extract_argument(arguments, CLEANSE_TO_ARGUMENT)
  cleanse_timeout = fuzzer_utils.extract_argument(arguments,
                                                  CLEANSE_TIMEOUT_ARGUMENT)

  if cleanse_to and cleanse_timeout:
    cleanse_testcase(runner, testcase_file_path, cleanse_to,
                     int(cleanse_timeout), arguments, use_minijail)
    return

  # If we don't have a corpus, then that means this is not a fuzzing run.
  if not corpus_directory:
    load_testcase_if_exists(runner, testcase_file_path, fuzzer_name,
                            use_minijail, arguments)
    return

  # We don't have a crash testcase; fuzz.

  # Check dict argument to make sure that it's valid.
  dict_argument = fuzzer_utils.extract_argument(
      arguments, constants.DICT_FLAG, remove=False)
  if dict_argument and not os.path.exists(dict_argument):
    logs.log_error('Invalid dict %s for %s.' % (dict_argument, fuzzer_name))
    fuzzer_utils.extract_argument(arguments, constants.DICT_FLAG)

  # If there's no dict argument, check for %target_binary_name%.dict file.
  if (not fuzzer_utils.extract_argument(
      arguments, constants.DICT_FLAG, remove=False)):
    default_dict_path = dictionary_manager.get_default_dictionary_path(
        fuzzer_path)
    if os.path.exists(default_dict_path):
      arguments.append(constants.DICT_FLAG + default_dict_path)

  fuzzing_strategies = []

  # Select a generator to use for existing testcase mutations.
  generator = _select_generator()
  is_mutations_run = generator != Generator.NONE

  # Timeout for fuzzer run.
  fuzz_timeout = get_fuzz_timeout(is_mutations_run)

  # Get list of corpus directories.
  corpus_directories = get_corpus_directories(
      corpus_directory, fuzzer_path, fuzzing_strategies, minijail_chroot)

  # Bind corpus directories in minijail.
  if use_minijail:
    artifact_prefix = constants.ARTIFACT_PREFIX_FLAG + '/'
  else:
    artifact_prefix = '%s%s/' % (constants.ARTIFACT_PREFIX_FLAG,
                                 os.path.abspath(
                                     os.path.dirname(testcase_file_path)))

  # Generate new testcase mutations using radamsa, etc.
  if is_mutations_run:
    new_testcase_mutations_directory = generate_new_testcase_mutations(
        corpus_directory, fuzzer_name, generator, fuzzing_strategies)
    corpus_directories.append(new_testcase_mutations_directory)
    if use_minijail:
      bind_corpus_dirs(minijail_chroot, [new_testcase_mutations_directory])

  max_len_argument = fuzzer_utils.extract_argument(
      arguments, constants.MAX_LEN_FLAG, remove=False)
  if not max_len_argument and do_random_max_length():
    max_length = random.SystemRandom().randint(1, MAX_VALUE_FOR_MAX_LENGTH)
    arguments.append('%s%d' % (constants.MAX_LEN_FLAG, max_length))
    fuzzing_strategies.append(strategy.RANDOM_MAX_LENGTH_STRATEGY)

  if do_recommended_dictionary():
    if add_recommended_dictionary(arguments, fuzzer_name, fuzzer_path):
      fuzzing_strategies.append(strategy.RECOMMENDED_DICTIONARY_STRATEGY)

  if do_value_profile():
    arguments.append(constants.VALUE_PROFILE_ARGUMENT)
    fuzzing_strategies.append(strategy.VALUE_PROFILE_STRATEGY)

  if do_fork():
    max_fuzz_threads = environment.get_value('MAX_FUZZ_THREADS', 1)
    num_fuzz_processes = max(1, multiprocessing.cpu_count() // max_fuzz_threads)
    arguments.append('%s%d' % (constants.FORK_FLAG, num_fuzz_processes))
    fuzzing_strategies.append(
        '%s_%d' % (strategy.FORK_STRATEGY, num_fuzz_processes))

  # Execute the fuzzer binary with original arguments.
  fuzz_result = runner.fuzz(
      corpus_directories,
      fuzz_timeout=fuzz_timeout,
      additional_args=arguments + [artifact_prefix])

  if (not use_minijail and
      fuzz_result.return_code == constants.LIBFUZZER_ERROR_EXITCODE):
    # Minijail returns 1 for any nonzero exit code, so this check only applies
    # outside minijail, where a return code of 1 means that libFuzzer itself
    # ran into an error.
    logs.log_error(ENGINE_ERROR_MESSAGE, engine_output=fuzz_result.output)

  log_lines = fuzz_result.output.splitlines()
  # Output can be large, so save some memory by removing reference to the
  # original output which is no longer needed.
  fuzz_result.output = None

  # Check if we crashed, and get the crash testcase path.
  crash_testcase_file_path = None
  for line in log_lines:
    match = re.match(CRASH_TESTCASE_REGEX, line)
    if match:
      crash_testcase_file_path = match.group(1)
      break

  if crash_testcase_file_path:
    # Write the new testcase.
    if use_minijail:
      # Convert chroot relative path to host path. Remove the leading '/' before
      # joining.
      crash_testcase_file_path = os.path.join(minijail_chroot.directory,
                                              crash_testcase_file_path[1:])

    # Copy crash testcase contents into the main testcase path.
    shutil.move(crash_testcase_file_path, testcase_file_path)

  # Print the command output.
  log_header_format = ('Command: %s\n' 'Bot: %s\n' 'Time ran: %f\n')
  bot_name = environment.get_value('BOT_NAME', '')
  command = fuzz_result.command
  if use_minijail:
    # Remove minijail prefix.
    command = engine_common.strip_minijail_command(command, fuzzer_path)
  print(log_header_format % (engine_common.get_command_quoted(command),
                             bot_name, fuzz_result.time_executed))

  # Parse stats information based on libFuzzer output.
  parsed_stats = parse_log_stats(log_lines)

  # Extend parsed stats by additional performance features.
  parsed_stats.update(
      stats.parse_performance_features(log_lines, fuzzing_strategies,
                                       arguments))

  # Set some initial stat overrides.
  timeout_limit = fuzzer_utils.extract_argument(
      arguments, constants.TIMEOUT_FLAG, remove=False)

  expected_duration = runner.get_max_total_time(fuzz_timeout)
  actual_duration = int(fuzz_result.time_executed)
  fuzzing_time_percent = 100 * actual_duration / float(expected_duration)
  stat_overrides = {
      'timeout_limit': int(timeout_limit),
      'expected_duration': expected_duration,
      'actual_duration': actual_duration,
      'fuzzing_time_percent': fuzzing_time_percent,
  }

  # Remove fuzzing arguments before merge and dictionary analysis step.
  remove_fuzzing_arguments(arguments)

  # Make a decision on whether merge step is needed at all. If there are no
  # new units added by libFuzzer run, then no need to do merge at all.
  new_units_added = parsed_stats.get('new_units_added', 0)
  merge_error = None
  if new_units_added:
    # Merge the new units back into the corpus.
    # For merge, main corpus directory should be passed first of all corpus
    # directories.
    if corpus_directory in corpus_directories:
      corpus_directories.remove(corpus_directory)
    corpus_directories = [corpus_directory] + corpus_directories

    # If this times out, it's possible that we will miss some units. However, if
    # we're taking >10 minutes to load/merge the corpus something is going very
    # wrong and we probably don't want to make things worse by adding units
    # anyway.

    merge_tmp_dir = None
    if not use_minijail:
      merge_tmp_dir = os.path.join(fuzzer_utils.get_temp_dir(), 'merge_workdir')
      engine_common.recreate_directory(merge_tmp_dir)

    old_corpus_len = shell.get_directory_file_count(corpus_directory)
    merge_result = runner.merge(
        corpus_directories,
        merge_timeout=engine_common.get_merge_timeout(DEFAULT_MERGE_TIMEOUT),
        tmp_dir=merge_tmp_dir,
        additional_args=arguments)
    new_corpus_len = shell.get_directory_file_count(corpus_directory)
    new_units_added = 0

    merge_error = None
    if merge_result.timed_out:
      merge_error = 'Merging new testcases timed out:'
    elif merge_result.return_code != 0:
      merge_error = 'Merging new testcases failed:'
    else:
      new_units_added = new_corpus_len - old_corpus_len

    stat_overrides['new_units_added'] = new_units_added

    if merge_result.output:
      stat_overrides.update(
          stats.parse_stats_from_merge_log(merge_result.output.splitlines()))
  else:
    stat_overrides['new_units_added'] = 0
    logs.log('Skipped corpus merge since no new units added by fuzzing.')

  # Get corpus size after merge. This removes the duplicate units that were
  # created during this fuzzing session.
  stat_overrides['corpus_size'] = shell.get_directory_file_count(
      corpus_directory)

  # Delete all corpus directories except for the main one. These were temporary
  # directories to store new testcase mutations and have already been merged to
  # main corpus directory.
  if corpus_directory in corpus_directories:
    corpus_directories.remove(corpus_directory)
  for directory in corpus_directories:
    shutil.rmtree(directory, ignore_errors=True)

  if use_minijail:
    unbind_corpus_dirs(minijail_chroot, corpus_directories)

  # Apply overridden stats to the parsed stats prior to dumping.
  parsed_stats.update(stat_overrides)

  # Dump stats data for further uploading to BigQuery.
  engine_common.dump_big_query_data(parsed_stats, testcase_file_path,
                                    LIBFUZZER_PREFIX, fuzzer_name, command)

  # Add custom crash state based on fuzzer name (if needed).
  add_custom_crash_state_if_needed(fuzzer_name, log_lines, parsed_stats)
  for line in log_lines:
    print(line)

  # Add fuzzing strategies used.
  engine_common.print_fuzzing_strategies(fuzzing_strategies)

  # Add merge error (if any).
  if merge_error:
    print(data_types.CRASH_STACKTRACE_END_MARKER)
    print(merge_error)
    print('Command:',
          get_printable_command(merge_result.command, fuzzer_path,
                                use_minijail))
    print(merge_result.output)

  analyze_and_update_recommended_dictionary(runner, fuzzer_name, log_lines,
                                            corpus_directory, arguments)

  # Close minijail chroot.
  if use_minijail:
    minijail_chroot.close()

  # Whenever new units are added to corpus, record the stats to make them
  # easily searchable in stackdriver.
  if new_units_added:
    logs.log(
        'New units added to corpus: %d.' % new_units_added, stats=parsed_stats)
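
The fork-mode arithmetic in this example divides the CPU count by MAX_FUZZ_THREADS to decide how many libFuzzer processes to spawn. A worked example of that computation, assuming constants.FORK_FLAG is libFuzzer's -fork= option:

import multiprocessing

max_fuzz_threads = 2  # Hypothetical MAX_FUZZ_THREADS environment value.
num_fuzz_processes = max(1, multiprocessing.cpu_count() // max_fuzz_threads)
# On an 8-core bot this appends '-fork=4' to the libFuzzer arguments.
print('-fork=%d' % num_fuzz_processes)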
Example #6
def test_configure_appengine(self):
    """Test configure on App Engine."""
    self.mock._is_running_on_app_engine.return_value = True  # pylint: disable=protected-access
    logs.configure('test')
    self.assertEqual(0, self.mock.dictConfig.call_count)
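
The assertion says that on App Engine, configure must never reach logging's dictConfig. A guard of roughly this shape would satisfy the test (a sketch; _make_config is hypothetical and the real logs module may differ):

import logging.config

def configure(name, extras=None):
    """Configure logging, skipping local handler setup on App Engine."""
    if _is_running_on_app_engine():
        # App Engine supplies its own logging integration, so the local
        # dictConfig-based setup is skipped entirely.
        return
    logging.config.dictConfig(_make_config(name, extras))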
Example #7
    ('/testcase-detail/update-from-trunk', update_from_trunk.Handler),
    ('/testcase-detail/update-issue', update_issue.Handler),
    ('/testcases', testcase_list.Handler),
    ('/testcases/load', testcase_list.JsonHandler),
    ('/upload-testcase', upload_testcase.Handler),
    ('/upload-testcase/get-url-oauth', upload_testcase.UploadUrlHandlerOAuth),
    ('/upload-testcase/prepare', upload_testcase.PrepareUploadHandler),
    ('/upload-testcase/load', upload_testcase.JsonHandler),
    ('/upload-testcase/upload', upload_testcase.UploadHandler),
    ('/upload-testcase/upload-oauth', upload_testcase.UploadHandlerOAuth),
    ('/update-job', jobs.UpdateJob),
    ('/update-job-template', jobs.UpdateJobTemplate),
    ('/viewer', viewer.Handler),
]

logs.configure('appengine')
config = local_config.GAEConfig()
main_domain = config.get('domains.main')
redirect_domains = config.get('domains.redirects')


def redirect_handler():
    """Redirection handler."""
    if not redirect_domains:
        return None

    if request.host in redirect_domains:
        return redirect('https://' + main_domain + request.full_path)

    return None
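
redirect_handler returns either a redirect response or None, and returning None lets the request continue normally. That contract matches Flask's before_request hooks, so assuming this module is a Flask app (the request and redirect names above suggest it), wiring it in might look like:

from flask import Flask

app = Flask(__name__)

@app.before_request
def check_redirect():
    # A non-None return value short-circuits normal request dispatch and is
    # sent to the client directly.
    return redirect_handler()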
Example #8
def main(argv):
    """Run afl as specified by argv."""
    atexit.register(fuzzer_utils.cleanup)

    # Initialize variables.
    _, testcase_file_path, target_name = argv[:3]
    input_directory = environment.get_value('FUZZ_CORPUS_DIR')
    fuzzer_name = data_types.fuzz_target_project_qualified_name(
        utils.current_project(), target_name)

    # Initialize log handler.
    logs.configure(
        'run_fuzzer', {
            'fuzzer': fuzzer_name,
            'engine': 'afl',
            'job_name': environment.get_value('JOB_NAME')
        })

    build_directory = environment.get_value('BUILD_DIR')
    fuzzer_path = engine_common.find_fuzzer_path(build_directory, target_name)
    if not fuzzer_path:
        # This is an expected case when doing regression testing with old builds
        # that do not have that fuzz target. It can also happen when a host sends a
        # message to an untrusted worker that just restarted and lost information on
        # build directory.
        logs.log_warn('Could not find fuzz target %s.' % target_name)
        return

    # Install signal handler.
    signal.signal(signal.SIGTERM, engine_common.signal_term_handler)

    # Set up temp dir.
    engine_common.recreate_directory(fuzzer_utils.get_temp_dir())

    config = AflConfig.from_target_path(fuzzer_path)

    runner = AflRunner(fuzzer_path, config, testcase_file_path,
                       input_directory)

    # Add *SAN_OPTIONS overrides from .options file.
    engine_common.process_sanitizer_options_overrides(fuzzer_path)

    # If we don't have a corpus, then that means this is not a fuzzing run.
    if not input_directory:
        load_testcase_if_exists(runner, testcase_file_path)
        return

    # Make sure afl won't exit because of bad sanitizer options.
    set_additional_sanitizer_options_for_afl_fuzz()

    # Execute afl-fuzz on the fuzzing target.
    fuzz_result = runner.fuzz()

    # Print info for the fuzzer logs.
    command = fuzz_result.command
    print(('Command: {0}\n'
           'Bot: {1}\n'
           'Time ran: {2}\n').format(engine_common.get_command_quoted(command),
                                     BOT_NAME, fuzz_result.time_executed))

    print(fuzz_result.output)
    runner.strategies.print_strategies()

    if fuzz_result.return_code:
        # If AFL returned a nonzero return code, quit now without getting
        # stats, since they would be meaningless.
        print(runner.fuzzer_stderr)
        return

    stats_getter = stats.StatsGetter(runner.afl_output.stats_path,
                                     config.dict_path)
    try:
        new_units_generated, new_units_added, corpus_size = (
            runner.libfuzzerize_corpus())
        stats_getter.set_stats(fuzz_result.time_executed, new_units_generated,
                               new_units_added, corpus_size, runner.strategies,
                               runner.fuzzer_stderr, fuzz_result.output)

        engine_common.dump_big_query_data(stats_getter.stats,
                                          testcase_file_path, AFL_PREFIX,
                                          fuzzer_name, command)

    finally:
        print(runner.fuzzer_stderr)

    # Whenever new units are added to corpus, record the stats to make them
    # easily searchable in stackdriver.
    if new_units_added:
        logs.log('New units added to corpus: %d.' % new_units_added,
                 stats=stats_getter.stats)
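
The set_additional_sanitizer_options_for_afl_fuzz call earlier exists because afl-fuzz refuses to start against an ASAN build unless ASAN is configured to abort on error without symbolizing. A guess at a minimal version of that helper (the real one likely merges options more carefully):

import os

def set_additional_sanitizer_options_for_afl_fuzz():
    """Force the ASAN options afl-fuzz requires in order to run at all."""
    # afl-fuzz checks for abort_on_error=1 and symbolize=0 and exits if they
    # are missing, since it relies on abort() to detect crashes.
    existing = os.environ.get('ASAN_OPTIONS', '')
    required = 'abort_on_error=1:symbolize=0'
    os.environ['ASAN_OPTIONS'] = ':'.join(filter(None, [existing, required]))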