Example #1
def beat(previous_state, log_filename):
    """Run a cycle of heartbeat checks to ensure bot is running."""
    # Handle the case where the run_bot.py script is stuck. If so, kill its process.
    task_end_time = tasks.get_task_end_time()
    if psutil and task_end_time and dates.time_has_expired(
            task_end_time, seconds=tasks.TASK_COMPLETION_BUFFER):

        # Get the absolute path to the |run_bot| script. We use this to identify
        # unique instances of the bot running on a particular host.
        startup_scripts_directory = environment.get_startup_scripts_directory()
        bot_file_path = os.path.join(startup_scripts_directory, 'run_bot')

        for process in psutil.process_iter():
            try:
                command_line = ' '.join(process.cmdline())
            except (psutil.AccessDenied, psutil.NoSuchProcess, OSError):
                sys.exc_clear()
                continue

            # Find the process running the main bot script.
            if bot_file_path not in command_line:
                continue

            process_id = process.pid
            logs.log('Killing stale bot (pid %d) which seems to be stuck.' %
                     process_id)
            try:
                process_handler.terminate_root_and_child_processes(process_id)
            except Exception:
                logs.log_error('Failed to terminate stale bot processes.')

        # Minor cleanup to avoid disk space issues on bot restart.
        process_handler.terminate_stale_application_instances()
        shell.clear_temp_directory()
        shell.clear_testcase_directories()

        # The stale processes should now have been killed, so delete the stale task.
        tasks.track_task_end()

    # Figure out when the log file was last modified.
    try:
        current_state = str(os.path.getmtime(log_filename))
    except Exception:
        current_state = None

    logs.log('Old state %s, current state %s.' %
             (previous_state, current_state))

    # Only update the heartbeat if the log file was modified.
    if current_state and current_state != previous_state:
        # Try updating the heartbeat. If an error occurs, just
        # wait and return None.
        if not data_handler.update_heartbeat():
            return None
        # Heartbeat is successfully updated.

    return current_state
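
A minimal driver for beat() might look like the sketch below. It is an assumption about how the function is meant to be called, not part of the snippet above: the returned state is fed back in as previous_state on the next cycle, and the wait interval is a made-up value.

import time

# Hypothetical polling loop; HEARTBEAT_WAIT_INTERVAL is an assumed constant.
HEARTBEAT_WAIT_INTERVAL = 10 * 60  # seconds


def run_heartbeat(log_filename):
    """Repeatedly run heartbeat checks (illustrative sketch only)."""
    previous_state = None
    while True:
        # beat() returns the log file's last-modified time as a string, or
        # None if it could not be read or the heartbeat update failed; either
        # way it becomes the previous_state for the next cycle.
        previous_state = beat(previous_state, log_filename)
        time.sleep(HEARTBEAT_WAIT_INTERVAL)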
Example #2
  def test_clear_testcase_directories(self):
    """Test clearing test directories."""
    fuzz_inputs = os.environ['FUZZ_INPUTS']
    worker_fuzz_inputs = file_host.rebase_to_worker_root(fuzz_inputs)

    fuzz_inputs_disk = os.environ['FUZZ_INPUTS_DISK']
    worker_fuzz_inputs_disk = file_host.rebase_to_worker_root(fuzz_inputs_disk)

    with open(os.path.join(worker_fuzz_inputs, 'file'), 'w') as f:
      f.write('blah')

    with open(os.path.join(worker_fuzz_inputs_disk, 'file'), 'w') as f:
      f.write('blah2')

    shell.clear_testcase_directories()
    self.assertEqual(len(os.listdir(worker_fuzz_inputs)), 0)
    self.assertEqual(len(os.listdir(worker_fuzz_inputs_disk)), 0)
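
The test above only checks the observable behaviour: after the call, both testcase directories are empty. A plausible sketch of shell.clear_testcase_directories along those lines is shown below; the use of the FUZZ_INPUTS and FUZZ_INPUTS_DISK environment variables mirrors the test, but the helper itself is an assumption, not the real shell module code.

import os
import shutil


def clear_testcase_directories():
  """Remove everything inside the testcase input directories (sketch only)."""
  for variable in ('FUZZ_INPUTS', 'FUZZ_INPUTS_DISK'):
    directory = os.environ.get(variable)
    if not directory or not os.path.isdir(directory):
      continue

    for entry in os.listdir(directory):
      path = os.path.join(directory, entry)
      if os.path.isdir(path):
        shutil.rmtree(path, ignore_errors=True)
      else:
        os.remove(path)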
Example #3
def cleanup_task_state():
  """Cleans state before and after a task is executed."""
  # Cleanup stale processes.
  process_handler.cleanup_stale_processes()

  # Clear build urls, temp and testcase directories.
  shell.clear_build_urls_directory()
  shell.clear_crash_stacktraces_directory()
  shell.clear_testcase_directories()
  shell.clear_temp_directory()
  shell.clear_system_temp_directory()
  shell.clear_device_temp_directories()

  # Reset memory tool environment variables.
  environment.reset_current_memory_tool_options()

  # Call python's garbage collector.
  utils.python_gc()
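
A hypothetical call site is sketched below, just to show where cleanup_task_state sits relative to the docstring's "before and after a task" contract. run_command is a stand-in for whatever actually executes the task and is not taken from the original code.

def execute_task_with_cleanup(task_name, task_argument, job_name):
  """Run a single task with cleanup on both sides (illustrative sketch)."""
  # Start from a clean slate: no stale processes, temp files or testcases.
  cleanup_task_state()
  try:
    run_command(task_name, task_argument, job_name)  # assumed task runner
  finally:
    # Leave the bot clean for the next task, even if this one raised.
    cleanup_task_state()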
Example #4
def setup_testcase(testcase, fuzzer_override=None):
    """Sets up the testcase and needed dependencies like fuzzer,
  data bundle, etc."""
    fuzzer_name = fuzzer_override or testcase.fuzzer_name
    job_type = testcase.job_type
    task_name = environment.get_value('TASK_NAME')
    testcase_fail_wait = environment.get_value('FAIL_WAIT')
    testcase_id = testcase.key.id()

    # Clear testcase directories.
    shell.clear_testcase_directories()

    # Adjust the test timeout value if this is coming from a user-uploaded
    # testcase.
    _set_timeout_value_from_user_upload(testcase_id)

    # Update the fuzzer if necessary in order to get the updated data bundle.
    if fuzzer_name:
        try:
            update_successful = update_fuzzer_and_data_bundles(fuzzer_name)
        except errors.InvalidFuzzerError:
            # Close testcase and don't recreate tasks if this fuzzer is invalid.
            testcase.open = False
            testcase.fixed = 'NA'
            testcase.set_metadata('fuzzer_was_deleted', True)
            logs.log_error('Closed testcase %d with invalid fuzzer %s.' %
                           (testcase_id, fuzzer_name))

            error_message = 'Fuzzer %s no longer exists' % fuzzer_name
            data_handler.update_testcase_comment(testcase,
                                                 data_types.TaskState.ERROR,
                                                 error_message)
            return None, None, None

        if not update_successful:
            error_message = 'Unable to setup fuzzer %s' % fuzzer_name
            data_handler.update_testcase_comment(testcase,
                                                 data_types.TaskState.ERROR,
                                                 error_message)
            tasks.add_task(task_name,
                           testcase_id,
                           job_type,
                           wait_time=testcase_fail_wait)
            return None, None, None

    # Extract the testcase and any of its resources to the input directory.
    file_list, input_directory, testcase_file_path = unpack_testcase(testcase)
    if not file_list:
        error_message = 'Unable to setup testcase %s' % testcase_file_path
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             error_message)
        tasks.add_task(task_name,
                       testcase_id,
                       job_type,
                       wait_time=testcase_fail_wait)
        return None, None, None

    # For Android/Fuchsia, we need to sync our local testcases directory with the
    # one on the device.
    if environment.platform() == 'ANDROID':
        _copy_testcase_to_device_and_setup_environment(testcase,
                                                       testcase_file_path)

    # Push testcases to worker.
    if environment.is_trusted_host():
        from bot.untrusted_runner import file_host
        file_host.push_testcases_to_worker()

    # Copy global blacklist into local blacklist.
    is_lsan_enabled = environment.get_value('LSAN')
    if is_lsan_enabled:
        # Get local blacklist without this testcase's entry.
        leak_blacklist.copy_global_to_local_blacklist(
            excluded_testcase=testcase)

    prepare_environment_for_testcase(testcase)

    return file_list, input_directory, testcase_file_path
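
setup_testcase signals failure by returning (None, None, None) after logging an error and, in most cases, re-queuing the task, so callers are expected to bail out when the file list is empty. The caller below is a hypothetical sketch; run_testcase_analysis is a placeholder name, not a real function.

def analyze_uploaded_testcase(testcase):
    """Illustrative caller for setup_testcase (not from the original code)."""
    file_list, input_directory, testcase_file_path = setup_testcase(testcase)
    if not file_list:
        # setup_testcase already reported the error and either re-queued the
        # task or closed the testcase, so there is nothing more to do here.
        return

    # Placeholder for the actual analysis work.
    run_testcase_analysis(testcase, input_directory, testcase_file_path)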
Example #5
def execute_task(metadata_id, job_type):
  """Unpack a bundled testcase archive and create analyze jobs for each item."""
  metadata = ndb.Key(data_types.BundledArchiveMetadata, int(metadata_id)).get()
  if not metadata:
    logs.log_error('Invalid bundle metadata id %s.' % metadata_id)
    return

  bot_name = environment.get_value('BOT_NAME')
  upload_metadata = data_types.TestcaseUploadMetadata.query(
      data_types.TestcaseUploadMetadata.blobstore_key ==
      metadata.blobstore_key).get()
  if not upload_metadata:
    logs.log_error('Invalid upload metadata key %s.' % metadata.blobstore_key)
    return

  # Update the upload metadata with this bot name.
  upload_metadata.bot_name = bot_name
  upload_metadata.put()

  # We can't use FUZZ_INPUTS directory since it is constrained
  # by tmpfs limits.
  testcases_directory = environment.get_value('FUZZ_INPUTS_DISK')

  # Retrieve multi-testcase archive.
  archive_path = os.path.join(testcases_directory, metadata.archive_filename)
  if not blobs.read_blob_to_disk(metadata.blobstore_key, archive_path):
    logs.log_error('Could not retrieve archive for bundle %s.' % metadata_id)
    tasks.add_task('unpack', metadata_id, job_type)
    return

  try:
    archive.unpack(archive_path, testcases_directory)
  except Exception:
    logs.log_error('Could not unpack archive for bundle %s.' % metadata_id)
    tasks.add_task('unpack', metadata_id, job_type)
    return

  archive_state = data_types.ArchiveStatus.NONE
  bundled = True
  file_list = archive.get_file_list(archive_path)
  for file_path in file_list:
    absolute_file_path = os.path.join(testcases_directory, file_path)
    filename = os.path.basename(absolute_file_path)

    # Only files are actual testcases. Skip directories.
    if not os.path.isfile(absolute_file_path):
      continue

    try:
      with open(absolute_file_path, 'rb') as file_handle:
        blob_key = blobs.write_blob(file_handle)
    except Exception:
      blob_key = None

    if not blob_key:
      logs.log_error(
          'Could not write testcase %s to blobstore.' % absolute_file_path)
      continue

    data_handler.create_user_uploaded_testcase(
        blob_key, metadata.blobstore_key, archive_state,
        metadata.archive_filename, filename, metadata.timeout,
        metadata.job_type, metadata.job_queue, metadata.http_flag,
        metadata.gestures, metadata.additional_arguments,
        metadata.bug_information, metadata.crash_revision,
        metadata.uploader_email, metadata.platform_id,
        metadata.app_launch_command, metadata.fuzzer_name,
        metadata.overridden_fuzzer_name, metadata.fuzzer_binary_name, bundled,
        upload_metadata.retries, upload_metadata.bug_summary_update_flag)

  # The upload metadata for the archive is not needed anymore since we created
  # one for each testcase.
  upload_metadata.key.delete()

  shell.clear_testcase_directories()
Example #6
def setup_testcase(testcase):
    """Sets up the testcase and needed dependencies like fuzzer,
  data bundle, etc."""
    fuzzer_name = testcase.fuzzer_name
    job_type = testcase.job_type
    task_name = environment.get_value('TASK_NAME')
    testcase_fail_wait = environment.get_value('FAIL_WAIT')
    testcase_id = testcase.key.id()

    # Clear testcase directories.
    shell.clear_testcase_directories()

    # Setup memory debugging tool environment.
    environment.reset_current_memory_tool_options(
        redzone_size=testcase.redzone)

    # Adjust the test timeout value if this is coming from a user-uploaded
    # testcase.
    _set_timeout_value_from_user_upload(testcase_id)

    if task_name == 'minimize':
        # Allow minimizing with a different fuzzer set up.
        minimize_fuzzer_override = environment.get_value(
            'MINIMIZE_FUZZER_OVERRIDE')
        fuzzer_name = minimize_fuzzer_override or fuzzer_name

    # Update the fuzzer if necessary in order to get the updated data bundle.
    if fuzzer_name:
        try:
            update_successful = update_fuzzer_and_data_bundles(fuzzer_name)
        except errors.InvalidFuzzerError:
            # Close testcase and don't recreate tasks if this fuzzer is invalid.
            testcase.open = False
            testcase.fixed = 'NA'
            testcase.set_metadata('fuzzer_was_deleted', True)
            logs.log_error('Closed testcase %d with invalid fuzzer %s.' %
                           (testcase_id, fuzzer_name))

            error_message = 'Fuzzer %s no longer exists.' % fuzzer_name
            data_handler.update_testcase_comment(testcase,
                                                 data_types.TaskState.ERROR,
                                                 error_message)
            return None, None, None

        if not update_successful:
            error_message = 'Unable to setup fuzzer %s.' % fuzzer_name
            data_handler.update_testcase_comment(testcase,
                                                 data_types.TaskState.ERROR,
                                                 error_message)
            tasks.add_task(task_name,
                           testcase_id,
                           job_type,
                           wait_time=testcase_fail_wait)
            return None, None, None

    # Extract the testcase and any of its resources to the input directory.
    file_list, input_directory, testcase_file_path = unpack_testcase(testcase)
    if not file_list:
        error_message = 'Unable to setup testcase %s.' % testcase_file_path
        data_handler.update_testcase_comment(testcase,
                                             data_types.TaskState.ERROR,
                                             error_message)
        tasks.add_task(task_name,
                       testcase_id,
                       job_type,
                       wait_time=testcase_fail_wait)
        return None, None, None

    # For Android/Fuchsia, we need to sync our local testcases directory with the
    # one on the device.
    if environment.platform() == 'ANDROID':
        _copy_testcase_to_device_and_setup_environment(testcase,
                                                       testcase_file_path)

    if environment.platform() == 'FUCHSIA':
        fuchsia.device.copy_testcase_to_device(testcase_file_path)

    # Push testcases to worker.
    if environment.is_trusted_host():
        from bot.untrusted_runner import file_host
        file_host.push_testcases_to_worker()

    # Copy global blacklist into local blacklist.
    is_lsan_enabled = environment.get_value('LSAN')
    if is_lsan_enabled:
        # Get local blacklist without this testcase's entry.
        leak_blacklist.copy_global_to_local_blacklist(
            excluded_testcase=testcase)

    # Set up the environment variable for window size and location properties.
    # Explicitly use empty string to indicate use of default window properties.
    if hasattr(testcase, 'window_argument'):
        environment.set_value('WINDOW_ARG', testcase.window_argument)

    # Adjust timeout based on the stored multiplier (if available).
    if hasattr(testcase, 'timeout_multiplier') and testcase.timeout_multiplier:
        test_timeout = environment.get_value('TEST_TIMEOUT')
        environment.set_value('TEST_TIMEOUT',
                              int(test_timeout * testcase.timeout_multiplier))

    # Override APP_ARGS with minimized arguments (if available).
    if (hasattr(testcase, 'minimized_arguments')
            and testcase.minimized_arguments):
        environment.set_value('APP_ARGS', testcase.minimized_arguments)

    # Add FUZZ_TARGET to environment if this is a fuzz target testcase.
    fuzz_target = testcase.get_metadata('fuzzer_binary_name')
    if fuzz_target:
        environment.set_value('FUZZ_TARGET', fuzz_target)

    return file_list, input_directory, testcase_file_path
Example #7
def do_libfuzzer_minimization(testcase, testcase_file_path):
  """Use libFuzzer's built-in minimizer where appropriate."""
  is_overriden_job = bool(environment.get_value('ORIGINAL_JOB_NAME'))

  def handle_unreproducible():
    # Be more lenient with marking testcases as unreproducible when this is a
    # job override.
    if is_overriden_job:
      _skip_minimization(testcase, 'Unreproducible on overridden job.')
    else:
      task_creation.mark_unreproducible_if_flaky(testcase, True)

  timeout = environment.get_value('LIBFUZZER_MINIMIZATION_TIMEOUT', 180)
  rounds = environment.get_value('LIBFUZZER_MINIMIZATION_ROUNDS', 10)
  current_testcase_path = testcase_file_path
  last_crash_result = None

  # Get initial crash state.
  initial_crash_result = _run_libfuzzer_testcase(testcase, testcase_file_path)
  if not initial_crash_result.is_crash():
    logs.log_warn('Did not crash. Output:\n' +
                  initial_crash_result.get_stacktrace(symbolized=True))
    handle_unreproducible()
    return

  if testcase.security_flag != initial_crash_result.is_security_issue():
    logs.log_warn('Security flag does not match.')
    handle_unreproducible()
    return

  task_creation.mark_unreproducible_if_flaky(testcase, False)

  expected_state = initial_crash_result.get_symbolized_data()
  logs.log('Initial crash state: %s\n' % expected_state.crash_state)

  # We attempt minimization multiple times in case one round results in an
  # incorrect state, or runs into another issue such as a slow unit.
  for round_number in range(1, rounds + 1):
    logs.log('Minimizing round %d.' % round_number)
    output_file_path, crash_result = _run_libfuzzer_tool(
        'minimize',
        testcase,
        current_testcase_path,
        timeout,
        expected_state.crash_state,
        set_dedup_flags=True)
    if output_file_path:
      last_crash_result = crash_result
      current_testcase_path = output_file_path

  if not last_crash_result:
    repro_command = tests.get_command_line_for_application(
        file_to_run=testcase_file_path, needs_http=testcase.http_flag)
    _skip_minimization(
        testcase,
        'LibFuzzer minimization failed.',
        crash_result=initial_crash_result,
        command=repro_command)
    return

  logs.log('LibFuzzer minimization succeeded.')

  if utils.is_oss_fuzz():
    # Scrub the testcase of non-essential data.
    cleansed_testcase_path = do_libfuzzer_cleanse(
        testcase, current_testcase_path, expected_state.crash_state)
    if cleansed_testcase_path:
      current_testcase_path = cleansed_testcase_path

  # Finalize the test case if we were able to reproduce it.
  repro_command = tests.get_command_line_for_application(
      file_to_run=current_testcase_path, needs_http=testcase.http_flag)
  finalize_testcase(testcase.key.id(), repro_command, last_crash_result)

  # Clean up after we're done.
  shell.clear_testcase_directories()