Example 1
def cleanup():
  """Clean up temporary metadata."""
  shell.remove_directory(get_temp_dir())
Example 2
def _cleanup():
    """Clean up after running the tool."""
    temp_directory = environment.get_value('ROOT_DIR')
    assert 'tmp' in temp_directory
    shell.remove_directory(temp_directory)
Example 3
def update_fuzzer_and_data_bundles(fuzzer_name):
  """Update the fuzzer with a given name if necessary."""
  fuzzer = data_types.Fuzzer.query(data_types.Fuzzer.name == fuzzer_name).get()
  if not fuzzer:
    logs.log_error('No fuzzer exists with name %s.' % fuzzer_name)
    raise errors.InvalidFuzzerError

  # Set some helper environment variables.
  fuzzer_directory = get_fuzzer_directory(fuzzer_name)
  environment.set_value('FUZZER_DIR', fuzzer_directory)
  environment.set_value('UNTRUSTED_CONTENT', fuzzer.untrusted_content)

  # Adjust the test timeout if the user has provided one.
  if fuzzer.timeout:
    environment.set_value('TEST_TIMEOUT', fuzzer.timeout)

    # Increase fuzz test timeout if the fuzzer timeout is higher than its
    # current value.
    fuzz_test_timeout = environment.get_value('FUZZ_TEST_TIMEOUT')
    if fuzz_test_timeout and fuzz_test_timeout < fuzzer.timeout:
      environment.set_value('FUZZ_TEST_TIMEOUT', fuzzer.timeout)

  # Adjust the max testcases if this fuzzer has specified a lower limit.
  max_testcases = environment.get_value('MAX_TESTCASES')
  if fuzzer.max_testcases and fuzzer.max_testcases < max_testcases:
    environment.set_value('MAX_TESTCASES', fuzzer.max_testcases)

  # Check for updates to this fuzzer.
  version_file = os.path.join(fuzzer_directory, '.%s_version' % fuzzer_name)
  if (not fuzzer.builtin and
      revisions.needs_update(version_file, fuzzer.revision)):
    logs.log('Fuzzer update was found, updating.')

    # Clear the old fuzzer directory if it exists.
    if not shell.remove_directory(fuzzer_directory, recreate=True):
      logs.log_error('Failed to clear fuzzer directory.')
      return False

    # Copy the archive to local disk and unpack it.
    archive_path = os.path.join(fuzzer_directory, fuzzer.filename)
    if not blobs.read_blob_to_disk(fuzzer.blobstore_key, archive_path):
      logs.log_error('Failed to copy fuzzer archive.')
      return False

    try:
      archive.unpack(archive_path, fuzzer_directory)
    except Exception:
      error_message = ('Failed to unpack fuzzer archive %s '
                       '(bad archive or unsupported format).') % fuzzer.filename
      logs.log_error(error_message)
      fuzzer_logs.upload_script_log(
          'Fatal error: ' + error_message, fuzzer_name=fuzzer_name)
      return False

    fuzzer_path = os.path.join(fuzzer_directory, fuzzer.executable_path)
    if not os.path.exists(fuzzer_path):
      error_message = ('Fuzzer executable %s not found. '
                       'Check fuzzer configuration.') % fuzzer.executable_path
      logs.log_error(error_message)
      fuzzer_logs.upload_script_log(
          'Fatal error: ' + error_message, fuzzer_name=fuzzer_name)
      return False

    # Make fuzzer executable.
    os.chmod(fuzzer_path, 0o750)

    # Clean up the unneeded archive.
    shell.remove_file(archive_path)

    # Save the current revision of this fuzzer in a file for later checks.
    revisions.write_revision_to_revision_file(version_file, fuzzer.revision)
    logs.log('Updated fuzzer to revision %d.' % fuzzer.revision)

  # Set up data bundles associated with this fuzzer.
  data_bundles = ndb_utils.get_all_from_query(
      data_types.DataBundle.query(
          data_types.DataBundle.name == fuzzer.data_bundle_name))
  for data_bundle in data_bundles:
    if not update_data_bundle(fuzzer, data_bundle):
      return False

  # Set up the environment variable for the launcher script path.
  if fuzzer.launcher_script:
    fuzzer_launcher_path = shell.get_execute_command(
        os.path.join(fuzzer_directory, fuzzer.launcher_script))
    environment.set_value('LAUNCHER_PATH', fuzzer_launcher_path)

  return True
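Example 3 gates the re-download on a version file written next to the fuzzer: revisions.needs_update presumably compares the revision stored in that file with the revision recorded on the datastore entity. A simplified, hypothetical sketch of that check and the matching writer (function names mirror the ones used above; the file format is an assumption, not taken from the source):

import os


def needs_update(version_file_path, revision):
  """Hypothetical: return True if |version_file_path| is missing or stale."""
  if not os.path.exists(version_file_path):
    return True
  with open(version_file_path) as handle:
    try:
      return int(handle.read().strip()) != revision
    except ValueError:
      # Corrupt version file, force an update.
      return True


def write_revision_to_revision_file(version_file_path, revision):
  """Hypothetical: record |revision| so the next run can skip the update."""
  with open(version_file_path, 'w') as handle:
    handle.write(str(revision))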
Example 4
def recreate_directory(directory_path):
    """Delete directory if exists, create empty directory. Throw an exception if
  either fails."""
    if not shell.remove_directory(directory_path, recreate=True):
        raise Exception('Failed to recreate directory: ' + directory_path)
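Most of the examples in this collection rely on shell.remove_directory returning a boolean and, with recreate=True, leaving an empty directory behind rather than raising. A minimal, hypothetical stdlib-only sketch of that assumed contract (the real ClusterFuzz helper presumably also handles retries, symlinks, and platform quirks):

import os
import shutil


def remove_directory(directory, recreate=False):
  """Hypothetical sketch: delete |directory| and optionally recreate it empty.

  Returns True on success and False on failure, mirroring how the examples
  check the return value instead of catching exceptions.
  """
  try:
    if os.path.isdir(directory):
      shutil.rmtree(directory)
    if recreate:
      os.makedirs(directory)
    return True
  except OSError:
    return False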
Example 5
def flash_to_latest_build_if_needed():
    """Wipes user data, resetting the device to original factory state."""
    if environment.get_value('LOCAL_DEVELOPMENT'):
        # Don't reimage local development devices.
        return

    run_timeout = environment.get_value('RUN_TIMEOUT')
    if run_timeout:
        # If we have a run timeout, then we are already scheduled to bail out
        # and will probably get re-imaged, e.g. by frameworks like Tradefed.
        return

    # Check if a flash is needed based on last recorded flash time.
    last_flash_time = persistent_cache.get_value(
        constants.LAST_FLASH_TIME_KEY,
        constructor=datetime.datetime.utcfromtimestamp)
    needs_flash = last_flash_time is None or dates.time_has_expired(
        last_flash_time, seconds=FLASH_INTERVAL)
    if not needs_flash:
        return

    build_info = {}
    if adb.is_gce():
        adb.recreate_gce_device()
    else:
        # Physical device.
        is_google_device = settings.is_google_device()
        if is_google_device is None:
            logs.log_error('Unable to query device. Reimaging failed.')
            adb.bad_state_reached()

        elif not is_google_device:
            # We can't reimage these, skip.
            logs.log('Non-Google device found, skipping reimage.')
            return

        else:
            # For Google devices.
            # Check if both |BUILD_BRANCH| and |BUILD_TARGET| environment variables
            # are set. If not, we don't have enough data for reimaging and hence
            # we bail out.
            branch = environment.get_value('BUILD_BRANCH')
            target = environment.get_value('BUILD_TARGET')
            if not target:
                # We default to userdebug configuration.
                build_params = settings.get_build_parameters()
                if build_params:
                    target = build_params.get('target') + '-userdebug'

                    # Cache target in environment. This is also useful for cases when
                    # device is bricked and we don't have this information available.
                    environment.set_value('BUILD_TARGET', target)

            if not branch or not target:
                logs.log_warn(
                    'BUILD_BRANCH and BUILD_TARGET are not set, skipping reimage.'
                )
                return

            # Download the latest build artifact for this branch and target.
            build_info = fetch_artifact.get_latest_artifact_info(
                branch, target)
            if not build_info:
                logs.log_error(
                    'Unable to fetch information on latest build artifact for '
                    'branch %s and target %s.' % (branch, target))
                return

            # Check if our local build matches the latest build. If not, we will
            # download it.
            build_id = build_info['bid']
            target = build_info['target']
            image_directory = environment.get_value('IMAGES_DIR')
            last_build_info = persistent_cache.get_value(
                constants.LAST_FLASH_BUILD_KEY)
            if not last_build_info or last_build_info['bid'] != build_id:
                # Clean up the images directory first.
                shell.remove_directory(image_directory, recreate=True)

                # We have a new build, download the build artifacts for it.
                for image_regex in FLASH_IMAGE_REGEXES:
                    image_file_path = fetch_artifact.get(
                        build_id, target, image_regex, image_directory)
                    if not image_file_path:
                        logs.log_error(
                            'Failed to download image artifact %s for '
                            'branch %s and target %s.' %
                            (image_file_path, branch, target))
                        return
                    if image_file_path.endswith('.zip'):
                        archive.unpack(image_file_path, image_directory)

            # We only do one device flash at a time per host; otherwise we run
            # into failures and the device gets stuck in a bad state.
            flash_lock_key_name = 'flash:%s' % socket.gethostname()
            if not locks.acquire_lock(flash_lock_key_name, by_zone=True):
                logs.log_error(
                    'Failed to acquire lock for reimaging, exiting.')
                return

            logs.log('Reimaging started.')
            logs.log('Rebooting into bootloader mode.')
            for _ in range(FLASH_RETRIES):
                adb.run_as_root()
                adb.run_command(['reboot-bootloader'])
                time.sleep(FLASH_REBOOT_BOOTLOADER_WAIT)
                adb.run_fastboot_command(['oem', 'off-mode-charge', '0'])
                adb.run_fastboot_command(['-w', 'reboot-bootloader'])

                for partition, partition_image_filename in FLASH_IMAGE_FILES:
                    partition_image_file_path = os.path.join(
                        image_directory, partition_image_filename)
                    adb.run_fastboot_command(
                        ['flash', partition, partition_image_file_path])
                    if partition in ['bootloader', 'radio']:
                        adb.run_fastboot_command(['reboot-bootloader'])

                # Disable ramdump to avoid capturing ramdumps during kernel
                # crashes, which cause a device lockup of several minutes during
                # boot; we intend to analyze those crashes ourselves.
                adb.run_fastboot_command(['oem', 'ramdump', 'disable'])

                adb.run_fastboot_command('reboot')
                time.sleep(FLASH_REBOOT_WAIT)

                if adb.get_device_state() == 'device':
                    break
                logs.log_error('Reimaging failed, retrying.')

            locks.release_lock(flash_lock_key_name, by_zone=True)

    if adb.get_device_state() != 'device':
        logs.log_error('Unable to find device. Reimaging failed.')
        adb.bad_state_reached()

    logs.log('Reimaging finished.')

    # Reset all of our persistent keys after wipe.
    persistent_cache.delete_value(constants.BUILD_PROP_MD5_KEY)
    persistent_cache.delete_value(constants.LAST_TEST_ACCOUNT_CHECK_KEY)
    persistent_cache.set_value(constants.LAST_FLASH_BUILD_KEY, build_info)
    persistent_cache.set_value(constants.LAST_FLASH_TIME_KEY, time.time())
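The flash decision above hinges on dates.time_has_expired comparing the cached flash timestamp against FLASH_INTERVAL. A simplified, hypothetical version of that check, assuming last_flash_time is a naive UTC datetime.datetime (or None when the device has never been flashed) and FLASH_INTERVAL is a duration in seconds (the value below is purely illustrative):

import datetime

FLASH_INTERVAL = 7 * 24 * 60 * 60  # Illustrative value: one week, in seconds.


def needs_flash(last_flash_time):
  """Hypothetical sketch of the expiry check used in the example above."""
  if last_flash_time is None:
    # Never flashed (or the persistent cache was wiped), so flash now.
    return True
  age = datetime.datetime.utcnow() - last_flash_time
  return age.total_seconds() >= FLASH_INTERVAL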
Example 6
 def tearDown(self):
     shell.remove_directory(self.build_directory)
Example 7
 def cleanup(self):
     """Cleanup state."""
     for path in self._created_directories:
         shell.remove_directory(path)
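Example 7 only shows the teardown half of the pattern; the _created_directories list it iterates is presumably appended to wherever directories are created. A hypothetical companion sketch of that bookkeeping (class and method names are illustrative, not from the source; shutil.rmtree stands in for shell.remove_directory):

import os
import shutil


class StateWithCleanup:
  """Hypothetical holder that records every directory it creates."""

  def __init__(self):
    self._created_directories = []

  def create_directory(self, path):
    os.makedirs(path, exist_ok=True)
    self._created_directories.append(path)

  def cleanup(self):
    """Remove every directory created through this object."""
    for path in self._created_directories:
      shutil.rmtree(path, ignore_errors=True)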
Example 8
def unpack_crash_testcases(crash_testcases_directory):
    """Unpacks the old crash testcases in the provided directory."""
    for testcase in ndb_utils.get_all_from_model(data_types.Testcase):
        testcase_id = testcase.key.id()

        # 1. If we have already stored the testcase, then just skip.
        if testcase_id in STORED_TESTCASES_LIST:
            continue

        # 2. Make sure that it is a unique crash testcase. Ignore duplicates and
        # uploaded repros.
        if testcase.status != 'Processed':
            continue

        # 3. Check if the testcase is fixed. If not, skip.
        if testcase.open:
            continue

        # 4. Check if the testcase has a minimized repro. If not, skip.
        if not testcase.minimized_keys or testcase.minimized_keys == 'NA':
            continue

        # 5. Only use testcases that have bugs associated with them.
        if not testcase.bug_information:
            continue

        # 6. Existing IPC testcases are uninteresting and unused in further
        # mutations. Due to size bloat, we ignore these for now.
        if testcase.absolute_path.endswith(tests.IPCDUMP_EXTENSION):
            continue

        # 7. Ignore testcases that are archives (e.g. Langfuzz fuzzer tests).
        if archive.get_archive_type(testcase.absolute_path):
            continue

        # 8. Skip in-process fuzzer testcases, since these are only applicable to
        # fuzz targets and don't run with blackbox binaries.
        if testcase.fuzzer_name and testcase.fuzzer_name in [
                'afl', 'libFuzzer'
        ]:
            continue

        # Unpack the testcase.
        try:
            _, input_directory, _ = setup.unpack_testcase(testcase)
        except Exception:
            logs.log_error('Failed to unpack testcase %d.' % testcase.key.id())
            continue

        # Move this to our crash testcases directory.
        crash_testcase_directory = os.path.join(crash_testcases_directory,
                                                str(testcase_id))
        shell.move(input_directory, crash_testcase_directory)

        # Re-create input directory for unpacking testcase in next iteration.
        shell.create_directory(input_directory)

        STORED_TESTCASES_LIST.append(testcase_id)

    # Remove testcase directories that exceed the max size limit.
    for directory_name in os.listdir(crash_testcases_directory):
        directory_path = os.path.join(crash_testcases_directory,
                                      directory_name)
        if not os.path.isdir(directory_path):
            continue

        if shell.get_directory_size(
                directory_path) <= MAX_TESTCASE_DIRECTORY_SIZE:
            continue

        shell.remove_directory(directory_path)

    # Rename all fuzzed testcase files as regular files.
    for root, _, files in os.walk(crash_testcases_directory):
        for filename in files:
            if not filename.startswith(tests.FUZZ_PREFIX):
                continue

            file_path = os.path.join(root, filename)
            stripped_file_name = os.path.basename(
                file_path)[len(tests.FUZZ_PREFIX):]
            stripped_file_path = os.path.join(os.path.dirname(file_path),
                                              stripped_file_name)
            try:
                os.rename(file_path, stripped_file_path)
            except Exception:
                raise Exception('Failed to rename testcase %s.' % file_path)

    # Remove empty files and dirs to avoid the case where a fuzzer randomly
    # chooses an empty dir/file and generates zero testcases.
    shell.remove_empty_files(crash_testcases_directory)
    shell.remove_empty_directories(crash_testcases_directory)
Example 9
def download_system_symbols_if_needed(symbols_directory):
    """Download system libraries from |SYMBOLS_URL| and cache locally."""
    # For local testing, we likely do not have access to the authenticated cloud
    # storage bucket with the symbols. In this case, just bail out.
    if environment.get_value('LOCAL_DEVELOPMENT', False):
        return

    # We have archived symbols for google builds only.
    if not device.google_device():
        return

    # Get the build fingerprint parameters.
    build_params = device.get_build_parameters()
    if not build_params:
        logs.log_error('Unable to determine build parameters.')
        return
    build_id = build_params.get('build_id')
    target = build_params.get('target')
    build_type = build_params.get('type')
    if not build_id or not target or not build_type:
        logs.log_error('Null build parameters found, exiting.')
        return

    # Check if we already have the symbols in cache.
    build_params_check_path = os.path.join(symbols_directory,
                                           '.cached_build_params')
    cached_build_params = utils.read_data_from_file(build_params_check_path,
                                                    eval_data=True)
    if cached_build_params and cached_build_params == build_params:
        # No work to do, same system symbols already in cache.
        return

    symbols_archive_filename = '%s-symbols-%s.zip' % (target, build_id)
    symbols_archive_path = os.path.join(symbols_directory,
                                        symbols_archive_filename)

    # Delete existing symbols directory first.
    shell.remove_directory(symbols_directory, recreate=True)

    # Fetch symbol file from cloud storage cache (if available).
    found_in_cache = storage.get_file_from_cache_if_exists(
        symbols_archive_path, update_modification_time_on_access=False)
    if not found_in_cache:
        # Include build type and sanitizer information in the target.
        target_with_type_and_san = '%s-%s' % (target, build_type)
        tool_suffix = environment.get_value('SANITIZER_TOOL_NAME')
        if tool_suffix and tool_suffix not in target_with_type_and_san:
            target_with_type_and_san += '_%s' % tool_suffix

        # Fetch the artifact now.
        fetch_artifact.get(build_id, target_with_type_and_san,
                           symbols_archive_filename, symbols_directory)

    if not os.path.exists(symbols_archive_path):
        logs.log_error('Unable to locate symbols archive %s.' %
                       symbols_archive_path)
        return

    # Store the artifact for later use or for use by other bots.
    storage.store_file_in_cache(symbols_archive_path)

    archive.unpack(symbols_archive_path, symbols_directory, trusted=True)
    shell.remove_file(symbols_archive_path)
    utils.write_data_to_file(build_params, build_params_check_path)
Example 10
def execute_task(fuzzer_name, job_type):
    """Execute ML RNN training task.

  The task is training RNN model by default. If more models are developed,
  arguments can be modified to specify which model to use.

  Args:
    fuzzer_name: Name of fuzzer, e.g. libpng_read_fuzzer.
    job_type: Job type, e.g. libfuzzer_chrome_asan.
  """
    if not job_type:
        logs.log_error(
            'job_type is not set when training ML RNN for fuzzer %s.' %
            fuzzer_name)
        return

    # Directory to place training files, such as logs, models, corpus.
    # Use |FUZZ_INPUTS_DISK| since it is not size constrained.
    temp_directory = environment.get_value('FUZZ_INPUTS_DISK')

    # Get corpus.
    corpus_directory = get_corpus_directory(temp_directory, fuzzer_name)
    shell.remove_directory(corpus_directory, recreate=True)

    logs.log('Downloading corpus backup for %s.' % fuzzer_name)

    if not get_corpus(corpus_directory, fuzzer_name):
        logs.log_error('Failed to download corpus backup for %s.' %
                       fuzzer_name)
        return

    # Get the directory to save models.
    model_directory = get_model_files_directory(temp_directory, fuzzer_name)
    shell.remove_directory(model_directory, recreate=True)

    # Get the directory to save training logs.
    log_directory = get_model_log_directory(temp_directory, fuzzer_name)
    shell.remove_directory(log_directory, recreate=True)

    result = train_rnn(corpus_directory, model_directory, log_directory)

    # The training process exited abnormally and it was not due to a timeout,
    # meaning an error occurred during execution.
    if result.return_code and not result.timed_out:
        if result.return_code == constants.ExitCode.CORPUS_TOO_SMALL:
            logs.log_warn(
                'ML RNN training task for fuzzer %s aborted due to small corpus.'
                % fuzzer_name)
        else:
            logs.log_error(
                'ML RNN training task for fuzzer %s failed with ExitCode = %d.'
                % (fuzzer_name, result.return_code),
                output=result.output)
        return

    # Timing out may be caused by a large training corpus, but intermediate
    # models are saved frequently and can still be uploaded.
    if result.timed_out:
        logs.log_warn('ML RNN training task for %s timed out.' % fuzzer_name)

    upload_model_to_gcs(model_directory, fuzzer_name)
Example 11
 def recreate_directory(directory_path):
     shell.create_directory_if_needed(directory_path,
                                      create_intermediates=True)
     shell.remove_directory(directory_path, recreate=True)
Example 12
 def tearDown(self):
     shell.remove_directory(self.model_directory)
     shell.remove_directory(self.log_directory)
Example 13
 def tearDown(self):
     shell.remove_directory(self.temp_dir)
Example 14
 def delete(self):
   """Delete this build."""
   # This overrides BaseBuild.delete (which deletes the entire base build
   # directory) to delete this specific build.
   shell.remove_directory(self.build_dir)
Example 15
 def delete(self):
   """Delete this build."""
   shell.remove_directory(self.base_build_dir)
Example 16
def _unpack_build(base_build_dir, build_dir, build_url, target_weights=None):
  """Unpacks a build from a build url into the build directory."""
  # Track time taken to unpack builds so that it doesn't silently regress.
  start_time = time.time()

  # Free up memory.
  utils.python_gc()

  # Remove the current build.
  logs.log('Removing build directory %s.' % build_dir)
  if not shell.remove_directory(build_dir, recreate=True):
    logs.log_error('Unable to clear build directory %s.' % build_dir)
    _handle_unrecoverable_error_on_windows()
    return False

  # Decide whether to use cache build archives or not.
  use_cache = environment.get_value('CACHE_STORE', False)

  # Download build archive locally.
  build_local_archive = os.path.join(build_dir, os.path.basename(build_url))

  # Make the disk space necessary for the archive available.
  archive_size = storage.get_download_file_size(
      build_url, build_local_archive, use_cache=True)
  if archive_size is not None and not _make_space(archive_size, base_build_dir):
    shell.clear_data_directories()
    logs.log_fatal_and_exit(
        'Failed to make space for download. '
        'Cleared all data directories to free up space, exiting.')

  logs.log('Downloading build from url %s.' % build_url)
  try:
    storage.copy_file_from(build_url, build_local_archive, use_cache=use_cache)
  except Exception:
    logs.log_error('Unable to download build url %s.' % build_url)
    return False

  unpack_everything = environment.get_value('UNPACK_ALL_FUZZ_TARGETS_AND_FILES')
  if not unpack_everything:
    # For fuzzing, pick a random fuzz target so that we only un-archive that
    # particular fuzz target and its dependencies and save disk space.
    # If we are going to unpack everything in the archive based on
    # |UNPACK_ALL_FUZZ_TARGETS_AND_FILES| in the job definition, then don't set a
    # random fuzz target before we've unpacked the build. It won't actually save
    # us anything in this case and can be really expensive for large builds
    # (such as Chrome OS). Defer setting it until after the build has been
    # unpacked.
    _set_random_fuzz_target_for_fuzzing_if_needed(
        _get_fuzz_targets_from_archive(build_local_archive), target_weights)

  # Actual list of files to unpack can be smaller if we are only unarchiving
  # a particular fuzz target.
  file_match_callback = _get_file_match_callback()
  assert not (unpack_everything and file_match_callback is not None)

  if not _make_space_for_build(build_local_archive, base_build_dir,
                               file_match_callback):
    shell.clear_data_directories()
    logs.log_fatal_and_exit(
        'Failed to make space for build. '
        'Cleared all data directories to free up space, exiting.')

  # Unpack the local build archive.
  logs.log('Unpacking build archive %s.' % build_local_archive)
  trusted = not utils.is_oss_fuzz()
  try:
    archive.unpack(
        build_local_archive,
        build_dir,
        trusted=trusted,
        file_match_callback=file_match_callback)
  except Exception:
    logs.log_error('Unable to unpack build archive %s.' % build_local_archive)
    return False

  if unpack_everything:
    # Set a random fuzz target now that the build has been unpacked, if we
    # didn't set one earlier.
    _set_random_fuzz_target_for_fuzzing_if_needed(
        _get_fuzz_targets_from_dir(build_dir), target_weights)

  # If this is partial build due to selected build files, then mark it as such
  # so that it is not re-used.
  if file_match_callback:
    partial_build_file_path = os.path.join(build_dir, PARTIAL_BUILD_FILE)
    utils.write_data_to_file('', partial_build_file_path)

  # No point in keeping the archive around.
  shell.remove_file(build_local_archive)

  end_time = time.time()
  elapsed_time = end_time - start_time
  log_func = logs.log_warn if elapsed_time > UNPACK_TIME_LIMIT else logs.log
  log_func('Build took %0.02f minutes to unpack.' % (elapsed_time / 60.))

  return True