Code example #1
    def test(self):
        """Tests copy_local_directory_to_remote."""
        utils.write_data_to_file('a', os.path.join(self.local_temp_dir, 'a'))
        shell.create_directory(os.path.join(self.local_temp_dir, 'b'))
        utils.write_data_to_file('c',
                                 os.path.join(self.local_temp_dir, 'b', 'c'))

        adb.copy_local_directory_to_remote(self.local_temp_dir,
                                           self.device_temp_dir)

        self.assertTrue(
            adb.file_exists(os.path.join(self.device_temp_dir, 'a')))
        self.assertFalse(
            adb.directory_exists(os.path.join(self.device_temp_dir, 'a')))
        self.assertEqual(
            adb.get_file_size(os.path.join(self.device_temp_dir, 'a')), 1)

        self.assertTrue(
            adb.directory_exists(os.path.join(self.device_temp_dir, 'b')))
        self.assertFalse(
            adb.file_exists(os.path.join(self.device_temp_dir, 'b')))

        self.assertTrue(
            adb.file_exists(os.path.join(self.device_temp_dir, 'b', 'c')))
        self.assertFalse(
            adb.directory_exists(os.path.join(self.device_temp_dir, 'b', 'c')))
        self.assertEqual(
            adb.get_file_size(os.path.join(self.device_temp_dir, 'b', 'c')), 1)
Code example #2
def _update_environment_for_testcase(testcase, build_directory,
                                     application_override):
    """Update environment variables that depend on the test case."""
    commands.update_environment_for_job(testcase.job_definition)
    environment.set_value('JOB_NAME', testcase.job_type)

    # Override app name if explicitly specified.
    if application_override:
        environment.set_value('APP_NAME', application_override)

    if testcase.fuzzer_name:
        fuzzer_directory = setup.get_fuzzer_directory(testcase.fuzzer_name)
    else:
        fuzzer_directory = os.path.join(environment.get_value('ROOT_DIR'),
                                        'fuzzer')
        shell.create_directory(fuzzer_directory)

    environment.set_value('FUZZER_DIR', fuzzer_directory)

    task_name = environment.get_value('TASK_NAME')
    setup.prepare_environment_for_testcase(testcase, testcase.job_type,
                                           task_name)

    build_manager.set_environment_vars(
        [environment.get_value('FUZZER_DIR'), build_directory])

    _verify_target_exists(build_directory)
Code example #3
File: utils.py Project: vanhauser-thc/clusterfuzz
def get_temp_dir():
    """Return the temp dir."""
    temp_dirname = 'temp-' + str(os.getpid())
    temp_directory = os.path.join(
        environment.get_value('FUZZ_INPUTS_DISK', tempfile.gettempdir()),
        temp_dirname)
    shell.create_directory(temp_directory)
    return temp_directory
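
For reference, a minimal usage sketch of the helper above. The cleanup reuses shell.remove_directory, which appears in code example #19; pairing it with get_temp_dir here is an assumption, not something this page confirms.

temp_directory = get_temp_dir()  # e.g. <FUZZ_INPUTS_DISK>/temp-<pid>
try:
    scratch_file = os.path.join(temp_directory, 'scratch.bin')
    utils.write_data_to_file('payload', scratch_file)  # helper seen in example #1
finally:
    shell.remove_directory(temp_directory)  # assumption: recursive delete helper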
Code example #4
    def _create_temp_corpus_directory(self, name):
        """Create temporary corpus directory. Returns path to the created
    directory."""
        testcases_directory = environment.get_value('FUZZ_INPUTS_DISK')
        directory_path = os.path.join(testcases_directory, name)
        shell.create_directory(directory_path)
        self._created_directories.append(directory_path)

        return directory_path
Code example #5
File: minijail.py Project: stjordanis/clusterfuzz
    def _makedirs(self, directory):
        """Create directories for binding in chroot.

    Args:
      directory: The absolute path to the directory target in the chroot.
    """
        if directory[0] == '/':
            directory = directory[1:]

        shell.create_directory(os.path.join(self._chroot_dir, directory),
                               create_intermediates=True)
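
Because the leading '/' is stripped, the chroot target always lands under self._chroot_dir. A hypothetical call (instance name and paths invented for illustration):

# Hypothetical: with chroot._chroot_dir == '/mnt/scratch0/chroot', this call
# creates /mnt/scratch0/chroot/tmp/fuzz, including intermediate directories.
chroot._makedirs('/tmp/fuzz')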
Code example #6
  def convert_path_for_write(self, remote_path, directory=OBJECTS_DIR):
    """Get the local FS path for writing to the remote path. Creates any
    intermediate directories if necessary (except for the parent bucket
    directory)."""
    bucket, path = get_bucket_name_and_path(remote_path)
    if not os.path.exists(self._fs_bucket_path(bucket)):
      raise RuntimeError(
          'Bucket {bucket} does not exist.'.format(bucket=bucket))

    fs_path = self._fs_path(bucket, path, directory)
    shell.create_directory(os.path.dirname(fs_path), create_intermediates=True)

    return fs_path
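
A sketch of the intended call pattern; the gs://-style remote path and the requirement that the bucket directory already exist are inferred from the code above, not confirmed elsewhere:

# Hypothetical usage: map a remote object to a writable local FS path.
fs_path = provider.convert_path_for_write('gs://my-bucket/corpus/unit-1')
with open(fs_path, 'wb') as f:
    f.write(b'data')  # intermediate dirs were created; the bucket dir must pre-exist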
Code example #7
    def init_corpus(self, source_dir, destination_dir):
        """Uses corpus from the cloud to initialize syzkaller corpus.

    Args:
      source_dir: Folder where the corpus is downloaded from the cloud.
      destination_dir: Folder where syzkaller will be looking for corpus.
    """
        source_file = os.path.join(source_dir,
                                   self._get_device_corpus_db_filename())
        shell.create_directory(destination_dir)
        destination_file = os.path.join(destination_dir, CORPUS_DB_FILENAME)
        if os.path.isfile(source_file):
            shutil.copy(source_file, destination_file)
Code example #8
def get_corpus_directory(input_directory, project_qualified_name):
    """Get the corpus directory given a project qualified fuzz target name."""
    corpus_directory = os.path.join(input_directory, project_qualified_name)
    if environment.is_trusted_host():
        from clusterfuzz._internal.bot.untrusted_runner import file_host
        corpus_directory = file_host.rebase_to_worker_root(corpus_directory)

    # Create corpus directory if it does not exist already.
    if environment.is_trusted_host():
        from clusterfuzz._internal.bot.untrusted_runner import file_host
        file_host.create_directory(corpus_directory, create_intermediates=True)
    else:
        shell.create_directory(corpus_directory)
    return corpus_directory
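
A minimal call sketch for the function above (argument values are hypothetical):

# Hypothetical values for illustration.
corpus_directory = get_corpus_directory('/inputs/fuzzer-testcases',
                                        'libpng_read_fuzzer')
# On a trusted host, the path is rebased and created on the worker; otherwise
# it is created locally via shell.create_directory.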
Code example #9
    def save_corpus(self, source_dir, destination_dir):
        """Saves syzkaller to folder so it is backed up to the cloud.

    Args:
      source_dir: Folder where syzkaller corpus is.
      destination_dir: Folder where the corpus is synced with the cloud.
    """
        source_file = os.path.join(source_dir, CORPUS_DB_FILENAME)
        shell.create_directory(destination_dir)
        destination_file = os.path.join(destination_dir,
                                        self._get_device_corpus_db_filename())
        if os.path.isfile(source_file) and (
                not os.path.exists(destination_file) or
            (os.path.getsize(source_file) >
             os.path.getsize(destination_file))):
            shutil.copy(source_file, destination_file)
Code example #10
def setup_user_profile_directory_if_needed(user_profile_directory):
    """Set user profile directory if it does not exist."""
    if os.path.exists(user_profile_directory):
        # User profile directory already exists. Bail out.
        return

    shell.create_directory(user_profile_directory)

    # Create a file in user profile directory based on format:
    # filename;base64 encoded zlib compressed file contents.
    user_profile_file = environment.get_value('USER_PROFILE_FILE')
    if user_profile_file and ';' in user_profile_file:
        user_profile_filename, encoded_file_contents = (
            user_profile_file.split(';', 1))
        user_profile_file_contents = zlib.decompress(
            base64.b64decode(encoded_file_contents))
        user_profile_file_path = os.path.join(user_profile_directory,
                                              user_profile_filename)
        utils.write_data_to_file(user_profile_file_contents,
                                 user_profile_file_path)

    # For Firefox, we need to install a special fuzzPriv extension that exposes
    # extra functions (e.g. gc()) to JavaScript.
    app_name = environment.get_value('APP_NAME')
    if app_name.startswith('firefox'):
        # Create extensions directory.
        extensions_directory = os.path.join(user_profile_directory,
                                            'extensions')
        shell.create_directory(extensions_directory)

        # Unpack the fuzzPriv extension.
        extension_archive = os.path.join(environment.get_resources_directory(),
                                         'firefox', 'fuzzPriv-extension.zip')
        archive.unpack(extension_archive, extensions_directory)

        # Add this extension in the extensions configuration file.
        extension_config_file_path = os.path.join(user_profile_directory,
                                                  'extensions.ini')
        fuzz_extension_directory = os.path.join(extensions_directory,
                                                '*****@*****.**')
        extension_config_file_contents = ('[ExtensionDirs]\r\n'
                                          'Extension0=%s\r\n'
                                          '\r\n'
                                          '[ThemeDirs]\r\n' %
                                          fuzz_extension_directory)
        utils.write_data_to_file(extension_config_file_contents,
                                 extension_config_file_path)
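
The `filename;base64(zlib(contents))` format decoded above can be produced like this (the pref contents are a hypothetical example):

import base64
import zlib

contents = b'user_pref("dom.disable_open_during_load", false);'  # hypothetical
encoded = base64.b64encode(zlib.compress(contents)).decode('ascii')
environment.set_value('USER_PROFILE_FILE', 'prefs.js;' + encoded)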
Code example #11
    def _cross_pollinate_other_fuzzer_corpuses(self):
        """Add other fuzzer corpuses to shared corpus path for cross-pollination."""
        corpus_backup_date = utils.utcnow().date() - datetime.timedelta(
            days=data_types.CORPUS_BACKUP_PUBLIC_LOOKBACK_DAYS)

        for cross_pollinate_fuzzer in self.cross_pollinate_fuzzers:
            project_qualified_name = (
                cross_pollinate_fuzzer.fuzz_target.project_qualified_name())
            backup_bucket_name = cross_pollinate_fuzzer.backup_bucket_name
            corpus_engine_name = cross_pollinate_fuzzer.corpus_engine_name

            corpus_backup_url = corpus_manager.gcs_url_for_backup_file(
                backup_bucket_name, corpus_engine_name, project_qualified_name,
                corpus_backup_date)
            corpus_backup_local_filename = '%s-%s' % (
                project_qualified_name, os.path.basename(corpus_backup_url))
            corpus_backup_local_path = os.path.join(
                self.shared_corpus_path, corpus_backup_local_filename)

            if not storage.exists(corpus_backup_url, ignore_errors=True):
                # This can happen when a new fuzz target is checked in, or if we
                # missed capturing a backup for a particular day (for OSS-Fuzz,
                # this results in a 403 instead of a 404, since that GCS path
                # belongs to another project). So, just log a warning for
                # debugging purposes only.
                logs.log_warn('Corpus backup does not exist, ignoring: %s.' %
                              corpus_backup_url)
                continue

            if not storage.copy_file_from(corpus_backup_url,
                                          corpus_backup_local_path):
                continue

            corpus_backup_output_directory = os.path.join(
                self.shared_corpus_path, project_qualified_name)
            shell.create_directory(corpus_backup_output_directory)
            result = archive.unpack(corpus_backup_local_path,
                                    corpus_backup_output_directory)
            shell.remove_file(corpus_backup_local_path)

            if result:
                logs.log(
                    'Corpus backup url %s successfully unpacked into shared corpus.'
                    % corpus_backup_url)
            else:
                logs.log_error('Failed to unpack corpus backup from url %s.' %
                               corpus_backup_url)
Code example #12
  def rsync_to_disk(self,
                    directory,
                    timeout=CORPUS_FILES_SYNC_TIMEOUT,
                    delete=True):
    """Run gsutil to download corpus files from GCS.

    Args:
      directory: Path to directory to sync to.
      timeout: Timeout for gsutil.
      delete: Whether or not to delete files on disk that don't exist on GCS.

    Returns:
      A bool indicating whether or not the command succeeded.
    """
    shell.create_directory(directory, create_intermediates=True)

    corpus_gcs_url = self.get_gcs_url()
    result = self._gsutil_runner.rsync(corpus_gcs_url, directory, timeout,
                                       delete)

    # Allow a small number of files to fail to be synced.
    return _handle_rsync_result(result, max_errors=MAX_SYNC_ERRORS)
Code example #13
def request(url,
            body=None,
            method=POST_METHOD,
            force_reauthorization=False,
            configuration=None):
    """Make an HTTP request to the specified URL."""
    if configuration:
        authorization = _get_authorization(force_reauthorization,
                                           configuration)
        headers = {
            'User-Agent': 'clusterfuzz-reproduce',
            'Authorization': authorization
        }
    else:
        headers = {}

    http = httplib2.Http()
    request_body = json_utils.dumps(body) if body is not None else ''
    response, content = http.request(url,
                                     method=method,
                                     headers=headers,
                                     body=request_body)

    # If the server returns 401 we may need to reauthenticate. Try the request
    # a second time if this happens.
    if response.status == 401 and not force_reauthorization:
        return request(url,
                       body,
                       method=method,
                       force_reauthorization=True,
                       configuration=configuration)

    if AUTHORIZATION_HEADER in response:
        shell.create_directory(os.path.dirname(AUTHORIZATION_CACHE_FILE),
                               create_intermediates=True)
        utils.write_data_to_file(response[AUTHORIZATION_HEADER],
                                 AUTHORIZATION_CACHE_FILE)

    return response, content
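
A hedged usage sketch (URL and body are invented; a real `configuration` object comes from the reproduce-tool setup, which is not shown on this page):

# Hypothetical call; POST is the default method. With configuration=None the
# request is sent without an Authorization header.
response, content = request('https://clusterfuzz.example.com/testcase-detail',
                            body={'testcaseId': 12345},
                            configuration=None)
if response.status == 200:
    print(content)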
Code example #14
    def create_directories(self):
        """Creates directories needed to use mutator plugins."""
        # TODO(320): Change mutator plugin downloads so that they don't need to be
        # deleted and redownloaded on each run of launcher.py.
        shell.create_directory(environment.get_value('MUTATOR_PLUGINS_DIR'),
                               create_intermediates=True,
                               recreate=True)
        shell.create_directory(_get_mutator_plugins_archives_dir(),
                               create_intermediates=True,
                               recreate=True)
        shell.create_directory(_get_mutator_plugins_unpacked_dir(),
                               create_intermediates=True,
                               recreate=True)
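
Taken together, the examples on this page exercise three calling conventions of shell.create_directory; here is a summary sketch (the keyword names are exactly those used above):

shell.create_directory(path)                              # single level
shell.create_directory(path, create_intermediates=True)   # like mkdir -p
shell.create_directory(path, create_intermediates=True,
                       recreate=True)                     # wipe, then recreate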
Code example #15
File: storage.py Project: stjordanis/clusterfuzz
def store_file_in_cache(file_path,
                        cached_files_per_directory_limit=True,
                        force_update=False):
    """Get file from nfs cache if available."""
    if not os.path.exists(file_path):
        logs.log_error(
            'Local file %s does not exist, nothing to store in cache.' %
            file_path)
        return

    if os.path.getsize(file_path) > CACHE_SIZE_LIMIT:
        logs.log('File %s is too large to store in cache, skipping.' %
                 file_path)
        return

    nfs_root = environment.get_value('NFS_ROOT')
    if not nfs_root:
        # No NFS, nothing to store in cache.
        return

    # If NFS server is not available due to heavy load, skip storage operation
    # altogether as we would fail to store file.
    if not os.path.exists(os.path.join(nfs_root,
                                       '.')):  # '.' forces a mount traversal.
        logs.log_warn('Cache %s not available.' % nfs_root)
        return

    cache_file_path = get_cache_file_path(file_path)
    cache_directory = os.path.dirname(cache_file_path)
    filename = os.path.basename(file_path)

    if not os.path.exists(cache_directory):
        if not shell.create_directory(cache_directory,
                                      create_intermediates=True):
            logs.log_error('Failed to create cache directory %s.' %
                           cache_directory)
            return

    # Check if the file already exists in cache.
    if file_exists_in_cache(cache_file_path):
        if not force_update:
            return

        # If we are forcing update, we need to remove current cached file and its
        # metadata.
        remove_cache_file_and_metadata(cache_file_path)

    # Delete old cached files beyond our maximum storage limit.
    if cached_files_per_directory_limit:
        # Get a list of cached files.
        cached_files_list = []
        for cached_filename in os.listdir(cache_directory):
            if cached_filename.endswith(CACHE_METADATA_FILE_EXTENSION):
                continue
            cached_file_path = os.path.join(cache_directory, cached_filename)
            cached_files_list.append(cached_file_path)

        mtime = lambda f: os.stat(f).st_mtime
        last_used_cached_files_list = list(
            sorted(cached_files_list, key=mtime, reverse=True))
        for cached_file_path in (
                last_used_cached_files_list[MAX_CACHED_FILES_PER_DIRECTORY - 1:]):
            remove_cache_file_and_metadata(cached_file_path)

    # Start storing the actual file in cache now.
    logs.log('Started storing file %s into cache.' % filename)

    # Fetch lock to store this file. Try only once since if any other bot has
    # started to store it, we don't need to do it ourselves. Just bail out.
    lock_name = 'store:cache_file:%s' % utils.string_hash(cache_file_path)
    if not locks.acquire_lock(lock_name,
                              max_hold_seconds=CACHE_LOCK_TIMEOUT,
                              retries=1,
                              by_zone=True):
        logs.log_warn(
            'Unable to fetch lock to update cache file %s, skipping.' %
            filename)
        return

    # Check if another bot already updated it.
    if file_exists_in_cache(cache_file_path):
        locks.release_lock(lock_name, by_zone=True)
        return

    shell.copy_file(file_path, cache_file_path)
    write_cache_file_metadata(cache_file_path, file_path)
    time.sleep(CACHE_COPY_WAIT_TIME)
    error_occurred = not file_exists_in_cache(cache_file_path)
    locks.release_lock(lock_name, by_zone=True)

    if error_occurred:
        logs.log_error('Failed to store file %s into cache.' % filename)
    else:
        logs.log('Completed storing file %s into cache.' % filename)
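
The eviction step above keeps only the most recently modified cached files and removes the rest. A standalone sketch of the same idea (the limit value and the metadata suffix are hypothetical stand-ins for the constants used above):

import os

def evict_all_but_newest(cache_directory, keep=2):  # hypothetical limit
    files = [os.path.join(cache_directory, name)
             for name in os.listdir(cache_directory)
             if not name.endswith('.metadata')]  # assumption: metadata suffix
    files.sort(key=lambda f: os.stat(f).st_mtime, reverse=True)  # newest first
    for stale in files[keep:]:
        os.remove(stale)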
Code example #16
def update_data_bundle(fuzzer, data_bundle):
    """Updates a data bundle to the latest version."""
    # This module can't be in the global imports due to appengine issues
    # with multiprocessing and psutil imports.
    from clusterfuzz._internal.google_cloud_utils import gsutil

    # If we are using a data bundle on NFS, it is expected that our testcases
    # will usually be large enough that we would fill up our tmpfs directory
    # pretty quickly. So, change it to use an on-disk directory.
    if not data_bundle.is_local:
        testcase_disk_directory = environment.get_value('FUZZ_INPUTS_DISK')
        environment.set_value('FUZZ_INPUTS', testcase_disk_directory)

    data_bundle_directory = get_data_bundle_directory(fuzzer.name)
    if not data_bundle_directory:
        logs.log_error('Failed to setup data bundle %s.' % data_bundle.name)
        return False

    if not shell.create_directory(data_bundle_directory,
                                  create_intermediates=True):
        logs.log_error('Failed to create data bundle %s directory.' %
                       data_bundle.name)
        return False

    # Check if data bundle is up to date. If yes, skip the update.
    if _is_data_bundle_up_to_date(data_bundle, data_bundle_directory):
        logs.log('Data bundle was recently synced, skip.')
        return True

    # Fetch lock for this data bundle.
    if not _fetch_lock_for_data_bundle_update(data_bundle):
        logs.log_error('Failed to lock data bundle %s.' % data_bundle.name)
        return False

    # Re-check if another bot did the sync already. If yes, skip.
    if _is_data_bundle_up_to_date(data_bundle, data_bundle_directory):
        logs.log('Another bot finished the sync, skip.')
        _release_lock_for_data_bundle_update(data_bundle)
        return True

    time_before_sync_start = time.time()

    # No need to sync anything if this is a search index data bundle. In that
    # case, the fuzzer will generate testcases from a gcs bucket periodically.
    if not _is_search_index_data_bundle(data_bundle.name):
        bucket_url = data_handler.get_data_bundle_bucket_url(data_bundle.name)

        if environment.is_trusted_host() and data_bundle.sync_to_worker:
            from clusterfuzz._internal.bot.untrusted_runner import corpus_manager
            from clusterfuzz._internal.bot.untrusted_runner import file_host
            worker_data_bundle_directory = file_host.rebase_to_worker_root(
                data_bundle_directory)

            file_host.create_directory(worker_data_bundle_directory,
                                       create_intermediates=True)
            result = corpus_manager.RemoteGSUtilRunner().rsync(
                bucket_url, worker_data_bundle_directory, delete=False)
        else:
            result = gsutil.GSUtilRunner().rsync(bucket_url,
                                                 data_bundle_directory,
                                                 delete=False)

        if result.return_code != 0:
            logs.log_error('Failed to sync data bundle %s: %s.' %
                           (data_bundle.name, result.output))
            _release_lock_for_data_bundle_update(data_bundle)
            return False

    # Update the testcase list file.
    testcase_manager.create_testcase_list_file(data_bundle_directory)

    # Write the last synced time to the sync file.
    sync_file_path = _get_data_bundle_sync_file_path(data_bundle_directory)
    utils.write_data_to_file(time_before_sync_start, sync_file_path)
    if environment.is_trusted_host() and data_bundle.sync_to_worker:
        from clusterfuzz._internal.bot.untrusted_runner import file_host
        worker_sync_file_path = file_host.rebase_to_worker_root(sync_file_path)
        file_host.copy_file_to_worker(sync_file_path, worker_sync_file_path)

    # Release acquired lock.
    _release_lock_for_data_bundle_update(data_bundle)

    return True
Code example #17
def create_directory(request, _):
    """Create a directory."""
    result = shell.create_directory(request.path, request.create_intermediates)
    return untrusted_runner_pb2.CreateDirectoryResponse(result=result)
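
A hypothetical caller for this RPC handler; the CreateDirectoryRequest message name and its `path` / `create_intermediates` fields are inferred from the handler above, not confirmed against the proto definitions:

# Hypothetical request construction (field names inferred from the handler).
req = untrusted_runner_pb2.CreateDirectoryRequest(
    path='/worker/fuzz-inputs', create_intermediates=True)
resp = create_directory(req, None)  # the second argument (context) is unused
print(resp.result)  # True on success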
Code example #18
File: run.py Project: vanhauser-thc/clusterfuzz
def main():
  """Main sync routine."""
  tests_archive_bucket = environment.get_value('TESTS_ARCHIVE_BUCKET')
  tests_archive_name = environment.get_value('TESTS_ARCHIVE_NAME')
  tests_directory = environment.get_value('TESTS_DIR')
  sync_interval = environment.get_value('SYNC_INTERVAL')  # in seconds.

  shell.create_directory(tests_directory)

  # Sync old crash tests.
  logs.log('Syncing old crash tests.')
  crash_testcases_directory = os.path.join(tests_directory, 'CrashTests')
  shell.create_directory(crash_testcases_directory)
  unpack_crash_testcases(crash_testcases_directory)

  # Sync web tests.
  logs.log('Syncing web tests.')
  src_directory = os.path.join(tests_directory, 'src')
  gclient_file_path = os.path.join(tests_directory, '.gclient')
  if not os.path.exists(gclient_file_path):
    subprocess.check_call(
        ['fetch', '--no-history', 'chromium', '--nosvn=True'],
        cwd=tests_directory)
  if os.path.exists(src_directory):
    subprocess.check_call(['gclient', 'revert'], cwd=src_directory)
    subprocess.check_call(['git', 'pull'], cwd=src_directory)
    subprocess.check_call(['gclient', 'sync'], cwd=src_directory)
  else:
    raise Exception('Unable to checkout web tests.')

  clone_git_repository(tests_directory, 'v8',
                       'https://chromium.googlesource.com/v8/v8')

  clone_git_repository(tests_directory, 'ChakraCore',
                       'https://github.com/Microsoft/ChakraCore.git')

  clone_git_repository(tests_directory, 'gecko-dev',
                       'https://github.com/mozilla/gecko-dev.git')

  clone_git_repository(tests_directory, 'webgl-conformance-tests',
                       'https://github.com/KhronosGroup/WebGL.git')

  checkout_svn_repository(
      tests_directory, 'WebKit/LayoutTests',
      'http://svn.webkit.org/repository/webkit/trunk/LayoutTests')

  checkout_svn_repository(
      tests_directory, 'WebKit/JSTests/stress',
      'http://svn.webkit.org/repository/webkit/trunk/JSTests/stress')

  checkout_svn_repository(
      tests_directory, 'WebKit/JSTests/es6',
      'http://svn.webkit.org/repository/webkit/trunk/JSTests/es6')

  create_gecko_tests_directory(tests_directory, 'gecko-dev', 'gecko-tests')

  # Upload tests archive to google cloud storage.
  logs.log('Uploading tests archive to cloud.')
  tests_archive_local = os.path.join(tests_directory, tests_archive_name)
  tests_archive_remote = 'gs://{bucket_name}/{archive_name}'.format(
      bucket_name=tests_archive_bucket, archive_name=tests_archive_name)
  shell.remove_file(tests_archive_local)
  create_symbolic_link(tests_directory, 'gecko-dev/js/src/tests',
                       'spidermonkey')
  create_symbolic_link(tests_directory, 'ChakraCore/test', 'chakra')

  # FIXME: Find a way to rename LayoutTests to web_tests without breaking
  # compatibility with older testcases.
  create_symbolic_link(tests_directory, 'src/third_party/blink/web_tests',
                       'LayoutTests')

  subprocess.check_call(
      [
          'zip',
          '-r',
          tests_archive_local,
          'CrashTests',
          'LayoutTests',
          'WebKit',
          'gecko-tests',
          'v8/test/mjsunit',
          'spidermonkey',
          'chakra',
          'webgl-conformance-tests',
          '-x',
          '*.cc',
          '-x',
          '*.cpp',
          '-x',
          '*.py',
          '-x',
          '*.txt',
          '-x',
          '*-expected.*',
          '-x',
          '*.git*',
          '-x',
          '*.svn*',
      ],
      cwd=tests_directory)
  subprocess.check_call(
      ['gsutil', 'cp', tests_archive_local, tests_archive_remote])

  logs.log('Completed cycle, sleeping for %s seconds.' % sync_interval)
  time.sleep(sync_interval)
Code example #19
File: run.py Project: vanhauser-thc/clusterfuzz
def unpack_crash_testcases(crash_testcases_directory):
  """Unpacks the old crash testcases in the provided directory."""
  for testcase in ndb_utils.get_all_from_model(data_types.Testcase):
    testcase_id = testcase.key.id()

    # 1. If we have already stored the testcase, then just skip.
    if testcase_id in STORED_TESTCASES_LIST:
      continue

    # 2. Make sure that it is a unique crash testcase. Ignore duplicates,
    # uploaded repros.
    if testcase.status != 'Processed':
      continue

    # 3. Check if the testcase is fixed. If not, skip.
    if testcase.open:
      continue

    # 4. Check if the testcase has a minimized repro. If not, skip.
    if not testcase.minimized_keys or testcase.minimized_keys == 'NA':
      continue

    # 5. Only use testcases that have bugs associated with them.
    if not testcase.bug_information:
      continue

    # 6. Existing IPC testcases are uninteresting and unused in further
    # mutations. Due to size bloat, ignore these for now.
    if testcase.absolute_path.endswith(testcase_manager.IPCDUMP_EXTENSION):
      continue

    # 7. Ignore testcases that are archives (e.g. Langfuzz fuzzer tests).
    if archive.get_archive_type(testcase.absolute_path):
      continue

    # 8. Skip in-process fuzzer testcases, since these are only applicable to
    # fuzz targets and don't run with blackbox binaries.
    if testcase.fuzzer_name and testcase.fuzzer_name in ['afl', 'libFuzzer']:
      continue

    # Un-pack testcase.
    try:
      _, input_directory, _ = setup.unpack_testcase(testcase)
    except Exception:
      logs.log_error('Failed to unpack testcase %d.' % testcase.key.id())
      continue

    # Move this to our crash testcases directory.
    crash_testcase_directory = os.path.join(crash_testcases_directory,
                                            str(testcase_id))
    shell.move(input_directory, crash_testcase_directory)

    # Re-create input directory for unpacking testcase in next iteration.
    shell.create_directory(input_directory)

    STORED_TESTCASES_LIST.append(testcase_id)

  # Remove testcase directories that exceed the max size limit.
  for directory_name in os.listdir(crash_testcases_directory):
    directory_path = os.path.join(crash_testcases_directory, directory_name)
    if not os.path.isdir(directory_path):
      continue

    if shell.get_directory_size(directory_path) <= MAX_TESTCASE_DIRECTORY_SIZE:
      continue

    shell.remove_directory(directory_path)

  # Rename all fuzzed testcase files as regular files.
  for root, _, files in os.walk(crash_testcases_directory):
    for filename in files:
      if not filename.startswith(testcase_manager.FUZZ_PREFIX):
        continue

      file_path = os.path.join(root, filename)
      stripped_file_name = os.path.basename(file_path)[len(
          testcase_manager.FUZZ_PREFIX):]
      stripped_file_path = os.path.join(
          os.path.dirname(file_path), stripped_file_name)
      try:
        os.rename(file_path, stripped_file_path)
      except OSError:
        raise Exception('Failed to rename testcase %s.' % file_path)

  # Remove empty files and dirs to avoid the case where a fuzzer randomly
  # chooses an empty dir/file and generates zero testcases.
  shell.remove_empty_files(crash_testcases_directory)
  shell.remove_empty_directories(crash_testcases_directory)
Code example #20
File: adb.py Project: vanhauser-thc/clusterfuzz
def copy_remote_file_to_local(remote_file_path, local_file_path):
  """Copies device file to a local file."""
  shell.create_directory(
      os.path.dirname(local_file_path), create_intermediates=True)
  run_command(['pull', remote_file_path, local_file_path])
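
A hedged usage example (device and local paths invented for illustration):

# Hypothetical paths. The local parent directory is created first, then
# `adb pull` copies the device file into it.
copy_remote_file_to_local('/sdcard/fuzzer-testcases/crash-1',
                          '/tmp/testcases/crash-1')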