Example #1
    def test_get_filesystem_and_path_or_paths(self):
        fs1, path1 = get_filesystem_and_path_or_paths('file:///some/path')
        assert isinstance(fs1, LocalFileSystem) and path1 == '/some/path'

        fs2, paths2 = get_filesystem_and_path_or_paths(
            ['file:///some/path/01.parquet', 'file:///some/path/02.parquet'])
        assert isinstance(fs2, LocalFileSystem) and \
            paths2 == ['/some/path/01.parquet', '/some/path/02.parquet']

        with self.assertRaises(ValueError):
            get_filesystem_and_path_or_paths([
                'file:///some/path/01.parquet', 'hdfs:///some/path/02.parquet'
            ])
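
As a quick illustration of the contract this test exercises, here is a hedged sketch of calling the helper directly. The petastorm.fs_utils import path is an assumption; the snippets here do not show their imports.

# Sketch only: the import path below is an assumption, not shown in the snippet above.
from petastorm.fs_utils import get_filesystem_and_path_or_paths

# A single URL resolves to (filesystem, path) ...
fs, path = get_filesystem_and_path_or_paths('file:///tmp/mydataset')
print(type(fs).__name__, path)

# ... while a list of same-scheme URLs resolves to (filesystem, [paths]).
fs, paths = get_filesystem_and_path_or_paths(
    ['file:///tmp/mydataset/00000.parquet', 'file:///tmp/mydataset/00001.parquet'])
print(paths)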
Example #2
def _wait_file_available(url_list):
    """Waiting about _FILE_AVAILABILITY_WAIT_TIMEOUT_SECS seconds (default 30 seconds) to make sure
    all files are available for reading. This is useful in some filesystems, such as S3 which only
    providing eventually consistency.
    """
    fs, path_list = get_filesystem_and_path_or_paths(url_list)
    logger.debug('Waiting some seconds until all parquet-store files appear at urls %s', ','.join(url_list))

    def wait_for_file(path):
        end_time = time.time() + _FILE_AVAILABILITY_WAIT_TIMEOUT_SECS
        while time.time() < end_time:
            if fs.exists(path):
                return True
            time.sleep(0.1)
        return False

    pool = ThreadPool(64)
    try:
        results = pool.map(wait_for_file, path_list)
        failed_list = [url for url, result in zip(url_list, results) if not result]
        if failed_list:
            raise RuntimeError('Timeout while waiting for all parquet-store files to appear at urls {failed_list}. '
                               'Please check whether these files were saved successfully when materializing the dataframe.'
                               .format(failed_list=','.join(failed_list)))
    finally:
        pool.close()
        pool.join()
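
The function above is a bounded polling loop fanned out over a thread pool. Below is a minimal, self-contained sketch of the same pattern using only the standard library and plain local paths; the 30-second default mirrors the docstring, and all names are illustrative.

import os
import time
from multiprocessing.pool import ThreadPool

_TIMEOUT_SECS = 30  # mirrors the documented _FILE_AVAILABILITY_WAIT_TIMEOUT_SECS default


def wait_local_files_available(paths, timeout=_TIMEOUT_SECS):
    """Poll until every path exists or the timeout elapses; return the paths that never appeared."""
    def wait_for_file(path):
        end_time = time.time() + timeout
        while time.time() < end_time:
            if os.path.exists(path):
                return True
            time.sleep(0.1)
        return False

    pool = ThreadPool(min(64, len(paths) or 1))
    try:
        results = pool.map(wait_for_file, paths)
    finally:
        pool.close()
        pool.join()
    return [path for path, ok in zip(paths, results) if not ok]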
Example #3
def get_schema_from_dataset_url(dataset_url_or_urls,
                                hdfs_driver='libhdfs3',
                                storage_options=None,
                                filesystem=None):
    """Returns a :class:`petastorm.unischema.Unischema` object loaded from a dataset specified by a url.

    :param dataset_url_or_urls: a url to a parquet directory or a url list (with the same scheme) to parquet files.
    :param hdfs_driver: A string denoting the hdfs driver to use (if using a dataset on hdfs). Current choices are
        libhdfs (java through JNI) or libhdfs3 (C++)
    :param storage_options: Dict of kwargs forwarded to ``fsspec`` to initialize the filesystem.
    :param filesystem: the ``pyarrow.FileSystem`` to use.
    :return: A :class:`petastorm.unischema.Unischema` object
    """
    fs, path_or_paths = get_filesystem_and_path_or_paths(
        dataset_url_or_urls,
        hdfs_driver,
        storage_options=storage_options,
        filesystem=filesystem)

    dataset = pq.ParquetDataset(path_or_paths,
                                filesystem=fs,
                                validate_schema=False,
                                metadata_nthreads=10)

    # Get a unischema stored in the dataset metadata.
    stored_schema = get_schema(dataset)

    return stored_schema
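
A hedged usage sketch: load the stored Unischema for a dataset and list its field names. The module path and the fields attribute are assumptions based on how example #8 below calls dataset_metadata.get_schema_from_dataset_url, not on this snippet alone.

# Sketch only: module path and the `fields` attribute are assumed.
from petastorm.etl.dataset_metadata import get_schema_from_dataset_url

schema = get_schema_from_dataset_url('file:///tmp/mydataset')
print(list(schema.fields))  # field names stored in the dataset's Unischema metadata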
Example #4
def get_schema_from_dataset_url(dataset_url_or_urls,
                                hdfs_driver='libhdfs3',
                                filesystem=None):
    """Returns a :class:`petastorm.unischema.Unischema` object loaded from a dataset specified by a url.

    :param dataset_url_or_urls: a url to a parquet directory or a url list (with the same scheme) to parquet files.
    :param hdfs_driver: A string denoting the hdfs driver to use (if using a dataset on hdfs). Current choices are
        libhdfs (java through JNI) or libhdfs3 (C++)
    :param filesystem: An instance of :class:`pyarrow.fs.FileSystem` object.
    :return: A :class:`petastorm.unischema.Unischema` object
    """
    fs, path_or_paths = get_filesystem_and_path_or_paths(
        dataset_url_or_urls, hdfs_driver)
    if filesystem is not None:
        fs = filesystem

    dataset = pq.ParquetDataset(path_or_paths,
                                filesystem=fs,
                                validate_schema=False,
                                metadata_nthreads=10)

    # Get a unischema stored in the dataset metadata.
    stored_schema = get_schema(dataset)

    return stored_schema
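
This variant adds a filesystem override. A hedged sketch of using it with a locally built pyarrow filesystem; the import paths are assumptions, and whether the legacy ParquetDataset call accepts a pyarrow.fs filesystem depends on the pyarrow version.

# Sketch only: demonstrates the documented `filesystem` override; import paths are assumed.
import pyarrow.fs as pafs
from petastorm.etl.dataset_metadata import get_schema_from_dataset_url

schema = get_schema_from_dataset_url('file:///tmp/mydataset',
                                     filesystem=pafs.LocalFileSystem())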
Example #5
def _check_parent_cache_dir_url(dir_url):
    """Check dir url whether is suitable to be used as parent cache directory."""
    _check_url(dir_url)
    fs, dir_path = get_filesystem_and_path_or_paths(dir_url)
    if 'DATABRICKS_RUNTIME_VERSION' in os.environ and not _is_spark_local_mode():
        if isinstance(fs, LocalFileSystem):
            # Users need to use a dbfs fuse URL.
            if not dir_path.startswith('/dbfs/'):
                logger.warning(
                    "Usually, when running on a databricks spark cluster, you should specify a dbfs fuse path "
                    "for %s, like 'file:/dbfs/path/to/cache_dir'; otherwise, you should mount NFS to the "
                    "directory '%s' on all nodes of the cluster, e.g. using EFS.",
                    SparkDatasetConverter.PARENT_CACHE_DIR_URL_CONF, dir_url)
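
For context, the conf key checked here is normally set on the Spark session before building a converter. A hedged sketch follows; the conf-key constant comes from the snippet, while the Spark session setup and the cache URL are illustrative.

# Illustrative sketch: assumes a running Spark session and the petastorm.spark module layout.
from pyspark.sql import SparkSession
from petastorm.spark import SparkDatasetConverter

spark = SparkSession.builder.getOrCreate()
# On Databricks (non-local mode), the warning above suggests a dbfs fuse path such as this one.
spark.conf.set(SparkDatasetConverter.PARENT_CACHE_DIR_URL_CONF,
               'file:///dbfs/tmp/petastorm_cache')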
Example #6
def _check_dataset_file_median_size(url_list):
    fs, path_list = get_filesystem_and_path_or_paths(url_list)

    # TODO: also check file size for other file system.
    if isinstance(fs, LocalFileSystem):
        pool = ThreadPool(64)
        try:
            file_size_list = pool.map(os.path.getsize, path_list)
            mid_index = len(file_size_list) // 2
            median_size = sorted(file_size_list)[mid_index]
            if median_size < 50 * 1024 * 1024:
                logger.warning('The median size (%d) of these parquet files (%s) is too small. '
                               'Increase file sizes by repartitioning or coalescing the spark dataframe, which '
                               'will help improve performance.', median_size, ','.join(url_list))
        finally:
            pool.close()
            pool.join()
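
The median used here is the upper median of the sorted sizes (index len // 2). A tiny self-contained sketch of just that computation, with illustrative byte counts:

# Illustrative numbers only: sizes in bytes for five hypothetical parquet files.
file_size_list = [10 * 1024 * 1024, 60 * 1024 * 1024, 5 * 1024 * 1024,
                  55 * 1024 * 1024, 20 * 1024 * 1024]

mid_index = len(file_size_list) // 2             # 5 // 2 == 2
median_size = sorted(file_size_list)[mid_index]  # sorted MB: [5, 10, 20, 55, 60] -> 20 MB
if median_size < 50 * 1024 * 1024:
    print('median %d bytes is below the 50 MB recommendation' % median_size)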
Example #7
def _check_dataset_file_median_size(url_list):
    fs, path_list = get_filesystem_and_path_or_paths(url_list)
    RECOMMENDED_FILE_SIZE_BYTES = 50 * 1024 * 1024

    # TODO: also check file size for other file system.
    if isinstance(fs, LocalFileSystem):
        pool = ThreadPool(64)
        try:
            file_size_list = pool.map(os.path.getsize, path_list)
            if len(file_size_list) > 1:
                mid_index = len(file_size_list) // 2
                median_size = sorted(file_size_list)[mid_index]  # take the larger one if tie
                if median_size < RECOMMENDED_FILE_SIZE_BYTES:
                    logger.warning('The median size %d B (< 50 MB) of the parquet files is too small. '
                                   'Total size: %d B. Increase the median file size by calling df.repartition(n) or '
                                   'df.coalesce(n), which might help improve the performance. Parquet files: %s, ...',
                                   median_size, sum(file_size_list), url_list[0])
        finally:
            pool.close()
            pool.join()
Example #8
def make_reader(dataset_url,
                schema_fields=None,
                reader_pool_type='thread',
                workers_count=10,
                pyarrow_serialize=False,
                results_queue_size=50,
                shuffle_row_groups=True,
                shuffle_row_drop_partitions=1,
                predicate=None,
                rowgroup_selector=None,
                num_epochs=1,
                cur_shard=None,
                shard_count=None,
                shard_seed=None,
                cache_type=NULL_CACHE,
                cache_location=None,
                cache_size_limit=None,
                cache_row_size_estimate=None,
                cache_extra_settings=None,
                hdfs_driver='libhdfs3',
                transform_spec=None,
                filters=None,
                storage_options=None,
                zmq_copy_buffers=True,
                filesystem=None):
    """
    Creates an instance of Reader for reading Petastorm datasets. A Petastorm dataset is a dataset generated using
    :func:`~petastorm.etl.dataset_metadata.materialize_dataset` context manager as explained
    `here <https://petastorm.readthedocs.io/en/latest/readme_include.html#generating-a-dataset>`_.

    See :func:`~petastorm.make_batch_reader` to read from a Parquet store that was not generated using
    :func:`~petastorm.etl.dataset_metadata.materialize_dataset`.

    :param dataset_url: a filepath or a url to a parquet directory,
        e.g. ``'hdfs://some_hdfs_cluster/user/yevgeni/parquet8'``, or ``'file:///tmp/mydataset'``,
        or ``'s3://bucket/mydataset'``, or ``'gs://bucket/mydataset'``.
    :param schema_fields: Can be: a list of unischema fields and/or regex pattern strings; ``None`` to read all fields;
            or an NGram object, in which case an NGram of the specified fields is returned.
    :param reader_pool_type: A string denoting the reader pool type. Should be one of ['thread', 'process', 'dummy']
        denoting a thread pool, process pool, or running everything in the master thread. Defaults to 'thread'
    :param workers_count: An int for the number of workers to use in the reader pool. This only is used for the
        thread or process pool. Defaults to 10
    :param pyarrow_serialize: THE ARGUMENT IS DEPRECATED AND WILL BE REMOVED IN FUTURE VERSIONS.
    :param results_queue_size: Size of the results queue to store prefetched row-groups. Currently only applicable to
        thread reader pool type.
    :param shuffle_row_groups: Whether to shuffle row groups (the order in which full row groups are read)
    :param shuffle_row_drop_partitions: This is a positive integer which determines how many partitions to
        break up a row group into for increased shuffling in exchange for worse performance (extra reads).
        For example if you specify 2 each row group read will drop half of the rows within every row group and
        read the remaining rows in separate reads. It is recommended to keep this number below the regular row
        group size in order to not waste reads which drop all rows.
    :param predicate: instance of :class:`.PredicateBase` object to filter rows to be returned by reader. The predicate
        will be passed a single row and must return a boolean value indicating whether to include it in the results.
    :param rowgroup_selector: instance of row group selector object to select row groups to be read
    :param num_epochs: An epoch is a single pass over all rows in the dataset. Setting ``num_epochs`` to
        ``None`` will result in an infinite number of epochs.
    :param cur_shard: An int denoting the current shard number. Each node reading a shard should
        pass in a unique shard number in the range [0, shard_count). shard_count must be supplied as well.
        Defaults to None
    :param shard_count: An int denoting the number of shards to break this dataset into. Defaults to None
    :param shard_seed: Random seed to shuffle row groups for data sharding. Defaults to None
    :param cache_type: A string denoting the cache type, if desired. Options are [None, 'null', 'local-disk'] to
        either have a null/noop cache or a cache implemented using diskcache. Caching is useful when communication
        to the main data store is either slow or expensive and the local machine has large enough storage
        to store entire dataset (or a partition of a dataset if shard_count is used). By default will be a null cache.
    :param cache_location: A string denoting the location or path of the cache.
    :param cache_size_limit: An int specifying the size limit of the cache in bytes
    :param cache_row_size_estimate: An int specifying the estimated size of a row in the dataset
    :param cache_extra_settings: A dictionary of extra settings to pass to the cache implementation,
    :param hdfs_driver: A string denoting the hdfs driver to use (if using a dataset on hdfs). Current choices are
        libhdfs (java through JNI) or libhdfs3 (C++)
    :param transform_spec: An instance of :class:`~petastorm.transform.TransformSpec` object defining how a record
        is transformed after it is loaded and decoded. The transformation occurs on a worker thread/process (depends
        on the ``reader_pool_type`` value).
    :param filters: (List[Tuple] or List[List[Tuple]]): Standard PyArrow filters.
        These will be applied when loading the parquet file with PyArrow. More information
        here: https://arrow.apache.org/docs/python/generated/pyarrow.parquet.ParquetDataset.html
    :param storage_options: Dict of kwargs forwarded to ``fsspec`` to initialize the filesystem.
    :param zmq_copy_buffers: A bool indicating whether to use 0mq copy buffers with ProcessPool.
    :param filesystem: An instance of ``pyarrow.FileSystem`` to use. Will ignore storage_options and
        other filesystem configs if it's provided.
    :return: A :class:`Reader` object
    """
    dataset_url = normalize_dir_url(dataset_url)

    filesystem, dataset_path = get_filesystem_and_path_or_paths(
        dataset_url,
        hdfs_driver,
        storage_options=storage_options,
        filesystem=filesystem)

    if cache_type is None or cache_type == NULL_CACHE:
        cache = NullCache()
    elif cache_type == LOCAL_DISK_CACHE:
        cache = LocalDiskCache(cache_location, cache_size_limit,
                               cache_row_size_estimate, **cache_extra_settings
                               or {})
    else:
        raise ValueError('Unknown cache_type: {}'.format(cache_type))

    try:
        dataset_metadata.get_schema_from_dataset_url(
            dataset_url,
            hdfs_driver=hdfs_driver,
            storage_options=storage_options,
            filesystem=filesystem)
    except PetastormMetadataError:
        warnings.warn(
            'Currently make_reader supports reading only Petastorm datasets. '
            'To read from a non-Petastorm Parquet store use make_batch_reader')

    if reader_pool_type == 'thread':
        reader_pool = ThreadPool(workers_count, results_queue_size)
    elif reader_pool_type == 'process':
        if pyarrow_serialize:
            warnings.warn(
                "pyarrow_serializer was deprecated and will be removed in future versions. "
                "The argument no longer has any effect.")
        serializer = PickleSerializer()
        reader_pool = ProcessPool(workers_count,
                                  serializer,
                                  zmq_copy_buffers=zmq_copy_buffers)
    elif reader_pool_type == 'dummy':
        reader_pool = DummyPool()
    else:
        raise ValueError(
            'Unknown reader_pool_type: {}'.format(reader_pool_type))

    kwargs = {
        'schema_fields': schema_fields,
        'reader_pool': reader_pool,
        'shuffle_row_groups': shuffle_row_groups,
        'shuffle_row_drop_partitions': shuffle_row_drop_partitions,
        'predicate': predicate,
        'rowgroup_selector': rowgroup_selector,
        'num_epochs': num_epochs,
        'cur_shard': cur_shard,
        'shard_count': shard_count,
        'shard_seed': shard_seed,
        'cache': cache,
        'transform_spec': transform_spec,
        'filters': filters
    }

    try:
        return Reader(filesystem,
                      dataset_path,
                      worker_class=PyDictReaderWorker,
                      is_batched_reader=False,
                      **kwargs)
    except PetastormMetadataError as e:
        logger.error('Unexpected exception: %s', str(e))
        raise RuntimeError(
            'make_reader has failed. If you were trying to open a Parquet store that was not '
            'created using Petastorm materialize_dataset and it contains only scalar columns, '
            'you may use make_batch_reader to read it.\n'
            'Inner exception: {}'.format(e))
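
A hedged usage sketch of make_reader, following the iteration pattern petastorm documents for its readers. The URL is a placeholder and must point at a store materialized with materialize_dataset.

# Sketch only: 'file:///tmp/mydataset' is a placeholder for a Petastorm-materialized store.
from petastorm import make_reader

with make_reader('file:///tmp/mydataset', num_epochs=1) as reader:
    for row in reader:  # each row is a named tuple built from the stored Unischema
        print(row)
        break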
Example #9
def make_batch_reader(dataset_url_or_urls,
                      schema_fields=None,
                      reader_pool_type='thread',
                      workers_count=10,
                      shuffle_row_groups=True,
                      shuffle_row_drop_partitions=1,
                      predicate=None,
                      rowgroup_selector=None,
                      num_epochs=1,
                      cur_shard=None,
                      shard_count=None,
                      shard_seed=None,
                      cache_type='null',
                      cache_location=None,
                      cache_size_limit=None,
                      cache_row_size_estimate=None,
                      cache_extra_settings=None,
                      hdfs_driver='libhdfs3',
                      transform_spec=None,
                      filters=None,
                      storage_options=None,
                      zmq_copy_buffers=True,
                      filesystem=None):
    """
    Creates an instance of Reader for reading batches out of a non-Petastorm Parquet store.

    Currently, only stores having native scalar parquet data types are supported.
    Use :func:`~petastorm.make_reader` to read Petastorm Parquet stores generated with
    :func:`~petastorm.etl.dataset_metadata.materialize_dataset`.

    NOTE: only scalar columns or array type (of primitive type element) columns are currently supported.

    NOTE: If `schema_fields` is not specified, the reader schema will be inferred from the parquet dataset, and the
    reader schema field order will preserve the parquet dataset field order (partition columns come first). However,
    if `transform_spec` is set and `TransformSpec.selected_fields` is specified, the reader schema field order
    will follow the order of `selected_fields`.

    :param dataset_url_or_urls: a url to a parquet directory or a url list (with the same scheme) to parquet files.
        e.g. ``'hdfs://some_hdfs_cluster/user/yevgeni/parquet8'``, or ``'file:///tmp/mydataset'``,
        or ``'s3://bucket/mydataset'``, or ``'gs://bucket/mydataset'``,
        or ``[file:///tmp/mydataset/00000.parquet, file:///tmp/mydataset/00001.parquet]``.
    :param schema_fields: A list of regex pattern strings. Only columns matching at least one of the
        patterns in the list will be loaded.
    :param reader_pool_type: A string denoting the reader pool type. Should be one of ['thread', 'process', 'dummy']
        denoting a thread pool, process pool, or running everything in the master thread. Defaults to 'thread'
    :param workers_count: An int for the number of workers to use in the reader pool. This only is used for the
        thread or process pool. Defaults to 10
    :param shuffle_row_groups: Whether to shuffle row groups (the order in which full row groups are read)
    :param shuffle_row_drop_partitions: This is a positive integer which determines how many partitions to
        break up a row group into for increased shuffling in exchange for worse performance (extra reads).
        For example if you specify 2 each row group read will drop half of the rows within every row group and
        read the remaining rows in separate reads. It is recommended to keep this number below the regular row
        group size in order to not waste reads which drop all rows.
    :param predicate: instance of :class:`.PredicateBase` object to filter rows to be returned by reader. The predicate
        will be passed a pandas DataFrame object and must return a pandas Series with boolean values of matching
        dimensions.
    :param rowgroup_selector: instance of row group selector object to select row groups to be read
    :param num_epochs: An epoch is a single pass over all rows in the dataset. Setting ``num_epochs`` to
        ``None`` will result in an infinite number of epochs.
    :param cur_shard: An int denoting the current shard number. Each node reading a shard should
        pass in a unique shard number in the range [0, shard_count). shard_count must be supplied as well.
        Defaults to None
    :param shard_count: An int denoting the number of shards to break this dataset into. Defaults to None
    :param shard_seed: Random seed to shuffle row groups for data sharding. Defaults to None
    :param cache_type: A string denoting the cache type, if desired. Options are [None, 'null', 'local-disk'] to
        either have a null/noop cache or a cache implemented using diskcache. Caching is useful when communication
        to the main data store is either slow or expensive and the local machine has large enough storage
        to store entire dataset (or a partition of a dataset if shard_count is used). By default will be a null cache.
    :param cache_location: A string denoting the location or path of the cache.
    :param cache_size_limit: An int specifying the size limit of the cache in bytes
    :param cache_row_size_estimate: An int specifying the estimated size of a row in the dataset
    :param cache_extra_settings: A dictionary of extra settings to pass to the cache implementation,
    :param hdfs_driver: A string denoting the hdfs driver to use (if using a dataset on hdfs). Current choices are
        libhdfs (java through JNI) or libhdfs3 (C++)
    :param transform_spec: An instance of :class:`~petastorm.transform.TransformSpec` object defining how a record
        is transformed after it is loaded and decoded. The transformation occurs on a worker thread/process (depends
        on the ``reader_pool_type`` value).
    :param filters: (List[Tuple] or List[List[Tuple]]): Standard PyArrow filters.
        These will be applied when loading the parquet file with PyArrow. More information
        here: https://arrow.apache.org/docs/python/generated/pyarrow.parquet.ParquetDataset.html
    :param storage_options: Dict of kwargs forwarded to ``fsspec`` to initialize the filesystem.
    :param zmq_copy_buffers: A bool indicating whether to use 0mq copy buffers with ProcessPool.
    :param filesystem: An instance of ``pyarrow.FileSystem`` to use. Will ignore storage_options and
        other filesystem configs if it's provided.
    :return: A :class:`Reader` object
    """
    dataset_url_or_urls = normalize_dataset_url_or_urls(dataset_url_or_urls)

    filesystem, dataset_path_or_paths = get_filesystem_and_path_or_paths(
        dataset_url_or_urls,
        hdfs_driver,
        storage_options=storage_options,
        filesystem=filesystem)

    try:
        dataset_metadata.get_schema_from_dataset_url(
            dataset_url_or_urls,
            hdfs_driver=hdfs_driver,
            storage_options=storage_options,
            filesystem=filesystem)
        warnings.warn(
            'Please use make_reader (instead of the make_batch_reader function) to read this dataset. '
            'You may get unexpected results. '
            'Currently make_batch_reader supports reading only Parquet stores that contain '
            'standard Parquet data types and do not require petastorm decoding.'
        )
    except PetastormMetadataError:
        pass

    if cache_type is None or cache_type == NULL_CACHE:
        cache = NullCache()
    elif cache_type == LOCAL_DISK_CACHE:
        cache = LocalDiskArrowTableCache(cache_location, cache_size_limit,
                                         cache_row_size_estimate,
                                         **cache_extra_settings or {})
    else:
        raise ValueError('Unknown cache_type: {}'.format(cache_type))

    if reader_pool_type == 'thread':
        reader_pool = ThreadPool(workers_count)
    elif reader_pool_type == 'process':
        serializer = ArrowTableSerializer()
        reader_pool = ProcessPool(workers_count,
                                  serializer,
                                  zmq_copy_buffers=zmq_copy_buffers)
    elif reader_pool_type == 'dummy':
        reader_pool = DummyPool()
    else:
        raise ValueError(
            'Unknown reader_pool_type: {}'.format(reader_pool_type))

    return Reader(filesystem,
                  dataset_path_or_paths,
                  schema_fields=schema_fields,
                  worker_class=ArrowReaderWorker,
                  reader_pool=reader_pool,
                  shuffle_row_groups=shuffle_row_groups,
                  shuffle_row_drop_partitions=shuffle_row_drop_partitions,
                  predicate=predicate,
                  rowgroup_selector=rowgroup_selector,
                  num_epochs=num_epochs,
                  cur_shard=cur_shard,
                  shard_count=shard_count,
                  shard_seed=shard_seed,
                  cache=cache,
                  transform_spec=transform_spec,
                  is_batched_reader=True,
                  filters=filters)
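
A hedged usage sketch for the batch reader, again with a placeholder URL. Here the store is an ordinary Parquet directory with scalar columns, and each item yielded is a batch of rows rather than a single row.

# Sketch only: 'file:///tmp/mydataset' is a placeholder for an ordinary Parquet store.
from petastorm import make_batch_reader

with make_batch_reader('file:///tmp/mydataset', num_epochs=1) as reader:
    for batch in reader:  # each batch is a named tuple of column value arrays
        print(batch)
        break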