Example #1
def dataset_as_rdd(dataset_url, spark_session, schema_fields=None):
    """
    Retrieve a spark rdd for a given petastorm dataset

    :param dataset_url: A string for the dataset url (e.g. hdfs:///path/to/dataset)
    :param spark_session: A spark session
    :param schema_fields: list of unischema fields to subset, or None to read all fields.
    :return: A rdd of dictionary records from the dataset
    """
    dataset_url_parsed = urlparse(dataset_url)

    resolver = FilesystemResolver(
        dataset_url_parsed,
        spark_session.sparkContext._jsc.hadoopConfiguration())
    dataset = pq.ParquetDataset(resolver.parsed_dataset_url().path,
                                filesystem=resolver.filesystem(),
                                validate_schema=False)
    schema = dataset_metadata.get_schema(dataset)

    dataset_df = spark_session.read.parquet(resolver.parsed_dataset_url().path)
    if schema_fields is not None:
        # If wanting a subset of fields, create the schema view and run a select on those fields
        schema = schema.create_schema_view(schema_fields)
        field_names = [field.name for field in schema_fields]
        dataset_df = dataset_df.select(*field_names)

    dataset_rows = dataset_df.rdd\
        .map(lambda row: utils.decode_row(row.asDict(), schema))\
        .map(lambda record: schema.make_namedtuple(**record))

    return dataset_rows
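A minimal usage sketch for the helper above, assuming a Petastorm dataset already exists at the placeholder URL and that pyspark is available; MySchema in the commented lines is a hypothetical Unischema.

# Hypothetical usage of dataset_as_rdd; the URL below is a placeholder.
from pyspark.sql import SparkSession

spark = SparkSession.builder.appName('dataset-as-rdd-example').getOrCreate()

# Read all fields as an RDD of namedtuple records and peek at one of them.
rdd = dataset_as_rdd('hdfs:///path/to/dataset', spark)
print(rdd.first())

# To read only a subset of fields, pass the corresponding Unischema fields, e.g.:
# rdd = dataset_as_rdd('hdfs:///path/to/dataset', spark,
#                      schema_fields=[MySchema.id, MySchema.image])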
Example #2
    def test_file_url(self):
        """ Case 2: File path, agnostic to content of hadoop configuration."""
        suj = FilesystemResolver('file://{}'.format(ABS_PATH),
                                 self._hadoop_configuration,
                                 connector=self.mock)
        self.assertTrue(isinstance(suj.filesystem(), LocalFileSystem))
        self.assertEqual('', suj.parsed_dataset_url().netloc)
        self.assertEqual(ABS_PATH, suj.parsed_dataset_url().path)
Example #3
    def test_hdfs_url_no_nameservice(self):
        """ Case 3b: HDFS with no nameservice should connect to default namenode."""
        suj = FilesystemResolver('hdfs:///some/path',
                                 self._hadoop_configuration,
                                 connector=self.mock)
        self.assertEqual(MockHdfs, type(suj.filesystem()._hdfs))
        self.assertEqual(HC.WARP_TURTLE, suj.parsed_dataset_url().netloc)
        # ensure the path is preserved in the parsed URL
        self.assertEqual('/some/path', suj.parsed_dataset_url().path)
        self.assertEqual(1, self.mock.connect_attempted(HC.WARP_TURTLE_NN2))
        self.assertEqual(0, self.mock.connect_attempted(HC.WARP_TURTLE_NN1))
        self.assertEqual(0, self.mock.connect_attempted(HC.DEFAULT_NN))
Example #4
    def test_hdfs_url_direct_namenode_retries(self):
        """ Case 4: direct namenode fails the first two times through, but the second retry succeeds."""
        self.mock.set_fail_n_next_connect(2)
        with self.assertRaises(ArrowIOError):
            suj = FilesystemResolver('hdfs://{}/path'.format(HC.WARP_TURTLE_NN2),
                                     self._hadoop_configuration,
                                     connector=self.mock)
        self.assertEqual(1, self.mock.connect_attempted(HC.WARP_TURTLE_NN2))
        self.assertEqual(0, self.mock.connect_attempted(HC.WARP_TURTLE_NN1))
        self.assertEqual(0, self.mock.connect_attempted(HC.DEFAULT_NN))
        with self.assertRaises(ArrowIOError):
            suj = FilesystemResolver('hdfs://{}/path'.format(HC.WARP_TURTLE_NN2),
                                     self._hadoop_configuration,
                                     connector=self.mock)
        self.assertEqual(2, self.mock.connect_attempted(HC.WARP_TURTLE_NN2))
        self.assertEqual(0, self.mock.connect_attempted(HC.WARP_TURTLE_NN1))
        self.assertEqual(0, self.mock.connect_attempted(HC.DEFAULT_NN))
        # this one should connect "successfully"
        suj = FilesystemResolver('hdfs://{}/path'.format(HC.WARP_TURTLE_NN2),
                                 self._hadoop_configuration,
                                 connector=self.mock)
        self.assertEqual(MockHdfs, type(suj.filesystem()))
        self.assertEqual(HC.WARP_TURTLE_NN2, suj.parsed_dataset_url().netloc)
        self.assertEqual(3, self.mock.connect_attempted(HC.WARP_TURTLE_NN2))
        self.assertEqual(0, self.mock.connect_attempted(HC.WARP_TURTLE_NN1))
        self.assertEqual(0, self.mock.connect_attempted(HC.DEFAULT_NN))
Example #5
    def test_s3_url(self):
        suj = FilesystemResolver('s3://bucket{}'.format(ABS_PATH),
                                 self._hadoop_configuration,
                                 connector=self.mock)
        self.assertTrue(isinstance(suj.filesystem(), S3FSWrapper))
        self.assertEqual('bucket', suj.parsed_dataset_url().netloc)
        self.assertEqual('bucket' + ABS_PATH, suj.get_dataset_path())
Example #6
    def test_s3_url(self):
        suj = FilesystemResolver('s3://bucket{}'.format(ABS_PATH),
                                 self._hadoop_configuration,
                                 connector=self.mock)
        self.assertTrue(isinstance(suj.filesystem(), S3FSWrapper))
        self.assertEqual('bucket', suj.parsed_dataset_url().netloc)
        self.assertEqual('bucket' + ABS_PATH, suj.get_dataset_path())

        # Make sure we did not capture FilesystemResolver in a closure by mistake
        dill.dumps(suj.filesystem_factory())
Example #7
    def test_hdfs_url_with_nameservice(self):
        """ Case 3a: HDFS nameservice."""
        suj = FilesystemResolver(HC.WARP_TURTLE_PATH,
                                 self._hadoop_configuration,
                                 connector=self.mock)
        self.assertEqual(MockHdfs, type(suj.filesystem()._hdfs))
        self.assertEqual(HC.WARP_TURTLE, suj.parsed_dataset_url().netloc)
        self.assertEqual(1, self.mock.connect_attempted(HC.WARP_TURTLE_NN2))
        self.assertEqual(0, self.mock.connect_attempted(HC.WARP_TURTLE_NN1))
        self.assertEqual(0, self.mock.connect_attempted(HC.DEFAULT_NN))
Example #8
    def test_hdfs_url_direct_namenode(self):
        """ Case 4: direct namenode."""
        suj = FilesystemResolver('hdfs://{}/path'.format(HC.WARP_TURTLE_NN1),
                                 self._hadoop_configuration,
                                 connector=self.mock)
        self.assertEqual(MockHdfs, type(suj.filesystem()))
        self.assertEqual(HC.WARP_TURTLE_NN1, suj.parsed_dataset_url().netloc)
        self.assertEqual(0, self.mock.connect_attempted(HC.WARP_TURTLE_NN2))
        self.assertEqual(1, self.mock.connect_attempted(HC.WARP_TURTLE_NN1))
        self.assertEqual(0, self.mock.connect_attempted(HC.DEFAULT_NN))
Example #9
    def test_file_url(self):
        """ Case 2: File path, agnostic to content of hadoop configuration."""
        suj = FilesystemResolver('file://{}'.format(ABS_PATH),
                                 self._hadoop_configuration,
                                 connector=self.mock)
        self.assertTrue(isinstance(suj.filesystem(), LocalFileSystem))
        self.assertEqual('', suj.parsed_dataset_url().netloc)
        self.assertEqual(ABS_PATH, suj.get_dataset_path())

        # Make sure we did not capture FilesystemResolver in a closure by mistake
        dill.dumps(suj.filesystem_factory())
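The dill.dumps(suj.filesystem_factory()) assertions in these tests guard against accidentally capturing the resolver (and its JVM-backed Hadoop configuration) in a closure. A rough sketch of why that matters, assuming filesystem_factory() returns a zero-argument, picklable callable that re-creates the filesystem:

import dill

def open_filesystem_on_worker(filesystem_factory):
    """Simulate shipping a filesystem factory to a remote worker process."""
    payload = dill.dumps(filesystem_factory)   # would raise if the factory closed over
                                               # a non-serializable object by mistake
    restored = dill.loads(payload)             # what the worker side would do
    return restored()                          # open the filesystem lazily on the worker

# Example (hypothetical): resolve a local path and re-open its filesystem "remotely".
# fs = open_filesystem_on_worker(FilesystemResolver('file:///tmp/dataset').filesystem_factory())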
Example #10
def generate_petastorm_metadata(spark, dataset_url, unischema_class=None):
    """
    Generates the metadata necessary to read a petastorm dataset and adds it to an existing dataset.

    :param spark: spark session
    :param dataset_url: url of the existing dataset
    :param unischema_class: (optional) fully qualified dataset unischema class
        (e.g. :class:`examples.hello_world.hello_world_dataset.HelloWorldSchema`). If not specified, will attempt
        to find one already stored in the dataset.
    """
    sc = spark.sparkContext

    resolver = FilesystemResolver(dataset_url, sc._jsc.hadoopConfiguration())
    dataset = pq.ParquetDataset(resolver.parsed_dataset_url().path,
                                filesystem=resolver.filesystem(),
                                validate_schema=False)

    if unischema_class:
        schema = locate(unischema_class)
    else:

        try:
            schema = get_schema(dataset)
        except ValueError:
            raise ValueError(
                'Unischema class could not be located in existing dataset,'
                ' please specify it')

    # In order to be backwards compatible, we retrieve the common metadata from the dataset before
    # overwriting the metadata to keep row group indexes and the old row group per file index
    arrow_metadata = dataset.common_metadata or None

    with materialize_dataset(spark, dataset_url, schema):
        # Inside the materialize dataset context we just need to write the metadata file as the schema will
        # be written by the context manager.
        # We use the java ParquetOutputCommitter to write the metadata file for the existing dataset
        # which will read all the footers of the dataset in parallel and merge them.
        hadoop_config = sc._jsc.hadoopConfiguration()
        Path = sc._gateway.jvm.org.apache.hadoop.fs.Path
        parquet_output_committer = sc._gateway.jvm.org.apache.parquet.hadoop.ParquetOutputCommitter
        parquet_output_committer.writeMetaDataFile(hadoop_config,
                                                   Path(dataset_url))

    if arrow_metadata:
        # If there was the old row groups per file key or the row groups index key, add them to the new dataset metadata
        base_schema = arrow_metadata.schema.to_arrow_schema()
        metadata_dict = base_schema.metadata
        if ROW_GROUPS_PER_FILE_KEY in metadata_dict:
            add_to_dataset_metadata(dataset, ROW_GROUPS_PER_FILE_KEY,
                                    metadata_dict[ROW_GROUPS_PER_FILE_KEY])
        if ROWGROUPS_INDEX_KEY in metadata_dict:
            add_to_dataset_metadata(dataset, ROWGROUPS_INDEX_KEY,
                                    metadata_dict[ROWGROUPS_INDEX_KEY])
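A minimal sketch of invoking the helper above; the dataset URL is a placeholder and the commented alternative uses the schema class path from the docstring.

# Hypothetical usage of generate_petastorm_metadata; the URL below is a placeholder.
from pyspark.sql import SparkSession

spark = SparkSession.builder.appName('regenerate-metadata').getOrCreate()

# Regenerate metadata, relying on the Unischema already stored in the dataset...
generate_petastorm_metadata(spark, 'hdfs:///path/to/dataset')

# ...or name the schema class explicitly if the dataset does not carry one:
# generate_petastorm_metadata(spark, 'hdfs:///path/to/dataset',
#                             unischema_class='examples.hello_world.hello_world_dataset.HelloWorldSchema')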
Example #11
    def process(self, piece_index, worker_predicate, shuffle_row_drop_partition):
        """Main worker function. Loads and returns all rows matching the predicate from a rowgroup

        Looks up the requested piece (a single row-group in a parquet file). If a predicate is specified,
        the columns needed by the predicate are loaded first. If no rows in the rowgroup match the predicate
        criteria, the rest of the columns are not loaded.

        :param piece_index: Index of the piece (rowgroup) to load.
        :param worker_predicate: An instance of a predicate (PredicateBase interface) to apply, or None.
        :param shuffle_row_drop_partition: A tuple of the current row drop partition and the total number
            of partitions.
        :return: None; loaded rows are emitted via publish_func.
        """

        if not self._dataset:
            resolver = FilesystemResolver(self._dataset_url_parsed)
            self._dataset = pq.ParquetDataset(
                resolver.parsed_dataset_url().path,
                filesystem=resolver.filesystem(),
                validate_schema=False)

        piece = self._split_pieces[piece_index]

        # Create pyarrow file system
        parquet_file = ParquetFile(self._dataset.fs.open(piece.path))

        if not isinstance(self._local_cache, NullCache):
            if worker_predicate:
                raise RuntimeError('Local cache is not supported together with predicates, '
                                   'unless the dataset is partitioned by the column the predicate operates on.')
            if shuffle_row_drop_partition[1] != 1:
                raise RuntimeError('Local cache is not supported together with shuffle_row_drop_partitions > 1')

        if worker_predicate:
            all_cols = self._load_rows_with_predicate(parquet_file, piece, worker_predicate, shuffle_row_drop_partition)
        else:
            # Using hash of the dataset url with the relative path in order to:
            #  1. Make sure if a common cache serves multiple processes (e.g. redis), we don't have conflicts
            #  2. Dataset url is hashed to make sure we don't create overly long keys, which may be incompatible with
            #     some cache implementations
            #  3. Still leave relative path and the piece_index in plain text to make it easier to debug
            cache_key = '{}:{}:{}'.format(hashlib.md5(urlunparse(self._dataset_url_parsed).encode('utf-8')).hexdigest(),
                                          piece.path, piece_index)
            all_cols = self._local_cache.get(cache_key,
                                             lambda: self._load_rows(parquet_file, piece, shuffle_row_drop_partition))

        if self._ngram:
            all_cols = self._ngram.form_ngram(data=all_cols, schema=self._schema)

        for item in all_cols:
            self.publish_func(item)
Example #12
def get_schema_from_dataset_url(dataset_url):
    """Returns a :class:`petastorm.unischema.Unischema` object loaded from a dataset specified by a url.

    :param dataset_url: A dataset URL
    :return: A :class:`petastorm.unischema.Unischema` object
    """
    resolver = FilesystemResolver(dataset_url)
    dataset = pq.ParquetDataset(resolver.parsed_dataset_url().path, filesystem=resolver.filesystem(),
                                validate_schema=False)

    # Get a unischema stored in the dataset metadata.
    stored_schema = get_schema(dataset)

    return stored_schema
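A short sketch of using the helper above to inspect a dataset; the URL is a placeholder and the loop assumes the returned Unischema exposes its fields as a dict named fields.

# Hypothetical usage: list the fields stored in a dataset's Unischema.
schema = get_schema_from_dataset_url('file:///tmp/hello_world_dataset')
for name, field in schema.fields.items():
    print(name, field)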
Example #13
    def test_hdfs_url_direct_namenode(self):
        """ Case 4: direct namenode."""
        suj = FilesystemResolver('hdfs://{}/path'.format(HC.WARP_TURTLE_NN1),
                                 self._hadoop_configuration,
                                 connector=self.mock,
                                 user=self.mock_name)
        self.assertEqual(MockHdfs, type(suj.filesystem()))
        self.assertEqual(self.mock_name, suj.filesystem()._user)
        self.assertEqual(HC.WARP_TURTLE_NN1, suj.parsed_dataset_url().netloc)
        self.assertEqual(0, self.mock.connect_attempted(HC.WARP_TURTLE_NN2))
        self.assertEqual(1, self.mock.connect_attempted(HC.WARP_TURTLE_NN1))
        self.assertEqual(0, self.mock.connect_attempted(HC.DEFAULT_NN))

        # Make sure we did not capture FilesystemResolver in a closure by mistake
        dill.dumps(suj.filesystem_factory())
Example #14
    def test_hdfs_url_with_nameservice(self):
        """ Case 3a: HDFS nameservice."""
        suj = FilesystemResolver(HC.WARP_TURTLE_PATH,
                                 self._hadoop_configuration,
                                 connector=self.mock,
                                 user=self.mock_name)
        self.assertEqual(MockHdfs, type(suj.filesystem()._hdfs))
        self.assertEqual(self.mock_name, suj.filesystem()._user)
        self.assertEqual(HC.WARP_TURTLE, suj.parsed_dataset_url().netloc)
        self.assertEqual(1, self.mock.connect_attempted(HC.WARP_TURTLE_NN2))
        self.assertEqual(0, self.mock.connect_attempted(HC.WARP_TURTLE_NN1))
        self.assertEqual(0, self.mock.connect_attempted(HC.DEFAULT_NN))

        # Make sure we did not capture FilesystemResolver in a closure by mistake
        dill.dumps(suj.filesystem_factory())
Example #15
def build_rowgroup_index(dataset_url, spark_context, indexers):
    """
    Builds an index for a given list of fields to use for fast rowgroup selection.

    :param dataset_url: (str) the url for the dataset (or a path if you would like to use the default hdfs config)
    :param spark_context: (SparkContext)
    :param indexers: list of objects used to build rowgroup indexes. Should support the RowGroupIndexerBase interface
    :return: None; upon successful completion the rowgroup predicates will be saved to the _metadata file
    """

    if dataset_url and dataset_url[-1] == '/':
        dataset_url = dataset_url[:-1]

    # Create pyarrow file system
    resolver = FilesystemResolver(dataset_url,
                                  spark_context._jsc.hadoopConfiguration())
    dataset = pq.ParquetDataset(resolver.parsed_dataset_url().path,
                                filesystem=resolver.filesystem(),
                                validate_schema=False)

    split_pieces = dataset_metadata.load_row_groups(dataset)
    schema = dataset_metadata.get_schema(dataset)

    # We need direct reference on partitions object
    partitions = dataset.partitions
    pieces_num = len(split_pieces)
    piece_info_list = []
    for piece_index in range(pieces_num):
        # Indexing relies on the ordering of the split dataset pieces.
        # This ordering depends on how the dataset pieces are split and sorted; although it should not change,
        # it still might, so we should keep in mind that such a change could break this.
        piece = split_pieces[piece_index]
        piece_info_list.append(
            PieceInfo(piece_index, piece.path, piece.row_group,
                      piece.partition_keys))

    start_time = time.time()
    piece_info_rdd = spark_context.parallelize(
        piece_info_list, min(len(piece_info_list), PARALLEL_SLICE_NUM))
    indexer_rdd = piece_info_rdd.map(lambda piece_info: _index_columns(
        piece_info, dataset_url, partitions, indexers, schema))
    indexer_list = indexer_rdd.reduce(_combine_indexers)

    indexer_dict = {indexer.index_name: indexer for indexer in indexer_list}
    serialized_indexers = pickle.dumps(indexer_dict, pickle.HIGHEST_PROTOCOL)
    utils.add_to_dataset_metadata(dataset, ROWGROUPS_INDEX_KEY,
                                  serialized_indexers)
    logger.info("Elapsed time of index creation: %f s",
                (time.time() - start_time))
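A sketch of driving the function above from a Spark job. The SingleFieldIndexer import path and the indexed field name are assumptions; any object implementing the RowGroupIndexerBase interface mentioned in the docstring would do.

# Hypothetical usage of build_rowgroup_index; URL and field name are placeholders.
from pyspark.sql import SparkSession
# Assumed import path for an indexer implementing RowGroupIndexerBase.
from petastorm.etl.rowgroup_indexers import SingleFieldIndexer

spark = SparkSession.builder.appName('build-rowgroup-index').getOrCreate()

# 'id' is a placeholder field name from the dataset's Unischema.
indexers = [SingleFieldIndexer('id_index', 'id')]
build_rowgroup_index('hdfs:///path/to/dataset', spark.sparkContext, indexers)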
Example #16
    def test_hdfs_url_no_nameservice(self):
        """ Case 3b: HDFS with no nameservice should connect to default namenode."""
        suj = FilesystemResolver('hdfs:///some/path',
                                 self._hadoop_configuration,
                                 connector=self.mock,
                                 user=self.mock_name)
        self.assertEqual(MockHdfs, type(suj.filesystem()._hdfs))
        self.assertEqual(self.mock_name, suj.filesystem()._user)
        self.assertEqual(HC.WARP_TURTLE, suj.parsed_dataset_url().netloc)
        # ensure path is preserved in parsed URL
        self.assertEqual('/some/path', suj.get_dataset_path())
        self.assertEqual(1, self.mock.connect_attempted(HC.WARP_TURTLE_NN2))
        self.assertEqual(0, self.mock.connect_attempted(HC.WARP_TURTLE_NN1))
        self.assertEqual(0, self.mock.connect_attempted(HC.DEFAULT_NN))

        # Make sure we did not capture FilesystemResolver in a closure by mistake
        dill.dumps(suj.filesystem_factory())
Example #17
@contextmanager
def materialize_dataset(spark, dataset_url, schema, row_group_size_mb=None):
    """
    A Context Manager which handles all the initialization and finalization necessary
    to generate metadata for a petastorm dataset. This should be used around your
    spark logic to materialize a dataset (specifically the writing of parquet output).

    Note: Any rowgroup indexing should happen outside the materialize_dataset block

    e.g.
    spark = SparkSession.builder...
    dataset_url = 'hdfs:///path/to/my/dataset'
    with materialize_dataset(spark, dataset_url, MyUnischema, 64):
      spark.sparkContext.parallelize(range(0, 10)).\
        ...
        .write.parquet(dataset_url)

    indexers = [SingleFieldIndexer(...)]
    build_rowgroup_index(dataset_url, spark.sparkContext, indexers)

    :param spark: The spark session you are using
    :param dataset_url: The dataset url to output your dataset to (e.g. hdfs:///path/to/dataset)
    :param schema: The unischema definition of your dataset
    :param row_group_size_mb: The parquet row group size to use for your dataset
    """
    spark_config = {}
    _init_spark(spark, spark_config, row_group_size_mb)
    yield

    # After job completes, add the unischema metadata and check for the metadata summary file
    resolver = FilesystemResolver(
        dataset_url, spark.sparkContext._jsc.hadoopConfiguration())
    dataset = pq.ParquetDataset(resolver.parsed_dataset_url().path,
                                filesystem=resolver.filesystem(),
                                validate_schema=False)

    _generate_unischema_metadata(dataset, schema)
    if not dataset.metadata_path:
        raise MetadataGenerationError(
            'Could not find summary metadata file. The dataset will exist but you will need'
            ' to execute petastorm-generate-metadata before you can read your dataset '
            ' in order to generate the necessary metadata.'
            ' Try increasing spark driver memory next time and making sure you are'
            ' using parquet-mr >= 1.8.3')

    _cleanup_spark(spark, spark_config, row_group_size_mb)
Example #18
    def __init__(self, dataset_url, schema, ngram, local_cache,
                 worker_predicate):
        """RowGroupLoader responsible for loading one rowgroup at a time. Returned rows are encoded.

        :param dataset_url: A url of a parquet dataset.
        :param schema: A unischema corresponding to the data in the dataset
        :param ngram: An instance of NGram if ngrams should be read or None, if each row in the dataset corresponds to
          a single sample returned.
        :param local_cache: An instance of a rowgroup cache (CacheBase interface) object to be used.
        :param worker_predicate: An instance of predicate (PredicateBase interface)
        """
        self._dataset_url_parsed = urlparse(dataset_url)
        self._schema = schema
        self._ngram = ngram
        self._local_cache = local_cache
        self._worker_predicate = worker_predicate

        resolver = FilesystemResolver(self._dataset_url_parsed)
        self._dataset = pq.ParquetDataset(resolver.parsed_dataset_url().path,
                                          filesystem=resolver.filesystem(),
                                          validate_schema=False)
Example #19
def make_batch_reader(dataset_url,
                      schema_fields=None,
                      reader_pool_type='thread',
                      workers_count=10,
                      shuffle_row_groups=True,
                      shuffle_row_drop_partitions=1,
                      predicate=None,
                      rowgroup_selector=None,
                      num_epochs=1,
                      cur_shard=None,
                      shard_count=None,
                      cache_type='null',
                      cache_location=None,
                      cache_size_limit=None,
                      cache_row_size_estimate=None,
                      cache_extra_settings=None,
                      hdfs_driver='libhdfs3'):
    """
    Creates an instance of Reader for reading batches out of a non-Petastorm Parquet store.

    Currently, only stores having native scalar parquet data types are supported.
    Use :func:`~petastorm.make_reader` to read Petastorm Parquet stores generated with
    :func:`~petastorm.etl.dataset_metadata.materialize_dataset`.

    NOTE: only scalar columns are currently supported.

    :param dataset_url: a file path or a url to a parquet directory,
        e.g. ``'hdfs://some_hdfs_cluster/user/yevgeni/parquet8'``, or ``'file:///tmp/mydataset'``
        or ``'s3://bucket/mydataset'``.
    :param schema_fields: A list of regex pattern strings. Only columns matching at least one of the
        patterns in the list will be loaded.
    :param reader_pool_type: A string denoting the reader pool type. Should be one of ['thread', 'process', 'dummy']
        denoting a thread pool, process pool, or running everything in the master thread. Defaults to 'thread'
    :param workers_count: An int for the number of workers to use in the reader pool. This is only used for the
        thread or process pool. Defaults to 10.
    :param shuffle_row_groups: Whether to shuffle row groups (the order in which full row groups are read)
    :param shuffle_row_drop_partitions: This is a positive integer which determines how many partitions to
        break up a row group into for increased shuffling in exchange for worse performance (extra reads).
        For example if you specify 2 each row group read will drop half of the rows within every row group and
        read the remaining rows in separate reads. It is recommended to keep this number below the regular row
        group size in order to not waste reads which drop all rows.
    :param predicate: instance of :class:`.PredicateBase` object to filter rows to be returned by reader. The predicate
        will be passed a pandas DataFrame object and must return a pandas Series with boolean values of matching
        dimensions.
    :param rowgroup_selector: instance of row group selector object to select row groups to be read
    :param num_epochs: An epoch is a single pass over all rows in the dataset. Setting ``num_epochs`` to
        ``None`` will result in an infinite number of epochs.
    :param cur_shard: An int denoting the current shard number. Each node reading a shard should
        pass in a unique shard number in the range [0, shard_count). shard_count must be supplied as well.
        Defaults to None
    :param shard_count: An int denoting the number of shards to break this dataset into. Defaults to None
    :param cache_type: A string denoting the cache type, if desired. Options are [None, 'null', 'local-disk'] to
        either have a null/noop cache or a cache implemented using diskcache. Caching is useful when communication
        to the main data store is either slow or expensive and the local machine has large enough storage
        to store the entire dataset (or a partition of a dataset if shard_count is used). By default, a null cache is used.
    :param cache_location: A string denoting the location or path of the cache.
    :param cache_size_limit: An int specifying the size limit of the cache in bytes
    :param cache_row_size_estimate: An int specifying the estimated size of a row in the dataset
    :param cache_extra_settings: A dictionary of extra settings to pass to the cache implementation.
    :param hdfs_driver: A string denoting the hdfs driver to use (if using a dataset on hdfs). Current choices are
        libhdfs (java through JNI) or libhdfs3 (C++)
    :return: A :class:`Reader` object
    """

    if dataset_url is None or not isinstance(dataset_url, six.string_types):
        raise ValueError("""dataset_url must be a string""")

    dataset_url = dataset_url[:-1] if dataset_url[-1] == '/' else dataset_url
    logger.debug('dataset_url: %s', dataset_url)

    resolver = FilesystemResolver(dataset_url, hdfs_driver=hdfs_driver)
    filesystem = resolver.filesystem()

    dataset_path = resolver.parsed_dataset_url().path

    if cache_type is None or cache_type == 'null':
        cache = NullCache()
    elif cache_type == 'local-disk':
        cache = LocalDiskArrowTableCache(cache_location, cache_size_limit,
                                         cache_row_size_estimate,
                                         **cache_extra_settings or {})
    else:
        raise ValueError('Unknown cache_type: {}'.format(cache_type))

    if reader_pool_type == 'thread':
        reader_pool = ThreadPool(workers_count)
    elif reader_pool_type == 'process':
        serializer = ArrowTableSerializer()
        reader_pool = ProcessPool(workers_count, serializer)
    elif reader_pool_type == 'dummy':
        reader_pool = DummyPool()
    else:
        raise ValueError(
            'Unknown reader_pool_type: {}'.format(reader_pool_type))

    return Reader(filesystem,
                  dataset_path,
                  schema_fields=schema_fields,
                  worker_class=ArrowReaderWorker,
                  reader_pool=reader_pool,
                  shuffle_row_groups=shuffle_row_groups,
                  shuffle_row_drop_partitions=shuffle_row_drop_partitions,
                  predicate=predicate,
                  rowgroup_selector=rowgroup_selector,
                  num_epochs=num_epochs,
                  cur_shard=cur_shard,
                  shard_count=shard_count,
                  cache=cache)
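A brief usage sketch for make_batch_reader; the path is a placeholder, and it assumes the returned Reader can be used as a context manager and iterated over, as in petastorm's public examples.

# Hypothetical usage: iterate over column batches of a plain Parquet store.
with make_batch_reader('file:///tmp/parquet_store', num_epochs=1) as reader:
    for batch in reader:
        # Each item is a namedtuple of columnar data read from one row group.
        print(type(batch))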
Example #20
@contextmanager
def materialize_dataset(spark,
                        dataset_url,
                        schema,
                        row_group_size_mb=None,
                        use_summary_metadata=False):
    """
    A Context Manager which handles all the initialization and finalization necessary
    to generate metadata for a petastorm dataset. This should be used around your
    spark logic to materialize a dataset (specifically the writing of parquet output).

    Note: Any rowgroup indexing should happen outside the materialize_dataset block

    Example:

    >>> spark = SparkSession.builder...
    >>> ds_url = 'hdfs:///path/to/my/dataset'
    >>> with materialize_dataset(spark, ds_url, MyUnischema, 64):
    >>>   spark.sparkContext.parallelize(range(0, 10)).
    >>>     ...
    >>>     .write.parquet(ds_url)
    >>> indexer = [SingleFieldIndexer(...)]
    >>> build_rowgroup_index(ds_url, spark.sparkContext, indexer)

    :param spark: The spark session you are using
    :param dataset_url: The dataset url to output your dataset to (e.g. ``hdfs:///path/to/dataset``)
    :param schema: The :class:`petastorm.unischema.Unischema` definition of your dataset
    :param row_group_size_mb: The parquet row group size to use for your dataset
    :param use_summary_metadata: Whether to use the parquet summary metadata for row group indexing or a custom
            indexing method. The custom indexing method is more scalable for very large datasets.
    """
    spark_config = {}
    _init_spark(spark, spark_config, row_group_size_mb, use_summary_metadata)
    yield

    # After job completes, add the unischema metadata and check for the metadata summary file
    resolver = FilesystemResolver(
        dataset_url, spark.sparkContext._jsc.hadoopConfiguration())
    dataset = pq.ParquetDataset(resolver.parsed_dataset_url().path,
                                filesystem=resolver.filesystem(),
                                validate_schema=False)

    _generate_unischema_metadata(dataset, schema)
    if not use_summary_metadata:
        _generate_num_row_groups_per_file(dataset, spark.sparkContext)

    # Reload the dataset to take into account the new metadata
    dataset = pq.ParquetDataset(resolver.parsed_dataset_url().path,
                                filesystem=resolver.filesystem(),
                                validate_schema=False)
    try:
        # Try to load the row groups, if it fails that means the metadata was not generated properly
        load_row_groups(dataset)
    except PetastormMetadataError:
        raise PetastormMetadataGenerationError(
            'Could not find summary metadata file. The dataset will exist but you will need'
            ' to execute petastorm-generate-metadata.py before you can read your dataset '
            ' in order to generate the necessary metadata.'
            ' Try increasing spark driver memory next time and making sure you are'
            ' using parquet-mr >= 1.8.3')

    _cleanup_spark(spark, spark_config, row_group_size_mb)
Example #21
    parser.add_argument('--print-values',
                        action='store_true',
                        help='Print index values (dataset piece indexes)')
    parser.add_argument('--skip-index',
                        nargs='+',
                        type=str,
                        help='Do not display indexed values for given fields')

    args = parser.parse_args()

    if args.dataset_url and args.dataset_url[-1] == '/':
        args.dataset_url = args.dataset_url[:-1]

    # Create pyarrow file system
    resolver = FilesystemResolver(args.dataset_url)
    dataset = pq.ParquetDataset(resolver.parsed_dataset_url().path,
                                filesystem=resolver.filesystem(),
                                validate_schema=False)

    print_all = not args.schema and not args.index
    if args.schema or print_all:
        print('*** Schema from dataset metadata ***')
        print((dataset_metadata.get_schema(dataset)))

    if args.index or print_all:
        index_dict = rowgroup_indexing.get_row_group_indexes(dataset)
        print('*** Row group indexes from dataset metadata ***')
        for index_name in index_dict:
            print(('Index: {}'.format(index_name)))
            if args.skip_index is None or index_name not in args.skip_index:
                for field_value in index_dict[index_name].indexed_values:
Example #22
    def __init__(self, dataset_url, schema_fields=None, shuffle=None, predicate=None, rowgroup_selector=None,
                 num_epochs=1, sequence=None, training_partition=None, num_training_partitions=None,
                 read_timeout_s=None, cache=None, loader_pool=None, decoder_pool=None, shuffling_queue=None,
                 shuffle_options=None, pyarrow_filesystem=None):
        """Initializes a reader object.

        :param dataset_url: a file path or a url to a parquet directory,
                       e.g. 'hdfs://some_hdfs_cluster/user/yevgeni/parquet8', or '/tmp/mydataset'
        :param schema_fields:
            Either list of unischema fields to subset, or None to read all fields.
            OR an NGram object, then it will return an NGram of the specified properties.
        :param predicate: instance of predicate object to filter rows to be returned by reader.
        :param rowgroup_selector: instance of row group selector object to select row groups to be read
        :param reader_pool: parallelization pool. ThreadPool(10) (10 threads) is used by default.
                       This pool is a custom implementation used to parallelize reading data from the dataset.
                       Any object from workers_pool package can be used (e.g. ProcessPool)
        :param num_epochs: An epoch is a single pass over all samples in the dataset. Setting num_epochs to 'None' will
                       result in an infinite number of epochs.
        :param sequence: This is deprecated. To use sequence/ngram, please supply the argument in schema_fields instead.
        :param training_partition: An int denoting the partition number used for multi node training. Each node should
                       pass in a unique partition number in the range [0, num_training_partitions).
                       num_training_partitions must be supplied as well.
        :param num_training_partitions: An int denoting the number of training partitions (how many nodes are performing
                       the multi node training)
        :param read_timeout_s: A numeric with the amount of time in seconds you would like to give a read before it
                       times out and raises an EmptyResultError. Pass in None for an infinite timeout
        :param cache: An object conforming to `cache.CacheBase` interface. Before loading row groups from a parquet file
                       the Reader will attempt to load these values from cache. Caching is useful when communication
                       to the main data store is either slow or expensive and the local machine has large enough storage
                       to store the entire dataset (or a partition of a dataset if num_training_partitions is used).
        :param decoder_pool: An instance of a concurrent.futures pool executor used for decoding. If None,
          a default ThreadPoolExecutor(5) will be used.
        :param loader_pool: An instance of a concurrent.futures pool executor used for loading row groups. If None,
          a default ThreadPoolExecutor(5) will be used.
        :param shuffle_options: ShuffleOptions object describing how to shuffle the dataset (supersedes the shuffle
                       parameter); defaults to shuffling row groups but not dropping rows based on partitions.
        :param shuffle: DEPRECATED boolean whether to shuffle the row group order. Use shuffle_row_groups in
                       ShuffleOptions instead.

        By default, a `NullCache` implementation is used.
        """

        # 1. Resolve dataset path (hdfs://, file://) and open the parquet storage (dataset)
        # 2. Get a list of all groups
        # 3. Filter rowgroups
        #    a. predicates
        #    b. row-group selector (our indexing mechanism)
        #    c. partition: used to get a subset of data for distributed training
        # 4. Launch a new thread running `worker_loop` function.

        if dataset_url is None or not isinstance(dataset_url, six.string_types):
            raise ValueError("""dataset_url must be a string""")

        if not (isinstance(schema_fields, collections.Iterable) or isinstance(schema_fields, NGram)
                or schema_fields is None):
            raise ValueError("""Fields must be either None, an iterable collection of Unischema fields or an NGram
            object.""")

        if sequence is not None:
            raise ValueError("""'sequence' argument of Reader object is deprecated. Please pass an NGram instance to
            'schema_fields' argument instead.""")

        self.ngram = schema_fields if isinstance(schema_fields, NGram) else None

        if self.ngram and not self.ngram.timestamp_overlap and shuffle_options.shuffle_row_drop_partitions > 1:
            raise NotImplementedError('Using timestamp_overlap=False is not implemented with'
                                      ' shuffle_options.shuffle_row_drop_partitions > 1')

        cache = cache or NullCache()
        dataset_url = dataset_url[:-1] if dataset_url[-1] == '/' else dataset_url

        if shuffle_options is None:
            if shuffle is None:
                shuffle = True
            else:
                logger.warning('shuffle option is deprecated. Please use shuffle_options instead')
            shuffle_options = ShuffleOptions(shuffle)

        # 1. Resolve dataset path (hdfs://, file://) and open the parquet storage (dataset)
        logger.debug('dataset_url: %s', dataset_url)

        if pyarrow_filesystem is not None:
            filesystem = pyarrow_filesystem
            dataset_path = urlparse(dataset_url).path
        else:
            resolver = FilesystemResolver(dataset_url)
            filesystem = resolver.filesystem()
            dataset_path = resolver.parsed_dataset_url().path

        self._dataset = pq.ParquetDataset(dataset_path, filesystem=filesystem, validate_schema=False)

        self._normalize_shuffle_options(shuffle_options, self._dataset)

        # Get a unischema stored in the dataset metadata.
        stored_schema = dataset_metadata.get_schema(self._dataset)

        # Make a schema view (a view is a Unischema containing only a subset of fields).
        # Will raise an exception if invalid schema fields are in schema_fields.
        fields = schema_fields if isinstance(schema_fields, collections.Iterable) else None
        self.schema = stored_schema.create_schema_view(fields) if fields else stored_schema

        # 2. Get a list of all groups
        row_groups = dataset_metadata.load_row_groups(self._dataset)

        # 3. Filter rowgroups
        filtered_row_groups, worker_predicate = self._filter_row_groups(self._dataset, row_groups, predicate,
                                                                        rowgroup_selector, training_partition,
                                                                        num_training_partitions)

        epoch_items = self._apply_row_drop_partition(filtered_row_groups, shuffle_options)

        # 4. Launch a new thread running `worker_loop` function.
        epochs_iterator = lambda: epoch_generator(epoch_items, num_epochs, shuffle_options.shuffle_row_groups)

        self._results_queue = Queue(_OUTPUT_QUEUE_SIZE)

        loader = RowGroupLoader(dataset_url, self.schema, self.ngram, cache, worker_predicate)
        decoder = RowDecoder(self.schema, self.ngram)
        self._loader_pool = loader_pool or ThreadPoolExecutor(5)
        self._decoder_pool = decoder_pool or ThreadPoolExecutor(5)
        self._stop_flow_manager_event = threading.Event()
        self._diags = Counter()

        if not shuffling_queue:
            shuffling_queue = NoopShufflingBuffer()

        self._flow_manager_thread = threading.Thread(target=worker_loop,
                                                     args=(epochs_iterator, self._loader_pool, loader,
                                                           self._decoder_pool,
                                                           decoder,
                                                           shuffling_queue, self._results_queue,
                                                           self._stop_flow_manager_event, self._diags))
        self._flow_manager_thread.daemon = True
        self._flow_manager_thread.start()

        self._read_timeout_s = read_timeout_s
Example #23
    def __init__(self,
                 dataset_url,
                 schema_fields=None,
                 shuffle=None,
                 predicate=None,
                 rowgroup_selector=None,
                 reader_pool=None,
                 num_epochs=1,
                 sequence=None,
                 training_partition=None,
                 num_training_partitions=None,
                 read_timeout_s=None,
                 cache=None,
                 shuffle_options=None):
        """Initializes a reader object.

        :param dataset_url: a file path or a url to a parquet directory,
            e.g. ``'hdfs://some_hdfs_cluster/user/yevgeni/parquet8'``, or ``'/tmp/mydataset'``.
        :param schema_fields: Either list of unischema fields to subset, or ``None`` to read all fields.
            OR an NGram object, then it will return an NGram of the specified properties.
        :param predicate: instance of predicate object to filter rows to be returned by reader.
        :param rowgroup_selector: instance of row group selector object to select row groups to be read
        :param reader_pool: parallelization pool. ``ThreadPool(10)`` (10 threads) is used by default.
            This pool is a custom implementation used to parallelize reading data from the dataset.
            Any object from workers_pool package can be used
            (e.g. :class:`petastorm.workers_pool.process_pool.ProcessPool`).
        :param num_epochs: An epoch is a single pass over all samples in the dataset. Setting ``num_epochs`` to
            ``None`` will result in an infinite number of epochs.
        :param training_partition: An int denoting the partition number used for multi node training. Each node should
            pass in a unique partition number in the range ``[0, num_training_partitions)``.
            ``num_training_partitions`` must be supplied as well.
        :param num_training_partitions: An int denoting the number of training partitions (how many nodes are performing
            the multi node training).
        :param read_timeout_s: A numeric with the amount of time in seconds you would like to give a read before it
            times out and raises an EmptyResultError. Pass in None for an infinite timeout.
        :param cache: An object conforming to :class:`.CacheBase` interface. Before loading row groups from a parquet
            file the Reader will attempt to load these values from cache. Caching is useful when communication
            to the main data store is either slow or expensive and the local machine has large enough storage
            to store the entire dataset (or a partition of a dataset if num_training_partitions is used).
            By default, use the :class:`.NullCache` implementation.
        :param shuffle_options: ShuffleOptions object describing how to shuffle the dataset (supersedes the shuffle
            parameter); defaults to shuffling row groups but not dropping rows based on partitions.
        :param sequence: *DEPRECATED* To use sequence/ngram, please supply the argument in
            ``schema_fields`` instead.
        :param shuffle: *DEPRECATED* Boolean whether to shuffle the row group order.
            Use ``shuffle_row_groups`` in :class:`.ShuffleOptions` instead.
        """

        # 1. Resolve dataset path (hdfs://, file://) and open the parquet storage (dataset)
        # 2. Get a list of all groups
        # 3. Filter rowgroups
        #    a. predicates
        #    b. row-group selector (our indexing mechanism)
        #    c. partition: used to get a subset of data for distributed training
        # 4. Create a rowgroup ventilator object
        # 5. Start workers pool
        if dataset_url is None or not isinstance(dataset_url,
                                                 six.string_types):
            raise ValueError("""dataset_url must be a string""")

        if not (isinstance(schema_fields, collections.Iterable)
                or isinstance(schema_fields, NGram) or schema_fields is None):
            raise ValueError(
                """Fields must be either None, an iterable collection of Unischema fields or an NGram
            object.""")

        if sequence is not None:
            raise ValueError(
                """'sequence' argument of Reader object is deprecated. Please pass an NGram instance to
            'schema_fields' argument instead.""")

        self.ngram = schema_fields if isinstance(schema_fields,
                                                 NGram) else None

        if self.ngram and not self.ngram.timestamp_overlap and shuffle_options.shuffle_row_drop_partitions > 1:
            raise NotImplementedError(
                'Using timestamp_overlap=False is not implemented with'
                ' shuffle_options.shuffle_row_drop_partitions > 1')

        cache = cache or NullCache()
        dataset_url = dataset_url[:-1] if dataset_url[
            -1] == '/' else dataset_url
        self._workers_pool = reader_pool or ThreadPool(10)

        # 1. Resolve dataset path (hdfs://, file://) and open the parquet storage (dataset)
        logger.debug('dataset_url: %s', dataset_url)
        resolver = FilesystemResolver(dataset_url)
        self.dataset = pq.ParquetDataset(resolver.parsed_dataset_url().path,
                                         filesystem=resolver.filesystem(),
                                         validate_schema=False)

        # Get a unischema stored in the dataset metadata.
        stored_schema = dataset_metadata.get_schema(self.dataset)

        # Make a schema view (a view is a Unischema containing only a subset of fields).
        # Will raise an exception if invalid schema fields are in schema_fields.
        fields = schema_fields if isinstance(schema_fields,
                                             collections.Iterable) else None
        self.schema = stored_schema.create_schema_view(
            fields) if fields else stored_schema

        # 2. Get a list of all groups
        row_groups = dataset_metadata.load_row_groups(self.dataset)

        # 3. Filter rowgroups
        filtered_row_group_indexes, worker_predicate = self._filter_row_groups(
            self.dataset, row_groups, predicate, rowgroup_selector,
            training_partition, num_training_partitions)
        # 4. Create a rowgroup ventilator object
        if shuffle_options is None:
            if shuffle is None:
                shuffle = True
            else:
                logger.warning(
                    'shuffle option is deprecated. Please use shuffle_options instead'
                )
            shuffle_options = ShuffleOptions(shuffle)
        self._normalize_shuffle_options(shuffle_options, self.dataset)
        ventilator = self._create_ventilator(filtered_row_group_indexes,
                                             shuffle_options, num_epochs,
                                             worker_predicate)

        # 5. Start workers pool
        self._workers_pool.start(ReaderWorker,
                                 (dataset_url, self.schema, self.ngram,
                                  row_groups, cache, worker_predicate),
                                 ventilator=ventilator)
        self._read_timeout_s = read_timeout_s
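For completeness, a rough sketch of constructing this Reader variant directly; the URL and partition numbers are placeholders, and the stop()/join() shutdown calls are assumptions based on the workers-pool design shown above.

# Hypothetical usage of the Reader shown above (older constructor signature).
reader = Reader('hdfs:///path/to/dataset',
                schema_fields=None,           # read all fields from the stored Unischema
                training_partition=0,         # this node's shard
                num_training_partitions=4)    # total number of reading nodes
try:
    for row in reader:
        pass  # consume decoded namedtuple rows here
finally:
    reader.stop()   # assumed shutdown of the workers pool
    reader.join()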