Example #1
def _transform_and_write_tfr(
    dataset: pvalue.PCollection,
    tfr_writer: Callable[..., beam.io.tfrecordio.WriteToTFRecord],
    raw_metadata: types.BeamDatasetMetadata,
    preprocessing_fn: Optional[Callable] = None,
    transform_fn: Optional[types.TransformFn] = None,
    label: str = 'data'):
  """Applies TF Transform to dataset and outputs it as TFRecords."""

  dataset_metadata = (dataset, raw_metadata)

  if transform_fn:
    transformed_dataset, transformed_metadata = (
        (dataset_metadata, transform_fn)
        | f'Transform{label}' >> tft_beam.TransformDataset())
  else:
    if not preprocessing_fn:
      preprocessing_fn = lambda x: x
    (transformed_dataset, transformed_metadata), transform_fn = (
        dataset_metadata
        | f'AnalyzeAndTransform{label}' >>
        tft_beam.AnalyzeAndTransformDataset(preprocessing_fn))

  transformed_data_coder = tft.coders.ExampleProtoCoder(
      transformed_metadata.schema)
  _ = (
      transformed_dataset
      | f'Encode{label}' >> beam.Map(transformed_data_coder.encode)
      | f'Write{label}' >> tfr_writer(prefix=label.lower()))

  return transform_fn
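One way the helper above might be wired into a pipeline (not part of the original snippet). The feature spec, preprocessing_fn, and output directory below are illustrative placeholders, and a plain DatasetMetadata is passed where the signature names types.BeamDatasetMetadata.

# Hypothetical call site for _transform_and_write_tfr; all names below are illustrative.
import os
import tempfile

import apache_beam as beam
import tensorflow as tf
import tensorflow_transform as tft
import tensorflow_transform.beam as tft_beam
from tensorflow_transform.tf_metadata import dataset_metadata, schema_utils

_EXAMPLES = [{'x': 1.0}, {'x': 2.0}, {'x': 3.0}]
_RAW_METADATA = dataset_metadata.DatasetMetadata(
    schema_utils.schema_from_feature_spec(
        {'x': tf.io.FixedLenFeature([], tf.float32)}))


def _preprocessing_fn(inputs):
  return {'x_scaled': tft.scale_to_0_1(inputs['x'])}


def _make_tfr_writer(output_dir):
  # The helper calls tfr_writer(prefix=...), so adapt WriteToTFRecord's signature here.
  def writer(prefix):
    return beam.io.WriteToTFRecord(
        file_path_prefix=os.path.join(output_dir, prefix),
        file_name_suffix='.tfrecord.gz')
  return writer


with beam.Pipeline() as pipeline:
  with tft_beam.Context(temp_dir=tempfile.mkdtemp()):
    raw_data = pipeline | 'ReadTrain' >> beam.Create(_EXAMPLES)
    transform_fn = _transform_and_write_tfr(
        raw_data,
        _make_tfr_writer(tempfile.mkdtemp()),
        raw_metadata=_RAW_METADATA,
        preprocessing_fn=_preprocessing_fn,
        label='Train')
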
Example #2
    def expand(self, pipeline):
        # TODO(b/147620802): Consider making this (and other parameters)
        # configurable to test more variants (e.g. with and without deep-copy
        # optimisation, with and without cache, etc).
        with tft_beam.Context(temp_dir=tempfile.mkdtemp()):
            converter = tft.coders.ExampleProtoCoder(self._tf_metadata_schema,
                                                     serialized=False)
            raw_data = (
                pipeline
                | "ReadDataset" >> beam.Create(
                    self._dataset.read_raw_dataset())
                | "Decode" >> beam.Map(converter.decode))
            transform_fn, output_metadata = (
                (raw_data, self._transform_input_dataset_metadata)
                | "AnalyzeDataset" >> tft_beam.AnalyzeDataset(
                    self._preprocessing_fn))

            if self._generate_dataset:
                _ = transform_fn | "CopySavedModel" >> _CopySavedModel(
                    dest_path=self._dataset.tft_saved_model_path())

            (transformed_dataset, transformed_metadata) = (
                ((raw_data, self._transform_input_dataset_metadata),
                 (transform_fn, output_metadata))
                | "TransformDataset" >> tft_beam.TransformDataset())
            return transformed_dataset, transformed_metadata
Example #3
  def expand(self, pipeline):
    # TODO(b/147620802): Consider making this (and other parameters)
    # configurable to test more variants (e.g. with and without deep-copy
    # optimisation, with and without cache, etc).
    with tft_beam.Context(
        temp_dir=tempfile.mkdtemp(),
        force_tf_compat_v1=self._force_tf_compat_v1):
      raw_data = (
          pipeline
          | "ReadDataset" >> beam.Create(
              self._dataset.read_raw_dataset(
                  deserialize=False, limit=self._max_num_examples))
          | "Decode" >> self._tfxio.BeamSource())
      transform_fn, output_metadata = (
          (raw_data, self._tfxio.TensorAdapterConfig())
          | "AnalyzeDataset" >> tft_beam.AnalyzeDataset(self._preprocessing_fn))

      if self._generate_dataset:
        _ = transform_fn | "CopySavedModel" >> _CopySavedModel(
            dest_path=self._dataset.tft_saved_model_path(
                self._force_tf_compat_v1))

      (transformed_dataset, transformed_metadata) = (
          ((raw_data, self._tfxio.TensorAdapterConfig()),
           (transform_fn, output_metadata))
          | "TransformDataset" >> tft_beam.TransformDataset())
      return transformed_dataset, transformed_metadata
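Example #3 assumes self._tfxio has already been constructed elsewhere. A plausible way to build such a TFXIO for tf.Example TFRecord data with tfx_bsl's public API; the file pattern and raw_schema are placeholders:

from tfx_bsl.public import tfxio as tfxio_lib

# Hypothetical construction of the TFXIO used above; raw_schema is a
# tensorflow_metadata Schema proto describing the raw tf.Examples.
example_tfxio = tfxio_lib.TFExampleRecord(
    file_pattern='/path/to/raw_examples-*.tfrecord.gz',
    schema=raw_schema)

# BeamSource yields Arrow RecordBatches; TensorAdapterConfig is what
# tft_beam.AnalyzeDataset consumes in place of DatasetMetadata.
source = example_tfxio.BeamSource()
tensor_adapter_config = example_tfxio.TensorAdapterConfig()
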
Example #4
def transform_tft(train_data, test_data, working_dir):
    options = PipelineOptions()
    options.view_as(StandardOptions).runner = 'DirectRunner'
    with beam.Pipeline(options=options) as pipeline:
        with tft_beam.Context(temp_dir=tempfile.mkdtemp()):
            data_shape = train_data[0][0].shape
            raw_data = (
                pipeline | 'ReadTrainData' >> beam.Create(train_data)
                | 'CreateTrainData' >> beam.Map(lambda data: format(data)))
            raw_data_metadata = dataset_metadata.DatasetMetadata(
                dataset_schema.from_feature_spec({
                    IMAGE_KEY:
                    tf.FixedLenFeature(list(data_shape), tf.float32),
                    LABEL_KEY:
                    tf.FixedLenFeature([], tf.int64)
                }))
            raw_dataset = (raw_data, raw_data_metadata)
            transformed_dataset, transform_fn = (
                raw_dataset
                | tft_beam.AnalyzeAndTransformDataset(preprocessing_fn))
            transformed_data, transformed_metadata = transformed_dataset
            transformed_data_coder = tft.coders.ExampleProtoCoder(
                transformed_metadata.schema)

            _ = (
                transformed_data
                | 'EncodeTrainData' >> beam.Map(transformed_data_coder.encode)
                | 'WriteTrainData' >> beam.io.WriteToTFRecord(
                    os.path.join(working_dir, TRANSFORMED_TRAIN_DATA_FILEBASE),
                    file_name_suffix='.tfrecords'))

            raw_test_data = (
                pipeline | 'ReadTestData' >> beam.Create(test_data)
                | 'CreateTestData' >> beam.Map(lambda data: format(data)))
            raw_test_dataset = (raw_test_data, raw_data_metadata)

            transformed_test_dataset = ((raw_test_dataset, transform_fn)
                                        | tft_beam.TransformDataset())
            # Don't need transformed data schema, it's the same as before.
            transformed_test_data, _ = transformed_test_dataset

            _ = (transformed_test_data
                 | 'EncodeTestData' >> beam.Map(transformed_data_coder.encode)
                 | 'WriteTestData' >> beam.io.WriteToTFRecord(
                     os.path.join(working_dir, TRANSFORMED_TEST_DATA_FILEBASE),
                     file_name_suffix='.tfrecords'))

            _ = (transform_fn |
                 'WriteTransformFn' >> tft_beam.WriteTransformFn(working_dir))
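Example #4 calls a preprocessing_fn and a format helper that are not shown. A rough sketch of what they might look like for this image/label schema; the scaling choice and the dict layout produced by format are assumptions:

import tensorflow_transform as tft


def format(data):
  # Hypothetical: turn an (image, label) pair from train_data into a feature dict.
  image, label = data
  return {IMAGE_KEY: image, LABEL_KEY: int(label)}


def preprocessing_fn(inputs):
  # Illustrative only: scale pixel values into [0, 1] and pass the label through.
  return {
      IMAGE_KEY: tft.scale_to_0_1(inputs[IMAGE_KEY]),
      LABEL_KEY: inputs[LABEL_KEY],
  }
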
Example #5
def transform_and_write(pcollection, input_metadata, output_dir, transform_fn,
                        file_prefix):
  """Transforms data and writes results to local disc or Cloud Storage bucket.

  Args:
    pcollection: Pipeline data.
    input_metadata: DatasetMetadata object for given input data.
    output_dir: Directory to write transformed output.
    transform_fn: TensorFlow transform function.
    file_prefix: File prefix to add to output file.
  """
  shuffled_data = (pcollection | 'RandomizeData' >> beam.transforms.Reshuffle())
  (transformed_data,
   transformed_metadata) = (((shuffled_data, input_metadata), transform_fn)
                            | 'Transform' >> tft_beam.TransformDataset())
  coder = example_proto_coder.ExampleProtoCoder(transformed_metadata.schema)
  (transformed_data
   | 'SerializeExamples' >> beam.Map(coder.encode)
   | 'WriteExamples' >> beam.io.WriteToTFRecord(
       os.path.join(output_dir, file_prefix),
       file_name_suffix=_FILE_NAME_SUFFIX))
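A minimal sketch of how transform_and_write might be driven, assuming a transform_fn previously exported with tft_beam.WriteTransformFn; the paths, example dicts, and input_metadata are placeholders:

import tempfile

import apache_beam as beam
import tensorflow_transform.beam as tft_beam

with beam.Pipeline() as pipeline:
  with tft_beam.Context(temp_dir=tempfile.mkdtemp()):
    # Load a transform_fn written earlier by tft_beam.WriteTransformFn.
    transform_fn = pipeline | tft_beam.ReadTransformFn('/path/to/transform_output')
    # eval_examples are dicts matching input_metadata's schema.
    examples = pipeline | 'ReadEval' >> beam.Create(eval_examples)
    transform_and_write(examples, input_metadata, '/path/to/output',
                        transform_fn, 'eval')
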
Example #6
            def encode_data(data_path, prefix, output_filename):
                # Apply transform function to test data.
                raw_data = (
                    pipeline
                    | 'ReadData' + prefix >> beam.io.ReadFromParquet(
                        data_path))

                raw_dataset = (raw_data, RAW_DATA_METADATA)

                transformed_dataset = (
                    (raw_dataset, transform_fn)
                    | 'Transform' + prefix >> tft_beam.TransformDataset())

                # Don't need transformed data schema, it's the same as before.
                transformed_data, _ = transformed_dataset

                _ = (transformed_data
                     | 'EncodeData' + prefix >> beam.Map(
                         transformed_data_coder.encode)
                     | 'WriteData' + prefix >> beam.io.WriteToTFRecord(
                         os.path.join(working_dir, output_filename)))
Example #7
def _main(argv=None):
    logging.getLogger().setLevel(logging.INFO)

    parser = argparse.ArgumentParser()
    parser.add_argument('--raw_examples_path', required=True)
    parser.add_argument('--raw_examples_schema_path', required=True)
    parser.add_argument('--transform_fn_dir', required=True)
    parser.add_argument('--transformed_examples_path_prefix', required=True)
    known_args, pipeline_args = parser.parse_known_args(argv)

    raw_examples_schema = load_schema(known_args.raw_examples_schema_path)
    raw_examples_coder = tft.coders.ExampleProtoCoder(raw_examples_schema)
    raw_examples_metadata = dataset_metadata.DatasetMetadata(
        raw_examples_schema)

    pipeline_options = PipelineOptions(pipeline_args)
    pipeline_options.view_as(SetupOptions).save_main_session = True

    with beam.Pipeline(options=pipeline_options) as pipeline:
        with tft_beam.Context(temp_dir=get_beam_temp_dir(pipeline_options)):
            transform_fn = pipeline | tft_beam.ReadTransformFn(
                known_args.transform_fn_dir)
            raw_examples = (
                pipeline
                | 'ReadRawExamples' >> beam.io.ReadFromTFRecord(
                    known_args.raw_examples_path, coder=raw_examples_coder))
            raw_examples_dataset = (raw_examples, raw_examples_metadata)
            transformed_examples, transform_examples_metadata = (
                (raw_examples_dataset, transform_fn)
                | tft_beam.TransformDataset())
            transformed_examples_coder = tft.coders.ExampleProtoCoder(
                transform_examples_metadata.schema)
            _ = (
                transformed_examples
                | 'WriteTransformedExamples' >> beam.io.WriteToTFRecord(
                    known_args.transformed_examples_path_prefix,
                    file_name_suffix='.tfrecord.gz',
                    coder=transformed_examples_coder))
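The snippet above calls load_schema and get_beam_temp_dir without defining them. A possible sketch of those helpers, assuming the schema is stored as a text-format tf.Metadata Schema proto and that the Beam temp_location doubles as tf.Transform's temp dir:

import tensorflow as tf
from apache_beam.options.pipeline_options import GoogleCloudOptions
from google.protobuf import text_format
from tensorflow_metadata.proto.v0 import schema_pb2


def load_schema(schema_path):
  # Parse a text-format tensorflow_metadata Schema proto from schema_path.
  schema = schema_pb2.Schema()
  with tf.io.gfile.GFile(schema_path, 'r') as f:
    text_format.Parse(f.read(), schema)
  return schema


def get_beam_temp_dir(pipeline_options):
  # Reuse the pipeline's temp_location as the tf.Transform temp dir.
  return pipeline_options.view_as(GoogleCloudOptions).temp_location
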
Example #8
def transform_data(train_data_file, test_data_file, working_dir):
    """Transform the data and write out as a TFRecord of Example protos.

  Read in the data using the CSV reader, and transform it using a
  preprocessing pipeline that scales numeric data and converts categorical data
  from strings to int64 value indices, by creating a vocabulary for each
  category.

  Args:
    train_data_file: File containing training data
    test_data_file: File containing test data
    working_dir: Directory to write transformed data and metadata to
  """
    def preprocessing_fn(inputs):
        """Preprocess input columns into transformed columns."""
        # Since we are modifying some features and leaving others unchanged, we
        # start by setting `outputs` to a copy of `inputs`.
        outputs = inputs.copy()

        # Scale numeric columns to have range [0, 1].
        for key in NUMERIC_FEATURE_KEYS:
            outputs[key] = tft.scale_to_0_1(outputs[key])

        for key in OPTIONAL_NUMERIC_FEATURE_KEYS:
            # This is a SparseTensor because it is optional. Here we fill in a default
            # value when it is missing.
            dense = tf.compat.v1.sparse_to_dense(
                outputs[key].indices, [outputs[key].dense_shape[0], 1],
                outputs[key].values,
                default_value=0.)
            # Reshaping from a batch of vectors of size 1 to a batch of scalars.
            dense = tf.squeeze(dense, axis=1)
            outputs[key] = tft.scale_to_0_1(dense)

        # For all categorical columns except the label column, we generate a
        # vocabulary but do not modify the feature.  This vocabulary is instead
        # used in the trainer, by means of a feature column, to convert the feature
        # from a string to an integer id.
        for key in CATEGORICAL_FEATURE_KEYS:
            tft.vocabulary(inputs[key], vocab_filename=key)

        # For the label column we provide the mapping from string to index.
        table_keys = ['>50K', '<=50K']
        initializer = tf.lookup.KeyValueTensorInitializer(
            keys=table_keys,
            values=tf.cast(tf.range(len(table_keys)), tf.int64),
            key_dtype=tf.string,
            value_dtype=tf.int64)
        table = tf.lookup.StaticHashTable(initializer, default_value=-1)
        outputs[LABEL_KEY] = table.lookup(outputs[LABEL_KEY])

        return outputs

    # The "with" block will create a pipeline, and run that pipeline at the exit
    # of the block.
    with beam.Pipeline() as pipeline:
        with tft_beam.Context(temp_dir=tempfile.mkdtemp()):
            # Create a coder to read the census data with the schema.  To do this we
            # need to list all columns in order since the schema doesn't specify the
            # order of columns in the csv.
            ordered_columns = [
                'age', 'workclass', 'fnlwgt', 'education', 'education-num',
                'marital-status', 'occupation', 'relationship', 'race', 'sex',
                'capital-gain', 'capital-loss', 'hours-per-week',
                'native-country', 'label'
            ]
            converter = tft.coders.CsvCoder(ordered_columns,
                                            RAW_DATA_METADATA.schema)

            # Read in raw data and convert using CSV converter.  Note that we apply
            # some Beam transformations here, which will not be encoded in the TF
            # graph since we don't do them from within tf.Transform's methods
            # (AnalyzeDataset, TransformDataset etc.).  These transformations are just
            # to get data into a format that the CSV converter can read, in particular
            # removing spaces after commas.
            #
            # We use MapAndFilterErrors instead of Map to filter out decode errors in
            # converter.decode, which should only occur for the trailing blank line.
            raw_data = (
                pipeline
                | 'ReadTrainData' >> beam.io.ReadFromText(train_data_file)
                | 'FixCommasTrainData' >>
                beam.Map(lambda line: line.replace(', ', ','))
                | 'DecodeTrainData' >> MapAndFilterErrors(converter.decode))

            # Combine data and schema into a dataset tuple.  Note that we already used
            # the schema to read the CSV data, but we also need it to interpret
            # raw_data.
            raw_dataset = (raw_data, RAW_DATA_METADATA)
            transformed_dataset, transform_fn = (
                raw_dataset
                | tft_beam.AnalyzeAndTransformDataset(preprocessing_fn))
            transformed_data, transformed_metadata = transformed_dataset
            transformed_data_coder = tft.coders.ExampleProtoCoder(
                transformed_metadata.schema)

            _ = (transformed_data
                 | 'EncodeTrainData' >> beam.Map(transformed_data_coder.encode)
                 | 'WriteTrainData' >> beam.io.WriteToTFRecord(
                     os.path.join(working_dir,
                                  TRANSFORMED_TRAIN_DATA_FILEBASE)))

            # Now apply transform function to test data.  In this case we remove the
            # trailing period at the end of each line, and also ignore the header line
            # that is present in the test data file.
            raw_test_data = (
                pipeline
                | 'ReadTestData' >> beam.io.ReadFromText(test_data_file,
                                                         skip_header_lines=1)
                | 'FixCommasTestData' >>
                beam.Map(lambda line: line.replace(', ', ','))
                | 'RemoveTrailingPeriodsTestData' >>
                beam.Map(lambda line: line[:-1])
                | 'DecodeTestData' >> MapAndFilterErrors(converter.decode))

            raw_test_dataset = (raw_test_data, RAW_DATA_METADATA)

            transformed_test_dataset = ((raw_test_dataset, transform_fn)
                                        | tft_beam.TransformDataset())
            # Don't need transformed data schema, it's the same as before.
            transformed_test_data, _ = transformed_test_dataset

            _ = (
                transformed_test_data
                | 'EncodeTestData' >> beam.Map(transformed_data_coder.encode)
                | 'WriteTestData' >> beam.io.WriteToTFRecord(
                    os.path.join(working_dir, TRANSFORMED_TEST_DATA_FILEBASE)))

            # Will write a SavedModel and metadata to working_dir, which can then
            # be read by the tft.TFTransformOutput class.
            _ = (
                transform_fn
                | 'WriteTransformFn' >> tft_beam.WriteTransformFn(working_dir))
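Examples #8 and #10 call a MapAndFilterErrors transform that is not defined in these snippets. A sketch of the usual shape of such a helper: a ParDo that applies the map function, silently drops elements that raise, and counts them in a Beam metric (the class body here is an assumption, not the original definition):

import apache_beam as beam


class MapAndFilterErrors(beam.PTransform):
  """Like beam.Map, but drops elements for which fn raises, counting them."""

  class _MapAndFilterErrorsDoFn(beam.DoFn):

    def __init__(self, fn):
      self._fn = fn
      self._bad_elements_counter = beam.metrics.Metrics.counter(
          'MapAndFilterErrors', 'bad_elements')

    def process(self, element):
      try:
        yield self._fn(element)
      except Exception:  # pylint: disable=broad-except
        # Count the bad element instead of failing the whole pipeline.
        self._bad_elements_counter.inc(1)

  def __init__(self, fn):
    self._fn = fn

  def expand(self, pcoll):
    return pcoll | beam.ParDo(self._MapAndFilterErrorsDoFn(self._fn))
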
Example #9
    def _RunBeamImpl(self, analyze_data_list: List[executor._Dataset],
                     transform_data_list: List[executor._Dataset],
                     transform_graph_uri: Text,
                     input_dataset_metadata: dataset_metadata.DatasetMetadata,
                     transform_output_path: Text,
                     raw_examples_data_format: int, temp_path: Text,
                     compute_statistics: bool,
                     per_set_stats_output_paths: Sequence[Text],
                     materialization_format: Optional[Text],
                     analyze_paths_count: int) -> executor._Status:
        """Perform data preprocessing with TFT.

    Args:
      analyze_data_list: List of datasets for analysis.
      transform_data_list: List of datasets for transform.
      transform_graph_uri: Path to an existing tf.Transform output directory
        whose transform graph is loaded and applied.
      input_dataset_metadata: A DatasetMetadata object for the input data.
      transform_output_path: An absolute path to write the output to.
      raw_examples_data_format: The data format of the raw examples. One of the
        enums from example_gen_pb2.PayloadFormat.
      temp_path: A path to a temporary dir.
      compute_statistics: A bool indicating whether or not to compute statistics.
      per_set_stats_output_paths: Paths to per-set statistics output. If empty,
        per-set statistics is not produced.
      materialization_format: A string describing the format of the materialized
        data or None if materialization is not enabled.
      analyze_paths_count: An integer, the number of paths that should be used
        for analysis.

    Returns:
      Status of the execution.
    """
        self._AssertSameTFXIOSchema(analyze_data_list)
        unprojected_typespecs = (
            analyze_data_list[0].tfxio.TensorAdapter().OriginalTypeSpecs())

        tf_transform_output = tft.TFTransformOutput(transform_graph_uri)

        analyze_input_columns = tft.get_analyze_input_columns(
            tf_transform_output.transform_raw_features, unprojected_typespecs)
        transform_input_columns = tft.get_transform_input_columns(
            tf_transform_output.transform_raw_features, unprojected_typespecs)
        # Use the same dataset (same columns) for AnalyzeDataset and computing
        # pre-transform stats so that the data will only be read once for these
        # two operations.
        if compute_statistics:
            analyze_input_columns = list(
                set(
                    list(analyze_input_columns) +
                    list(transform_input_columns)))

        for d in analyze_data_list:
            d.tfxio = d.tfxio.Project(analyze_input_columns)

        self._AssertSameTFXIOSchema(analyze_data_list)
        analyze_data_tensor_adapter_config = (
            analyze_data_list[0].tfxio.TensorAdapterConfig())

        for d in transform_data_list:
            d.tfxio = d.tfxio.Project(transform_input_columns)

        desired_batch_size = self._GetDesiredBatchSize(
            raw_examples_data_format)

        with self._CreatePipeline(transform_output_path) as pipeline:
            with tft_beam.Context(
                    temp_dir=temp_path,
                    desired_batch_size=desired_batch_size,
                    passthrough_keys=self._GetTFXIOPassthroughKeys(),
                    use_deep_copy_optimization=True,
                    use_tfxio=True):
                # pylint: disable=expression-not-assigned
                # pylint: disable=no-value-for-parameter
                # _ = (
                #     pipeline
                #     | 'IncrementPipelineMetrics' >> self._IncrementPipelineMetrics(
                #         len(unprojected_typespecs), len(analyze_input_columns),
                #         len(transform_input_columns), analyze_paths_count))
                #
                # # (new_analyze_data_dict, input_cache) = (
                # #     pipeline
                # #     | 'OptimizeRun' >> self._OptimizeRun(
                # #         input_cache_dir, output_cache_dir, analyze_data_list,
                # #         unprojected_typespecs, preprocessing_fn,
                # #         self._GetCacheSource()))
                #
                # # if input_cache:
                # #   absl.logging.debug('Analyzing data with cache.')
                #
                # full_analyze_dataset_keys_list = [
                #     dataset.dataset_key for dataset in analyze_data_list
                # ]
                #
                # # Removing unneeded datasets if they won't be needed for statistics or
                # # materialization.
                # # if materialization_format is None and not compute_statistics:
                # #   if None in new_analyze_data_dict.values():
                # #     absl.logging.debug(
                # #         'Not reading the following datasets due to cache: %s', [
                # #             dataset.file_pattern
                # #             for dataset in analyze_data_list
                # #             if new_analyze_data_dict[dataset.dataset_key] is None
                # #         ])
                # #   analyze_data_list = [
                # #       d for d in new_analyze_data_dict.values() if d is not None
                # #   ]
                #
                # input_analysis_data = {}
                # for dataset in analyze_data_list:
                #   infix = 'AnalysisIndex{}'.format(dataset.index)
                #   dataset.standardized = (
                #       pipeline
                #       | 'TFXIOReadAndDecode[{}]'.format(infix) >>
                #       dataset.tfxio.BeamSource(desired_batch_size))
                #
                #   input_analysis_data[dataset.dataset_key] = dataset.standardized
                # # input_analysis_data = {}
                # # for key, dataset in new_analyze_data_dict.items():
                # #   input_analysis_data[key] = (
                # #       None if dataset is None else dataset.standardized)
                #
                # # transform_fn, cache_output = (
                # #     (input_analysis_data, input_cache,
                # #      analyze_data_tensor_adapter_config)
                #     # | 'Analyze' >> tft_beam.AnalyzeDatasetWithCache(
                #     #     preprocessing_fn, pipeline=pipeline))
                # transform_fn = (
                #     (input_analysis_data, analyze_data_tensor_adapter_config)
                #     | 'Analyze' >> tft_beam.AnalyzeDataset(
                #         tf_transform_output.transform_raw_features, pipeline=pipeline))

                # WriteTransformFn writes transform_fn and metadata to subdirectories
                # tensorflow_transform.SAVED_MODEL_DIR and
                # tensorflow_transform.TRANSFORMED_METADATA_DIR respectively.
                # (transform_fn
                #  | 'WriteTransformFn'
                #  >> tft_beam.WriteTransformFn(transform_output_path))

                if compute_statistics or materialization_format is not None:
                    transform_fn = (
                        pipeline
                        | transform_fn_io.ReadTransformFn(transform_graph_uri))

                    # Do not compute pre-transform stats if the input format is raw proto,
                    # as StatsGen would treat any input as tf.Example. Note that
                    # tf.SequenceExamples are wire-format compatible with tf.Examples.
                    if (compute_statistics and not self._IsDataFormatProto(
                            raw_examples_data_format)):
                        # Aggregated feature stats before transformation.
                        pre_transform_feature_stats_path = os.path.join(
                            transform_output_path, tft.TFTransformOutput.
                            PRE_TRANSFORM_FEATURE_STATS_PATH)

                        if self._IsDataFormatSequenceExample(
                                raw_examples_data_format):
                            schema_proto = None
                        else:
                            schema_proto = executor._GetSchemaProto(
                                input_dataset_metadata)

                        if self._IsDataFormatSequenceExample(
                                raw_examples_data_format):

                            def _ExtractRawExampleBatches(record_batch):
                                return record_batch.column(
                                    record_batch.schema.get_field_index(
                                        RAW_EXAMPLE_KEY)).flatten().to_pylist(
                                        )

                            # Make use of the fact that tf.SequenceExample is wire-format
                            # compatible with tf.Example
                            stats_input = []
                            for dataset in analyze_data_list:
                                infix = 'AnalysisIndex{}'.format(dataset.index)
                                stats_input.append(
                                    dataset.standardized
                                    | 'ExtractRawExampleBatches[{}]'.format(
                                        infix) >> beam.Map(
                                            _ExtractRawExampleBatches)
                                    |
                                    'DecodeSequenceExamplesAsExamplesIntoRecordBatches[{}]'
                                    .format(infix) >> beam.ParDo(
                                        self._ToArrowRecordBatchesFn(
                                            schema_proto)))
                        else:
                            stats_input = [
                                dataset.standardized
                                for dataset in analyze_data_list
                            ]

                        pre_transform_stats_options = (
                            transform_stats_options.
                            get_pre_transform_stats_options())
                        (stats_input
                         | 'FlattenAnalysisDatasets' >>
                         beam.Flatten(pipeline=pipeline)
                         | 'GenerateStats[FlattenedAnalysisDataset]' >>
                         self._GenerateStats(
                             pre_transform_feature_stats_path,
                             schema_proto,
                             stats_options=pre_transform_stats_options))

                    # transform_data_list is a superset of analyze_data_list, so we pay
                    # the cost to read the same dataset (analyze_data_list) again here to
                    # prevent certain beam runners from doing large temp materialization.
                    for dataset in transform_data_list:
                        infix = 'TransformIndex{}'.format(dataset.index)
                        dataset.standardized = (
                            pipeline | 'TFXIOReadAndDecode[{}]'.format(infix)
                            >> dataset.tfxio.BeamSource(desired_batch_size))
                        (dataset.transformed,
                         metadata) = (((dataset.standardized,
                                        dataset.tfxio.TensorAdapterConfig()),
                                       transform_fn)
                                      | 'Transform[{}]'.format(infix) >>
                                      tft_beam.TransformDataset())

                        dataset.transformed_and_serialized = (
                            dataset.transformed
                            | 'EncodeAndSerialize[{}]'.format(infix) >>
                            beam.ParDo(self._EncodeAsSerializedExamples(),
                                       executor._GetSchemaProto(metadata)))

                    if compute_statistics:
                        # Aggregated feature stats after transformation.
                        _, metadata = transform_fn

                        # TODO(b/70392441): Retain tf.Metadata (e.g., IntDomain) in
                        # schema. Currently input dataset schema only contains dtypes,
                        # and other metadata is dropped due to roundtrip to tensors.
                        transformed_schema_proto = executor._GetSchemaProto(
                            metadata)

                        for dataset in transform_data_list:
                            infix = 'TransformIndex{}'.format(dataset.index)
                            dataset.transformed_and_standardized = (
                                dataset.transformed_and_serialized
                                | 'FromTransformedToArrowRecordBatches[{}]'.
                                format(infix) >> self._ToArrowRecordBatches(
                                    schema=transformed_schema_proto))

                        post_transform_feature_stats_path = os.path.join(
                            transform_output_path, tft.TFTransformOutput.
                            POST_TRANSFORM_FEATURE_STATS_PATH)

                        post_transform_stats_options = (
                            transform_stats_options.
                            get_post_transform_stats_options())
                        ([
                            dataset.transformed_and_standardized
                            for dataset in transform_data_list
                        ]
                         | 'FlattenTransformedDatasets' >> beam.Flatten()
                         | 'GenerateStats[FlattenedTransformedDatasets]' >>
                         self._GenerateStats(
                             post_transform_feature_stats_path,
                             transformed_schema_proto,
                             stats_options=post_transform_stats_options))

                        if per_set_stats_output_paths:
                            # TODO(b/130885503): Remove duplicate stats gen compute that is
                            # done both on a flattened view of the data, and on each span
                            # below.
                            for dataset in transform_data_list:
                                infix = 'TransformIndex{}'.format(
                                    dataset.index)
                                (dataset.transformed_and_standardized
                                 | 'GenerateStats[{}]'.format(infix) >>
                                 self._GenerateStats(
                                     dataset.stats_output_path,
                                     transformed_schema_proto,
                                     stats_options=post_transform_stats_options
                                 ))

                    if materialization_format is not None:
                        for dataset in transform_data_list:
                            infix = 'TransformIndex{}'.format(dataset.index)
                            (dataset.transformed_and_serialized
                             | 'Materialize[{}]'.format(infix) >>
                             self._WriteExamples(
                                 materialization_format,
                                 dataset.materialize_output_path))

        return executor._Status.OK()
Example #10
def transform_data(train_data_file, test_data_file, working_dir):
    """Transform the data and write out as a TFRecord of Example protos.

  Read in the data using the CSV reader, and transform it using a
  preprocessing pipeline that scales numeric data and converts categorical data
  from strings to int64 value indices, by creating a vocabulary for each
  category.

  Args:
    train_data_file: File containing training data
    test_data_file: File containing test data
    working_dir: Directory to write transformed data and metadata to
  """

    # The "with" block will create a pipeline, and run that pipeline at the exit
    # of the block.
    with apache_beam.Pipeline() as pipeline:
        with tft_beam.Context(temp_dir=tempfile.mkdtemp()):
            # Create a coder to read the census data with the schema.  To do this we
            # need to list all columns in order since the schema doesn't specify the
            # order of columns in the csv.
            ordered_columns = [
                'age', 'workclass', 'fnlwgt', 'education', 'education-num',
                'marital-status', 'occupation', 'relationship', 'race', 'sex',
                'capital-gain', 'capital-loss', 'hours-per-week',
                'native-country', 'label'
            ]
            converter = tft.coders.CsvCoder(ordered_columns,
                                            RAW_DATA_METADATA.schema)

            # Read in raw data and convert using CSV converter.  Note that we apply
            # some Beam transformations here, which will not be encoded in the TF
            # graph since we don't do them from within tf.Transform's methods
            # (AnalyzeDataset, TransformDataset etc.).  These transformations are just
            # to get data into a format that the CSV converter can read, in particular
            # removing spaces after commas.
            #
            # We use MapAndFilterErrors instead of Map to filter out decode errors in
            # converter.decode, which should only occur for the trailing blank line.
            raw_data = (
                pipeline
                | 'ReadTrainData' >> apache_beam.io.ReadFromText(
                    train_data_file)
                | 'FixCommasTrainData' >>
                apache_beam.Map(lambda line: line.replace(', ', ','))
                | 'DecodeTrainData' >> MapAndFilterErrors(converter.decode))

            # Combine data and schema into a dataset tuple.  Note that we already used
            # the schema to read the CSV data, but we also need it to interpret
            # raw_data.
            raw_dataset = (raw_data, RAW_DATA_METADATA)
            transformed_dataset, transform_fn = (
                raw_dataset
                | tft_beam.AnalyzeAndTransformDataset(preprocessing_fn))
            transformed_data, transformed_metadata = transformed_dataset
            # A coder between TF Examples and tf.Transform datasets.
            # Used to encode a tf.transform encoded dict as tf.Example.
            transformed_data_coder = tft.coders.ExampleProtoCoder(
                transformed_metadata.schema)

            _ = (transformed_data
                 | 'EncodeTrainData' >> apache_beam.Map(
                     transformed_data_coder.encode)
                 | 'WriteTrainData' >> apache_beam.io.WriteToTFRecord(
                     os.path.join(working_dir,
                                  TRANSFORMED_TRAIN_DATA_FILEBASE)))

            # Now apply transform function to test data.  In this case we remove the
            # trailing period at the end of each line, and also ignore the header line
            # that is present in the test data file.
            raw_test_data = (
                pipeline
                | 'ReadTestData' >> apache_beam.io.ReadFromText(
                    test_data_file, skip_header_lines=1)
                | 'FixCommasTestData' >>
                apache_beam.Map(lambda line: line.replace(', ', ','))
                | 'RemoveTrailingPeriodsTestData' >>
                apache_beam.Map(lambda line: line[:-1])
                | 'DecodeTestData' >> MapAndFilterErrors(converter.decode))

            raw_test_dataset = (raw_test_data, RAW_DATA_METADATA)

            transformed_test_dataset = ((raw_test_dataset, transform_fn)
                                        | tft_beam.TransformDataset())
            # Don't need transformed data schema, it's the same as before.
            transformed_test_data, _ = transformed_test_dataset

            _ = (
                transformed_test_data
                | 'EncodeTestData' >> apache_beam.Map(
                    transformed_data_coder.encode)
                | 'WriteTestData' >> apache_beam.io.WriteToTFRecord(
                    os.path.join(working_dir, TRANSFORMED_TEST_DATA_FILEBASE)))

            # Will write a SavedModel and metadata to working_dir, which can then
            # be read by the tft.TFTransformOutput class.
            _ = (
                transform_fn
                | 'WriteTransformFn' >> tft_beam.WriteTransformFn(working_dir))
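Once WriteTransformFn has run, the artifacts in working_dir can be read back for training or serving. A short sketch reusing working_dir and TRANSFORMED_TRAIN_DATA_FILEBASE from the example above; the tf.data input pipeline is illustrative, while the tft.TFTransformOutput calls are standard API:

import os

import tensorflow as tf
import tensorflow_transform as tft

tf_transform_output = tft.TFTransformOutput(working_dir)

# Feature spec of the transformed examples written above, used to parse the TFRecords.
transformed_feature_spec = tf_transform_output.transformed_feature_spec()

file_pattern = os.path.join(working_dir, TRANSFORMED_TRAIN_DATA_FILEBASE + '*')
dataset = tf.data.TFRecordDataset(tf.io.gfile.glob(file_pattern))
dataset = dataset.map(
    lambda serialized: tf.io.parse_single_example(serialized,
                                                  transformed_feature_spec))

# The transform graph can also be re-applied to raw features at serving time.
tft_layer = tf_transform_output.transform_features_layer()
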
Example #11
  def _RunBeamImpl(self, inputs: Mapping[Text, Any],
                   outputs: Mapping[Text, Any], preprocessing_fn: Any,
                   input_dataset_metadata: dataset_metadata.DatasetMetadata,
                   raw_examples_data_format: Text, transform_output_path: Text,
                   compute_statistics: bool,
                   materialize_output_paths: Sequence[Text]) -> _Status:
    """Perform data preprocessing with FlumeC++ runner.

    Args:
      inputs: A dictionary of labelled input values.
      outputs: A dictionary of labelled output values.
      preprocessing_fn: The tf.Transform preprocessing_fn.
      input_dataset_metadata: A DatasetMetadata object for the input data.
      raw_examples_data_format: A string describing the raw data format.
      transform_output_path: An absolute path to write the output to.
      compute_statistics: A bool indicating whether or not to compute statistics.
      materialize_output_paths: Paths to materialized outputs.

    Raises:
      RuntimeError: If reset() is not being invoked between two run().
      ValueError: If the schema is empty.

    Returns:
      Status of the execution.
    """
    raw_examples_file_format = common.GetSoleValue(
        inputs, labels.EXAMPLES_FILE_FORMAT_LABEL, strict=False)
    analyze_and_transform_data_paths = common.GetValues(
        inputs, labels.ANALYZE_AND_TRANSFORM_DATA_PATHS_LABEL)
    transform_only_data_paths = common.GetValues(
        inputs, labels.TRANSFORM_ONLY_DATA_PATHS_LABEL)
    stats_use_tfdv = common.GetSoleValue(inputs,
                                         labels.TFT_STATISTICS_USE_TFDV_LABEL)
    per_set_stats_output_paths = common.GetValues(
        outputs, labels.PER_SET_STATS_OUTPUT_PATHS_LABEL)
    temp_path = common.GetSoleValue(outputs, labels.TEMP_OUTPUT_LABEL)

    input_cache_dir = common.GetSoleValue(
        inputs, labels.CACHE_INPUT_PATH_LABEL, strict=False)
    output_cache_dir = common.GetSoleValue(
        outputs, labels.CACHE_OUTPUT_PATH_LABEL, strict=False)

    tf.logging.info('Analyze and transform data patterns: %s',
                    list(enumerate(analyze_and_transform_data_paths)))
    tf.logging.info('Transform data patterns: %s',
                    list(enumerate(transform_only_data_paths)))
    tf.logging.info('Transform materialization output paths: %s',
                    list(enumerate(materialize_output_paths)))
    tf.logging.info('Transform output path: %s', transform_output_path)

    feature_spec = schema_utils.schema_as_feature_spec(
        _GetSchemaProto(input_dataset_metadata)).feature_spec
    try:
      analyze_input_columns = tft.get_analyze_input_columns(
          preprocessing_fn, feature_spec)
      transform_input_columns = (
          tft.get_transform_input_columns(preprocessing_fn, feature_spec))
    except AttributeError:
      # If using TFT 1.12, fall back to assuming all features are used.
      analyze_input_columns = feature_spec.keys()
      transform_input_columns = feature_spec.keys()
    # Use the same dataset (same columns) for AnalyzeDataset and computing
    # pre-transform stats so that the data will only be read once for these
    # two operations.
    if compute_statistics:
      analyze_input_columns = list(
          set(list(analyze_input_columns) + list(transform_input_columns)))
    if input_dataset_metadata.schema is _RAW_EXAMPLE_SCHEMA:
      analyze_input_dataset_metadata = input_dataset_metadata
      transform_input_dataset_metadata = input_dataset_metadata
    else:
      analyze_input_dataset_metadata = dataset_metadata.DatasetMetadata(
          dataset_schema.from_feature_spec(
              {feature: feature_spec[feature]
               for feature in analyze_input_columns}))
      transform_input_dataset_metadata = dataset_metadata.DatasetMetadata(
          dataset_schema.from_feature_spec(
              {feature: feature_spec[feature]
               for feature in transform_input_columns}))

    can_process_jointly = not bool(per_set_stats_output_paths or
                                   materialize_output_paths or output_cache_dir)
    analyze_data_list = self._MakeDatasetList(
        analyze_and_transform_data_paths, raw_examples_file_format,
        raw_examples_data_format, analyze_input_dataset_metadata,
        can_process_jointly)
    transform_data_list = self._MakeDatasetList(
        list(analyze_and_transform_data_paths) +
        list(transform_only_data_paths), raw_examples_file_format,
        raw_examples_data_format, transform_input_dataset_metadata,
        can_process_jointly)

    desired_batch_size = self._GetDesiredBatchSize(raw_examples_data_format)

    with self._CreatePipeline(outputs) as p:
      with tft_beam.Context(
          temp_dir=temp_path,
          desired_batch_size=desired_batch_size,
          passthrough_keys={_TRANSFORM_INTERNAL_FEATURE_FOR_KEY},
          use_deep_copy_optimization=True):
        # pylint: disable=expression-not-assigned
        # pylint: disable=no-value-for-parameter

        _ = (
            p | self._IncrementColumnUsageCounter(
                len(feature_spec.keys()), len(analyze_input_columns),
                len(transform_input_columns)))

        (new_analyze_data_dict, input_cache, flat_data_required) = (
            p | self._OptimizeRun(input_cache_dir, output_cache_dir,
                                  analyze_data_list, feature_spec,
                                  preprocessing_fn, self._GetCacheSource()))
        # Removing unneeded datasets if they won't be needed for
        # materialization. This means that these datasets won't be included in
        # the statistics computation or profiling either.
        if not materialize_output_paths:
          analyze_data_list = [
              d for d in new_analyze_data_dict.values() if d is not None
          ]

        analyze_decode_fn = (
            self._GetDecodeFunction(raw_examples_data_format,
                                    analyze_input_dataset_metadata.schema))

        for (idx, dataset) in enumerate(analyze_data_list):
          dataset.encoded = (
              p | 'ReadAnalysisDataset[{}]'.format(idx) >>
              self._ReadExamples(dataset))
          dataset.decoded = (
              dataset.encoded
              | 'DecodeAnalysisDataset[{}]'.format(idx) >>
              self._DecodeInputs(analyze_decode_fn))

        input_analysis_data = {}
        for key, dataset in six.iteritems(new_analyze_data_dict):
          if dataset is None:
            input_analysis_data[key] = None
          else:
            input_analysis_data[key] = dataset.decoded

        if flat_data_required:
          flat_input_analysis_data = (
              [dataset.decoded for dataset in analyze_data_list]
              | 'FlattenAnalysisDatasets' >> beam.Flatten(pipeline=p))
        else:
          flat_input_analysis_data = None
        if input_cache:
          tf.logging.info('Analyzing data with cache.')
        transform_fn, cache_output = (
            (flat_input_analysis_data, input_analysis_data, input_cache,
             input_dataset_metadata)
            | 'AnalyzeDataset' >> tft_beam.AnalyzeDatasetWithCache(
                preprocessing_fn, pipeline=p))

        # Write the raw/input metadata.
        (input_dataset_metadata
         | 'WriteMetadata' >> tft_beam.WriteMetadata(
             os.path.join(transform_output_path,
                          tft.TFTransformOutput.RAW_METADATA_DIR), p))

        # WriteTransformFn writes transform_fn and metadata to subdirectories
        # tensorflow_transform.SAVED_MODEL_DIR and
        # tensorflow_transform.TRANSFORMED_METADATA_DIR respectively.
        (transform_fn |
         'WriteTransformFn' >> tft_beam.WriteTransformFn(transform_output_path))

        if output_cache_dir is not None and cache_output is not None:
          # TODO(b/37788560): Possibly make this part of the beam graph.
          tf.io.gfile.makedirs(output_cache_dir)
          tf.logging.info('Using existing cache in: %s', input_cache_dir)
          if input_cache_dir is not None:
            # Only copy cache that is relevant to this iteration. This is
            # assuming that this pipeline operates on rolling ranges, so those
            # cache entries may also be relevant for future iterations.
            for span_cache_dir in input_analysis_data:
              full_span_cache_dir = os.path.join(input_cache_dir,
                                                 span_cache_dir)
              if tf.io.gfile.isdir(full_span_cache_dir):
                self._CopyCache(full_span_cache_dir,
                                os.path.join(output_cache_dir, span_cache_dir))

          (cache_output
           | 'WriteCache' >> analyzer_cache.WriteAnalysisCacheToFS(
               p, output_cache_dir, sink=self._GetCacheSink()))

        if compute_statistics or materialize_output_paths:
          # Do not compute pre-transform stats if the input format is raw proto,
          # as StatsGen would treat any input as tf.Example.
          if (compute_statistics and
              not self._IsDataFormatProto(raw_examples_data_format)):
            # Aggregated feature stats before transformation.
            pre_transform_feature_stats_path = os.path.join(
                transform_output_path,
                tft.TFTransformOutput.PRE_TRANSFORM_FEATURE_STATS_PATH)

            schema_proto = _GetSchemaProto(analyze_input_dataset_metadata)
            ([
                dataset.decoded if stats_use_tfdv else dataset.encoded
                for dataset in analyze_data_list
            ]
             | 'FlattenPreTransformAnalysisDatasets' >> beam.Flatten(pipeline=p)
             | 'GenerateAggregatePreTransformAnalysisStats' >>
             self._GenerateStats(
                 pre_transform_feature_stats_path,
                 schema_proto,
                 use_deep_copy_optimization=True,
                 use_tfdv=stats_use_tfdv))

          transform_decode_fn = (
              self._GetDecodeFunction(raw_examples_data_format,
                                      transform_input_dataset_metadata.schema))
          # transform_data_list is a superset of analyze_data_list, so we pay
          # the cost to read the same dataset (analyze_data_list) again here to
          # prevent certain beam runners from doing large temp materialization.
          for (idx, dataset) in enumerate(transform_data_list):
            dataset.encoded = (
                p
                | 'ReadTransformDataset[{}]'.format(idx) >>
                self._ReadExamples(dataset))
            dataset.decoded = (
                dataset.encoded
                | 'DecodeTransformDataset[{}]'.format(idx) >>
                self._DecodeInputs(transform_decode_fn))
            (dataset.transformed,
             metadata) = (((dataset.decoded, transform_input_dataset_metadata),
                           transform_fn)
                          | 'TransformDataset[{}]'.format(idx) >>
                          tft_beam.TransformDataset())

            if materialize_output_paths or not stats_use_tfdv:
              dataset.transformed_and_encoded = (
                  dataset.transformed
                  | 'EncodeTransformedDataset[{}]'.format(idx) >> beam.ParDo(
                      self._EncodeAsExamples(), metadata))

          if compute_statistics:
            # Aggregated feature stats after transformation.
            _, metadata = transform_fn
            post_transform_feature_stats_path = os.path.join(
                transform_output_path,
                tft.TFTransformOutput.POST_TRANSFORM_FEATURE_STATS_PATH)

            # TODO(b/70392441): Retain tf.Metadata (e.g., IntDomain) in
            # schema. Currently input dataset schema only contains dtypes,
            # and other metadata is dropped due to roundtrip to tensors.
            transformed_schema_proto = _GetSchemaProto(metadata)

            ([(dataset.transformed
               if stats_use_tfdv else dataset.transformed_and_encoded)
              for dataset in transform_data_list]
             | 'FlattenPostTransformAnalysisDatasets' >> beam.Flatten()
             | 'GenerateAggregatePostTransformAnalysisStats' >>
             self._GenerateStats(
                 post_transform_feature_stats_path,
                 transformed_schema_proto,
                 use_tfdv=stats_use_tfdv))

            if per_set_stats_output_paths:
              assert len(transform_data_list) == len(per_set_stats_output_paths)
              # TODO(b/67632871): Remove duplicate stats gen compute that is
              # done both on a flattened view of the data, and on each span
              # below.
              bundles = zip(transform_data_list, per_set_stats_output_paths)
              for (idx, (dataset, output_path)) in enumerate(bundles):
                if stats_use_tfdv:
                  data = dataset.transformed
                else:
                  data = dataset.transformed_and_encoded
                (data
                 | 'GeneratePostTransformStats[{}]'.format(idx) >>
                 self._GenerateStats(
                     output_path,
                     transformed_schema_proto,
                     use_tfdv=stats_use_tfdv))

          if materialize_output_paths:
            assert len(transform_data_list) == len(materialize_output_paths)
            bundles = zip(transform_data_list, materialize_output_paths)
            for (idx, (dataset, output_path)) in enumerate(bundles):
              (dataset.transformed_and_encoded
               | 'Materialize[{}]'.format(idx) >> self._WriteExamples(
                   raw_examples_file_format, output_path))

    return _Status.OK()
Example #12
def transform_data(train_data_file, test_data_file, working_dir,
                   root_train_data_out, root_test_data_out, pipeline_options):
    """Transform the data and write out as a TFRecord of Example protos.

    Read in the data using the CSV reader, and transform it using a
    preprocessing pipeline that scales numeric data and converts categorical
    data from strings to int64 value indices, by creating a vocabulary for each
    category.

    Args:
        train_data_file: File containing training data
        test_data_file: File containing test data
        working_dir: Directory to write transformed data and metadata to
        root_train_data_out: Root of file containing transform training data
        root_test_data_out: Root of file containing transform test data
        pipeline_options: beam.pipeline.PipelineOptions defining DataFlow options
    """

    # The "with" block will create a pipeline, and run that pipeline at the exit
    # of the block.
    with beam.Pipeline(options=pipeline_options) as pipeline:
        tmp_dir = pipeline_options.get_all_options()['temp_location']
        with tft_beam.Context(tmp_dir):

            converter = tft.coders.csv_coder.CsvCoder(ORDERED_COLUMNS,
                                                      RAW_DATA_METADATA.schema)
            raw_data_ = (pipeline
                         | 'Train:ReadData' >> beam.io.ReadFromText(
                             train_data_file, skip_header_lines=1)
                         | 'Train:RemoveNull' >> beam.ParDo(
                             RemoveNull()).with_outputs('Y', 'N'))
            raw_data = (raw_data_.Y
                        | 'Train:Reshuffle' >> Shuffle()
                        | 'Train:Decode' >> beam.Map(converter.decode))

            raw_dataset = (raw_data, RAW_DATA_METADATA)
            transformed_dataset, transform_fn = (
                raw_dataset
                | tft_beam.AnalyzeAndTransformDataset(preprocessing_fn))
            transformed_data, transformed_metadata = transformed_dataset

            # Coder used to serialize transformed instances as tf.Example protos.
            transformed_data_coder = tft.coders.ExampleProtoCoder(
                transformed_metadata.schema)

            _ = transformed_data | 'Train:WriteData' >> beam.io.WriteToTFRecord(
                os.path.join(working_dir, root_train_data_out),
                coder=transformed_data_coder)

            raw_test_data_ = (pipeline
                              | 'Test:ReadData' >> beam.io.ReadFromText(
                                  test_data_file, skip_header_lines=1)
                              | 'Test:RemoveNull' >> beam.ParDo(
                                  RemoveNull()).with_outputs('Y', 'N'))
            raw_test_data = (raw_test_data_.Y
                             | 'Test:Reshuffle' >> Shuffle()
                             | 'Test:DecodeData' >> beam.Map(converter.decode))

            raw_test_dataset = (raw_test_data, RAW_DATA_METADATA)

            transformed_test_dataset = ((raw_test_dataset, transform_fn)
                                        | tft_beam.TransformDataset())
            # Don't need transformed data schema, it's the same as before.
            transformed_test_data, _ = transformed_test_dataset

            _ = transformed_test_data | 'Test:WriteData' >> beam.io.WriteToTFRecord(
                os.path.join(working_dir, root_test_data_out),
                coder=transformed_data_coder)

            # Will write a SavedModel and metadata to two subdirectories of
            # working_dir, given by transform_fn_io.TRANSFORM_FN_DIR and
            # transform_fn_io.TRANSFORMED_METADATA_DIR respectively.
            _ = (
                transform_fn
                | 'WriteTransformFn' >> tft_beam.WriteTransformFn(working_dir))

            _ = ((raw_data_.N, raw_test_data_.N)
                 | beam.Flatten()
                 | 'WriteError' >> beam.io.WriteToText(
                     os.path.join(working_dir, 'error')))
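Example #12 uses RemoveNull and Shuffle helpers that are not defined in the snippet. A rough sketch of what they might look like: RemoveNull as a DoFn with tagged outputs ('Y' for clean lines, 'N' for rejects) and Shuffle as a thin wrapper around beam.Reshuffle; both bodies, including the null-detection rule, are assumptions about the missing code:

import apache_beam as beam
from apache_beam import pvalue


class RemoveNull(beam.DoFn):
  """Routes CSV lines with empty fields to the 'N' output, clean lines to 'Y'."""

  def process(self, element):
    # Treat a line with any empty field (back-to-back or trailing commas) as null.
    if ',,' in element or element.endswith(','):
      yield pvalue.TaggedOutput('N', element)
    else:
      yield pvalue.TaggedOutput('Y', element)


class Shuffle(beam.PTransform):
  """Randomizes element order to avoid ordering bias before training."""

  def expand(self, pcoll):
    return pcoll | beam.Reshuffle()
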
Example #13
    def _RunBeamImpl(self, inputs, outputs, preprocessing_fn,
                     input_dataset_metadata, raw_examples_data_format,
                     transform_output_path, compute_statistics,
                     materialize_output_paths):
        """Perform data preprocessing with FlumeC++ runner.

    Args:
      inputs: A dictionary of labelled input values.
      outputs: A dictionary of labelled output values.
      preprocessing_fn: The tf.Transform preprocessing_fn.
      input_dataset_metadata: A DatasetMetadata object for the input data.
      raw_examples_data_format: A string describing the raw data format.
      transform_output_path: An absolute path to write the output to.
      compute_statistics: A bool indicating whether or not to compute statistics.
      materialize_output_paths: Paths to materialized outputs.

    Raises:
      RuntimeError: If reset() is not being invoked between two run().
      ValueError: If the schema is empty.

    Returns:
      Status of the execution.
    """
        raw_examples_file_format = common.GetSoleValue(
            inputs, labels.EXAMPLES_FILE_FORMAT_LABEL, strict=False)
        analyze_and_transform_data_paths = common.GetValues(
            inputs, labels.ANALYZE_AND_TRANSFORM_DATA_PATHS_LABEL)
        transform_only_data_paths = common.GetValues(
            inputs, labels.TRANSFORM_ONLY_DATA_PATHS_LABEL)
        stats_use_tfdv = common.GetSoleValue(
            inputs, labels.TFT_STATISTICS_USE_TFDV_LABEL)
        per_set_stats_output_paths = common.GetValues(
            outputs, labels.PER_SET_STATS_OUTPUT_PATHS_LABEL)
        temp_path = common.GetSoleValue(outputs, labels.TEMP_OUTPUT_LABEL)

        tf.logging.info('Analyze and transform data patterns: %s',
                        list(enumerate(analyze_and_transform_data_paths)))
        tf.logging.info('Transform data patterns: %s',
                        list(enumerate(transform_only_data_paths)))
        tf.logging.info('Transform materialization output paths: %s',
                        list(enumerate(materialize_output_paths)))
        tf.logging.info('Transform output path: %s', transform_output_path)

        feature_spec = input_dataset_metadata.schema.as_feature_spec()
        try:
            analyze_input_columns = tft.get_analyze_input_columns(
                preprocessing_fn, feature_spec)
            transform_input_columns = (tft.get_transform_input_columns(
                preprocessing_fn, feature_spec))
        except AttributeError:
            # If using TFT 1.12, fall back to assuming all features are used.
            analyze_input_columns = feature_spec.keys()
            transform_input_columns = feature_spec.keys()
        # Use the same dataset (same columns) for AnalyzeDataset and computing
        # pre-transform stats so that the data will only be read once for these
        # two operations.
        if compute_statistics:
            analyze_input_columns = list(
                set(
                    list(analyze_input_columns) +
                    list(transform_input_columns)))
        analyze_input_dataset_metadata = copy.deepcopy(input_dataset_metadata)
        transform_input_dataset_metadata = copy.deepcopy(
            input_dataset_metadata)
        if input_dataset_metadata.schema is not _RAW_EXAMPLE_SCHEMA:
            analyze_input_dataset_metadata.schema = dataset_schema.from_feature_spec(
                {
                    feature: feature_spec[feature]
                    for feature in analyze_input_columns
                })
            transform_input_dataset_metadata.schema = (
                dataset_schema.from_feature_spec({
                    feature: feature_spec[feature]
                    for feature in transform_input_columns
                }))

        can_process_jointly = not bool(per_set_stats_output_paths
                                       or materialize_output_paths)
        analyze_data_list = self._MakeDatasetList(
            analyze_and_transform_data_paths, raw_examples_file_format,
            raw_examples_data_format, analyze_input_dataset_metadata,
            can_process_jointly)
        transform_data_list = self._MakeDatasetList(
            list(analyze_and_transform_data_paths) +
            list(transform_only_data_paths), raw_examples_file_format,
            raw_examples_data_format, transform_input_dataset_metadata,
            can_process_jointly)

        desired_batch_size = self._GetDesiredBatchSize(
            raw_examples_data_format)

        with self._CreatePipeline(outputs) as p:
            with tft_beam.Context(
                    temp_dir=temp_path,
                    desired_batch_size=desired_batch_size,
                    passthrough_keys={_TRANSFORM_INTERNAL_FEATURE_FOR_KEY},
                    use_deep_copy_optimization=True):
                # pylint: disable=expression-not-assigned
                # pylint: disable=no-value-for-parameter

                analyze_decode_fn = (self._GetDecodeFunction(
                    raw_examples_data_format,
                    analyze_input_dataset_metadata.schema))

                for (idx, dataset) in enumerate(analyze_data_list):
                    dataset.encoded = (p
                                       | 'ReadAnalysisDataset[{}]'.format(idx)
                                       >> self._ReadExamples(dataset))
                    dataset.decoded = (
                        dataset.encoded
                        | 'DecodeAnalysisDataset[{}]'.format(idx) >>
                        self._DecodeInputs(analyze_decode_fn))

                input_analysis_data = (
                    [dataset.decoded for dataset in analyze_data_list]
                    | 'FlattenAnalysisDatasets' >> beam.Flatten())
                transform_fn = ((input_analysis_data, input_dataset_metadata)
                                | 'AnalyzeDataset' >>
                                tft_beam.AnalyzeDataset(preprocessing_fn))
                # Write the raw/input metadata.
                (input_dataset_metadata
                 | 'WriteMetadata' >> tft_beam.WriteMetadata(
                     os.path.join(transform_output_path,
                                  tft.TFTransformOutput.RAW_METADATA_DIR), p))

                # WriteTransformFn writes transform_fn and metadata to subdirectories
                # tensorflow_transform.SAVED_MODEL_DIR and
                # tensorflow_transform.TRANSFORMED_METADATA_DIR respectively.
                (transform_fn | 'WriteTransformFn' >>
                 tft_beam.WriteTransformFn(transform_output_path))

                if compute_statistics or materialize_output_paths:
                    # Do not compute pre-transform stats if the input format is raw proto,
                    # as StatsGen would treat any input as tf.Example.
                    if (compute_statistics and not self._IsDataFormatProto(
                            raw_examples_data_format)):
                        # Aggregated feature stats before transformation.
                        pre_transform_feature_stats_path = os.path.join(
                            transform_output_path, tft.TFTransformOutput.
                            PRE_TRANSFORM_FEATURE_STATS_PATH)

                        # TODO(b/70392441): Retain tf.Metadata (e.g., IntDomain) in
                        # schema. Currently input dataset schema only contains dtypes,
                        # and other metadata is dropped due to roundtrip to tensors.
                        schema_proto = schema_utils.schema_from_feature_spec(
                            analyze_input_dataset_metadata.schema.
                            as_feature_spec())
                        ([
                            dataset.decoded
                            if stats_use_tfdv else dataset.encoded
                            for dataset in analyze_data_list
                        ]
                         | 'FlattenPreTransformAnalysisDatasets' >>
                         beam.Flatten()
                         | 'GenerateAggregatePreTransformAnalysisStats' >>
                         self._GenerateStats(pre_transform_feature_stats_path,
                                             schema_proto,
                                             use_deep_copy_optimization=True,
                                             use_tfdv=stats_use_tfdv))

                    transform_decode_fn = (self._GetDecodeFunction(
                        raw_examples_data_format,
                        transform_input_dataset_metadata.schema))
                    # transform_data_list is a superset of analyze_data_list; we pay the
                    # cost of reading the same dataset (analyze_data_list) again here to
                    # prevent certain Beam runners from doing large temp materializations.
                    for (idx, dataset) in enumerate(transform_data_list):
                        dataset.encoded = (
                            p
                            | 'ReadTransformDataset[{}]'.format(idx) >>
                            self._ReadExamples(dataset))
                        dataset.decoded = (
                            dataset.encoded
                            | 'DecodeTransformDataset[{}]'.format(idx) >>
                            self._DecodeInputs(transform_decode_fn))
                        (dataset.transformed, metadata) = (
                            ((dataset.decoded,
                              transform_input_dataset_metadata), transform_fn)
                            | 'TransformDataset[{}]'.format(idx) >>
                            tft_beam.TransformDataset())

                        if materialize_output_paths or not stats_use_tfdv:
                            dataset.transformed_and_encoded = (
                                dataset.transformed
                                | 'EncodeTransformedDataset[{}]'.format(idx) >>
                                beam.ParDo(self._EncodeAsExamples(), metadata))

                    if compute_statistics:
                        # Aggregated feature stats after transformation.
                        _, metadata = transform_fn
                        post_transform_feature_stats_path = os.path.join(
                            transform_output_path, tft.TFTransformOutput.
                            POST_TRANSFORM_FEATURE_STATS_PATH)

                        # TODO(b/70392441): Retain tf.Metadata (e.g., IntDomain) in
                        # schema. Currently input dataset schema only contains dtypes,
                        # and other metadata is dropped due to roundtrip to tensors.
                        transformed_schema_proto = schema_utils.schema_from_feature_spec(
                            metadata.schema.as_feature_spec())

                        ([(dataset.transformed if stats_use_tfdv else
                           dataset.transformed_and_encoded)
                          for dataset in transform_data_list]
                         | 'FlattenPostTransformAnalysisDatasets' >>
                         beam.Flatten()
                         | 'GenerateAggregatePostTransformAnalysisStats' >>
                         self._GenerateStats(post_transform_feature_stats_path,
                                             transformed_schema_proto,
                                             use_tfdv=stats_use_tfdv))

                        if per_set_stats_output_paths:
                            assert len(transform_data_list) == len(
                                per_set_stats_output_paths)
                            # TODO(b/67632871): Remove duplicate stats gen compute that is
                            # done both on a flattened view of the data, and on each span
                            # below.
                            bundles = zip(transform_data_list,
                                          per_set_stats_output_paths)
                            for (idx, (dataset,
                                       output_path)) in enumerate(bundles):
                                if stats_use_tfdv:
                                    data = dataset.transformed
                                else:
                                    data = dataset.transformed_and_encoded
                                (data
                                 | 'GeneratePostTransformStats[{}]'.format(idx)
                                 >> self._GenerateStats(
                                     output_path,
                                     transformed_schema_proto,
                                     use_tfdv=stats_use_tfdv))

                    if materialize_output_paths:
                        assert len(transform_data_list) == len(
                            materialize_output_paths)
                        bundles = zip(transform_data_list,
                                      materialize_output_paths)
                        for (idx, (dataset,
                                   output_path)) in enumerate(bundles):
                            (dataset.transformed_and_encoded
                             | 'Materialize[{}]'.format(idx) >>
                             self._WriteExamples(raw_examples_file_format,
                                                 output_path))

        return _Status.OK()
Example #14
0
def transform_data(working_dir):
    """Transform the data and write out as a TFRecord of Example protos.

  Read in the data from the positive and negative examples on disk, and
  transform it using a preprocessing pipeline that removes punctuation,
  tokenizes it, and maps tokens to int64 value indices.

  Args:
    working_dir: Directory to read shuffled data from and write transformed data
        and metadata to.
  """

    with beam.Pipeline() as pipeline:
        with tft_beam.Context(
                temp_dir=os.path.join(working_dir, TRANSFORM_TEMP_DIR)):
            coder = tft.coders.ExampleProtoCoder(RAW_DATA_METADATA.schema)
            train_data = (pipeline
                          | 'ReadTrain' >> beam.io.ReadFromTFRecord(
                              os.path.join(working_dir,
                                           SHUFFLED_TRAIN_DATA_FILEBASE + '*'))
                          | 'DecodeTrain' >> beam.Map(coder.decode))

            test_data = (pipeline
                         | 'ReadTest' >> beam.io.ReadFromTFRecord(
                             os.path.join(working_dir,
                                          SHUFFLED_TEST_DATA_FILEBASE + '*'))
                         | 'DecodeTest' >> beam.Map(coder.decode))

            def preprocessing_fn(inputs):
                """Preprocess input columns into transformed columns."""
                review = inputs[REVIEW_KEY]

                # Here tf.compat.v1.string_split behaves differently from
                # tf.strings.split.
                review_tokens = tf.compat.v1.string_split(review, DELIMITERS)
                review_indices = tft.compute_and_apply_vocabulary(
                    review_tokens, top_k=VOCAB_SIZE)
                # Add one for the oov bucket created by compute_and_apply_vocabulary.
                review_bow_indices, review_weight = tft.tfidf(
                    review_indices, VOCAB_SIZE + 1)
                return {
                    REVIEW_KEY: review_bow_indices,
                    REVIEW_WEIGHT_KEY: review_weight,
                    LABEL_KEY: inputs[LABEL_KEY]
                }

            (transformed_train_data, transformed_metadata), transform_fn = (
                (train_data, RAW_DATA_METADATA)
                | 'AnalyzeAndTransform' >>
                tft_beam.AnalyzeAndTransformDataset(preprocessing_fn))
            transformed_data_coder = tft.coders.ExampleProtoCoder(
                transformed_metadata.schema)

            transformed_test_data, _ = (
                ((test_data, RAW_DATA_METADATA), transform_fn)
                | 'Transform' >> tft_beam.TransformDataset())

            _ = (transformed_train_data
                 | 'EncodeTrainData' >> beam.Map(transformed_data_coder.encode)
                 | 'WriteTrainData' >> beam.io.WriteToTFRecord(
                     os.path.join(working_dir,
                                  TRANSFORMED_TRAIN_DATA_FILEBASE)))

            _ = (
                transformed_test_data
                | 'EncodeTestData' >> beam.Map(transformed_data_coder.encode)
                | 'WriteTestData' >> beam.io.WriteToTFRecord(
                    os.path.join(working_dir, TRANSFORMED_TEST_DATA_FILEBASE)))

            # Will write a SavedModel and metadata to two subdirectories of
            # working_dir, given by tft.TRANSFORM_FN_DIR and
            # tft.TRANSFORMED_METADATA_DIR respectively.
            _ = (transform_fn
                 |
                 'WriteTransformFn' >> tft_beam.WriteTransformFn(working_dir))
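# A minimal sketch, assuming the constants used in the example above
# (working_dir, TRANSFORMED_TRAIN_DATA_FILEBASE, LABEL_KEY): the transformed
# TFRecords can be read back for training using the transformed feature spec
# from tft.TFTransformOutput.
import os

import tensorflow as tf
import tensorflow_transform as tft


def make_training_dataset(working_dir, batch_size=32):
    tft_output = tft.TFTransformOutput(working_dir)
    feature_spec = tft_output.transformed_feature_spec()
    file_pattern = os.path.join(working_dir,
                                TRANSFORMED_TRAIN_DATA_FILEBASE + '*')

    def parse(serialized):
        # Parse one transformed Example into (features, label).
        parsed = tf.io.parse_single_example(serialized, feature_spec)
        label = parsed.pop(LABEL_KEY)
        return parsed, label

    files = tf.io.gfile.glob(file_pattern)
    return tf.data.TFRecordDataset(files).map(parse).batch(batch_size)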
Example #15
0
    'y': 2,
    's': 'world'
}, {
    'x': 3,
    'y': 3,
    's': 'hello'
}]

transformed_dataset, transform_fn = (
    (raw_data, raw_data_metadata)
    | tft_beam.AnalyzeAndTransformDataset(preprocessing_fn))

# NOTE: AnalyzeAndTransformDataset is the amalgamation of two tft_beam transforms:
#   transformed_dataset, transform_fn = (
#       my_dataset | tft_beam.AnalyzeAndTransformDataset(preprocessing_fn))
# is the same as:
#   a = tft_beam.AnalyzeDataset(preprocessing_fn)
#   transform_fn = a.expand(my_dataset)  # applies preprocessing_fn, returns a transform_fn
# transform_fn is a pure function that is applied to every row of the incoming
# dataset. At this point, tf.Transform analyzers (like tft.mean()) have already
# been computed and are constants, so transform_fn holds constants for the mean
# of column x, the min and max of column y, and the vocabulary used to map the
# strings to integers.
# All aggregation of data happens in AnalyzeDataset.
# transform_fn is represented as a TensorFlow graph, so it can be embedded into
# a serving graph.
transform_fn = my_dataset | tft_beam.AnalyzeDataset(preprocessing_fn)
# t = tft_beam.TransformDataset()   # instantiate this class
# transformed_dataset = t.expand((my_dataset, transform_fn))  # takes a 2-tuple, outputs a "dataset"
transformed_dataset = (my_dataset, transform_fn) | tft_beam.TransformDataset()
# where my_dataset is a "dataset": a (data, metadata) tuple such as
# (raw_data, raw_data_metadata) above.
transformed_data, transformed_metadata = transformed_dataset
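# A minimal, self-contained sketch of the two-step decomposition described in
# the notes above: AnalyzeDataset builds transform_fn, TransformDataset applies
# it. The data and feature name here are illustrative.
import tempfile

import apache_beam as beam
import tensorflow as tf
import tensorflow_transform as tft
import tensorflow_transform.beam as tft_beam
from tensorflow_transform.tf_metadata import dataset_metadata, schema_utils


def _preprocessing_fn(inputs):
    # tft.mean runs over the whole dataset during analysis and becomes a
    # constant inside transform_fn.
    return {'x_centered': inputs['x'] - tft.mean(inputs['x'])}


_raw_data = [{'x': 1.0}, {'x': 2.0}, {'x': 3.0}]
_raw_metadata = dataset_metadata.DatasetMetadata(
    schema_utils.schema_from_feature_spec(
        {'x': tf.io.FixedLenFeature([], tf.float32)}))

with beam.Pipeline() as pipeline:
    with tft_beam.Context(temp_dir=tempfile.mkdtemp()):
        raw_data = pipeline | 'Create' >> beam.Create(_raw_data)
        raw_dataset = (raw_data, _raw_metadata)
        # Step 1: analysis only; all aggregation happens here.
        transform_fn = raw_dataset | tft_beam.AnalyzeDataset(_preprocessing_fn)
        # Step 2: apply the (now constant) transform_fn row by row.
        transformed_data, transformed_metadata = (
            (raw_dataset, transform_fn) | tft_beam.TransformDataset())
        _ = transformed_data | beam.Map(print)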
Example #16
0
  def Do(self, input_dict: Dict[Text, List[types.Artifact]],
         output_dict: Dict[Text, List[types.Artifact]],
         exec_properties: Dict[Text, Any]) -> None:
    """Get human review result on a model through Slack channel.

    Args:
      input_dict: Input dict from input key to a list of artifacts, including:
        - model_export: exported model from trainer.
        - model_blessing: model blessing path from model_validator.
      output_dict: Output dict from key to a list of artifacts, including:
        - slack_blessing: model blessing result.
      exec_properties: A dict of execution properties, including:
        - slack_token: Token used to setup connection with slack server.
        - slack_channel_id: The id of the Slack channel to send and receive
          messages.
        - timeout_sec: How long do we wait for response, in seconds.

    Returns:
      None

    Raises:
      TimeoutError:
        When there is no decision made within timeout_sec.
      ConnectionError:
        When connection to slack server cannot be established.

    """
    self._log_startup(input_dict, output_dict, exec_properties)
    transform_graph_uri = artifact_utils.get_single_uri(
        input_dict[TRANSFORM_GRAPH_KEY])
    temp_path = os.path.join(transform_graph_uri, _TEMP_DIR_IN_TRANSFORM_OUTPUT)
    # transformed_schema_file = os.path.join(
    #   transform_graph_uri,
    #   tft.TFTransformOutput.TRANSFORMED_METADATA_DIR,
    #   'schema.pbtxt'
    # )
    # transformed_schema_proto = io_utils.parse_pbtxt_file(
    #   transformed_schema_file,
    #   schema_pb2.Schema()
    # )
    transformed_train_output = artifact_utils.get_split_uri(
      output_dict[TRANSFORMED_EXAMPLES_KEY], 'train')
    transformed_eval_output = artifact_utils.get_split_uri(
      output_dict[TRANSFORMED_EXAMPLES_KEY], 'eval')

    tf_transform_output = tft.TFTransformOutput(transform_graph_uri)
    # transform_output_dataset_metadata = dataset_metadata.DatasetMetadata(
    #   schema=transformed_schema_proto
    # )

    # transform_fn = (tf_transform_output.transform_raw_features, transform_output_dataset_metadata)
    # feature_spec = schema_utils.schema_as_feature_spec(schema_proto).feature_spec
    schema_file = io_utils.get_only_uri_in_dir(
        artifact_utils.get_single_uri(input_dict[SCHEMA_KEY]))
    schema_proto = io_utils.parse_pbtxt_file(schema_file, schema_pb2.Schema())
    transform_input_dataset_metadata = dataset_metadata.DatasetMetadata(
      schema_proto
    )

    train_data_uri = artifact_utils.get_split_uri(
      input_dict[EXAMPLES_KEY],
      'train'
    )
    eval_data_uri = artifact_utils.get_split_uri(
      input_dict[EXAMPLES_KEY],
      'eval'
    )
    analyze_data_paths = [io_utils.all_files_pattern(train_data_uri)]
    transform_data_paths = [
      io_utils.all_files_pattern(train_data_uri),
      io_utils.all_files_pattern(eval_data_uri),
    ]
    materialize_output_paths = [
      os.path.join(transformed_train_output, _DEFAULT_TRANSFORMED_EXAMPLES_PREFIX),
      os.path.join(transformed_eval_output, _DEFAULT_TRANSFORMED_EXAMPLES_PREFIX)
    ]
    transform_data_list = self._MakeDatasetList(
      transform_data_paths,
      materialize_output_paths
    )
    analyze_data_list = self._MakeDatasetList(
      analyze_data_paths,
    )

    with self._make_beam_pipeline() as pipeline:
      with tft_beam.Context(temp_dir=temp_path):
        # NOTE: Unclear if there is a difference between input_dataset_metadata
        # and transform_input_dataset_metadata. Look at Transform executor.
        decode_fn = tft.coders.ExampleProtoCoder(schema_proto, serialized=True).decode

        input_analysis_data = {}
        for dataset in analyze_data_list:
          infix = 'AnalysisIndex{}'.format(dataset.index)
          dataset.serialized = (
            pipeline
            | 'ReadDataset[{}]'.format(infix) >> self._ReadExamples(
                dataset, transform_input_dataset_metadata))
          dataset.decoded = (
            dataset.serialized
            | 'Decode[{}]'.format(infix)
            >> self._DecodeInputs(decode_fn))
          input_analysis_data[dataset.dataset_key] = dataset.decoded

        if not hasattr(tft_beam.analyzer_cache, 'DatasetKey'):
          input_analysis_data = (
              [
                  dataset for dataset in input_analysis_data.values()
                  if dataset is not None
              ]
              | 'FlattenAnalysisDatasetsBecauseItIsRequired' >>
              beam.Flatten(pipeline=pipeline))

        transform_fn = (
            (input_analysis_data, transform_input_dataset_metadata)
            | 'Analyze' >> tft_beam.AnalyzeDataset(
                tf_transform_output.transform_raw_features, pipeline=pipeline))

        for dataset in transform_data_list:
          infix = 'TransformIndex{}'.format(dataset.index)
          dataset.serialized = (
            pipeline
            | 'ReadDataset[{}]'.format(infix) >> self._ReadExamples(
                dataset, transform_input_dataset_metadata))

          dataset.decoded = (
            dataset.serialized
            | 'Decode[{}]'.format(infix)
            >> self._DecodeInputs(decode_fn))

          dataset.transformed, metadata = (
              ((dataset.decoded, transform_input_dataset_metadata), transform_fn)
              | 'Transform[{}]'.format(infix) >> tft_beam.TransformDataset())

          dataset.transformed_and_serialized = (
              dataset.transformed
              | 'EncodeAndSerialize[{}]'.format(infix)
              >> beam.ParDo(self._EncodeAsSerializedExamples(), _GetSchemaProto(metadata)))

          _ = (
            dataset.transformed_and_serialized
            | 'Materialize[{}]'.format(infix) >> self._WriteExamples(dataset.materialize_output_path))
Example #17
0
def transform_data(working_dir):
    """Transform the data and write out as a TFRecord of Example protos.

  Read in the data from the positive and negative examples on disk, and
  transform it using a preprocessing pipeline that removes punctuation,
  tokenizes it, and maps tokens to int64 value indices.

  Args:
    working_dir: Directory to read shuffled data from and write transformed data
        and metadata to.
  """

    with beam.Pipeline() as pipeline:
        with tft_beam.Context(
                temp_dir=os.path.join(working_dir, TRANSFORM_TEMP_DIR)):
            tfxio_train_data = tfxio.TFExampleRecord(file_pattern=os.path.join(
                working_dir, SHUFFLED_TRAIN_DATA_FILEBASE + '*'),
                                                     schema=SCHEMA)
            train_data = (pipeline |
                          'TFXIORead[Train]' >> tfxio_train_data.BeamSource())

            tfxio_test_data = tfxio.TFExampleRecord(file_pattern=os.path.join(
                working_dir, SHUFFLED_TEST_DATA_FILEBASE + '*'),
                                                    schema=SCHEMA)
            test_data = (pipeline
                         | 'TFXIORead[Test]' >> tfxio_test_data.BeamSource())

            def preprocessing_fn(inputs):
                """Preprocess input columns into transformed columns."""
                review = inputs[REVIEW_KEY]

                # Here tf.compat.v1.string_split behaves differently from
                # tf.strings.split.
                review_tokens = tf.compat.v1.string_split(review, DELIMITERS)
                review_indices = tft.compute_and_apply_vocabulary(
                    review_tokens, top_k=VOCAB_SIZE)
                # Add one for the oov bucket created by compute_and_apply_vocabulary.
                review_bow_indices, review_weight = tft.tfidf(
                    review_indices, VOCAB_SIZE + 1)
                return {
                    REVIEW_KEY: review_bow_indices,
                    REVIEW_WEIGHT_KEY: review_weight,
                    LABEL_KEY: inputs[LABEL_KEY]
                }

            # Transformed metadata is not necessary for encoding.
            # The TFXIO output format is chosen for improved performance.
            (transformed_train_data, _), transform_fn = (
                (train_data, tfxio_train_data.TensorAdapterConfig())
                | 'AnalyzeAndTransform' >> tft_beam.AnalyzeAndTransformDataset(
                    preprocessing_fn, output_record_batches=True))

            transformed_test_data, _ = (
                ((test_data, tfxio_test_data.TensorAdapterConfig()),
                 transform_fn)
                | 'Transform' >>
                tft_beam.TransformDataset(output_record_batches=True))

            # Extract transformed RecordBatches, encode and write them to the given
            # directory.
            coder = tfxio.RecordBatchToExamplesEncoder()
            _ = (transformed_train_data
                 | 'EncodeTrainData' >>
                 beam.FlatMapTuple(lambda batch, _: coder.encode(batch))
                 | 'WriteTrainData' >> beam.io.WriteToTFRecord(
                     os.path.join(working_dir,
                                  TRANSFORMED_TRAIN_DATA_FILEBASE)))

            _ = (
                transformed_test_data
                | 'EncodeTestData' >>
                beam.FlatMapTuple(lambda batch, _: coder.encode(batch))
                | 'WriteTestData' >> beam.io.WriteToTFRecord(
                    os.path.join(working_dir, TRANSFORMED_TEST_DATA_FILEBASE)))

            # Will write a SavedModel and metadata to two subdirectories of
            # working_dir, given by tft.TRANSFORM_FN_DIR and
            # tft.TRANSFORMED_METADATA_DIR respectively.
            _ = (transform_fn
                 |
                 'WriteTransformFn' >> tft_beam.WriteTransformFn(working_dir))
Example #18
0
    def test_caching_vocab_for_integer_categorical(self):

        span_0_key = 'span-0'
        span_1_key = 'span-1'

        def preprocessing_fn(inputs):
            return {
                'x_vocab':
                tft.compute_and_apply_vocabulary(inputs['x'],
                                                 frequency_threshold=2)
            }

        input_metadata = dataset_metadata.DatasetMetadata(
            schema_utils.schema_from_feature_spec({
                'x':
                tf.FixedLenFeature([], tf.int64),
            }))
        input_data_dict = {
            span_0_key: [{
                'x': -2,
            }, {
                'x': -4,
            }, {
                'x': -1,
            }, {
                'x': 4,
            }],
            span_1_key: [{
                'x': -2,
            }, {
                'x': -1,
            }, {
                'x': 6,
            }, {
                'x': 7,
            }],
        }
        expected_transformed_data = [{
            'x_vocab': 0,
        }, {
            'x_vocab': 1,
        }, {
            'x_vocab': -1,
        }, {
            'x_vocab': -1,
        }]
        with _TestPipeline() as p:
            flat_data = p | 'CreateInputData' >> beam.Create(
                list(itertools.chain(*input_data_dict.values())))

            cache_dict = {
                span_0_key: {
                    b'__v0__VocabularyAccumulate[compute_and_apply_vocabulary/vocabulary]-\x05e\xfe4\x03H.P\xb5\xcb\xd22\xe3\x16\x15\xf8\xf5\xe38\xd9':
                    p | 'CreateB' >> beam.Create(
                        [b'[-2, 2]', b'[-4, 1]', b'[-1, 1]', b'[4, 1]']),
                },
                span_1_key: {},
            }

            transform_fn, cache_output = (
                (flat_data, input_data_dict, cache_dict, input_metadata)
                | 'Analyze' >>
                tft_beam.AnalyzeDatasetWithCache(preprocessing_fn))

            dot_string = nodes.get_dot_graph(
                [analysis_graph_builder._ANALYSIS_GRAPH]).to_string()
            self.WriteRenderedDotFile(dot_string)

            self.assertNotIn(span_0_key, cache_output)

            _ = cache_output | 'WriteCache' >> analyzer_cache.WriteAnalysisCacheToFS(
                p, self._cache_dir)

            transformed_dataset = (
                ((input_data_dict[span_1_key], input_metadata), transform_fn)
                | 'Transform' >> tft_beam.TransformDataset())

            transformed_data, _ = transformed_dataset

            beam_test_util.assert_that(
                transformed_data,
                beam_test_util.equal_to(expected_transformed_data),
                label='first')

        # 4 from analysis since 1 span was completely cached, and 4 from transform.
        self.assertEqual(_get_counter_value(p.metrics, 'num_instances'), 8)
        self.assertEqual(
            _get_counter_value(p.metrics, 'cache_entries_decoded'), 1)
        self.assertEqual(
            _get_counter_value(p.metrics, 'cache_entries_encoded'), 1)
        self.assertEqual(_get_counter_value(p.metrics, 'saved_models_created'),
                         2)
Example #19
0
    def test_single_phase_run_twice(self):

        span_0_key = 'span-0'
        span_1_key = 'span-1'

        def preprocessing_fn(inputs):

            _ = tft.vocabulary(inputs['s'], vocab_filename='vocab1')

            _ = tft.bucketize(inputs['x'], 2, name='bucketize')

            return {
                'x_min':
                tft.min(inputs['x'], name='x') + tf.zeros_like(inputs['x']),
                'x_mean':
                tft.mean(inputs['x'], name='x') + tf.zeros_like(inputs['x']),
                'y_min':
                tft.min(inputs['y'], name='y') + tf.zeros_like(inputs['y']),
                'y_mean':
                tft.mean(inputs['y'], name='y') + tf.zeros_like(inputs['y']),
                's_integerized':
                tft.compute_and_apply_vocabulary(
                    inputs['s'],
                    labels=inputs['label'],
                    use_adjusted_mutual_info=True),
            }

        input_metadata = dataset_metadata.DatasetMetadata(
            schema_utils.schema_from_feature_spec({
                'x':
                tf.io.FixedLenFeature([], tf.float32),
                'y':
                tf.io.FixedLenFeature([], tf.float32),
                's':
                tf.io.FixedLenFeature([], tf.string),
                'label':
                tf.io.FixedLenFeature([], tf.int64),
            }))
        input_data_dict = {
            span_0_key: [{
                'x': -2,
                'y': 1,
                's': 'a',
                'label': 0,
            }, {
                'x': 4,
                'y': -4,
                's': 'a',
                'label': 1,
            }, {
                'x': 5,
                'y': 11,
                's': 'a',
                'label': 1,
            }, {
                'x': 1,
                'y': -4,
                's': u'ȟᎥ𝒋ǩľḿꞑȯ𝘱𝑞𝗋𝘴'.encode('utf-8'),
                'label': 1,
            }],
            span_1_key: [{
                'x': 12,
                'y': 1,
                's': u'ȟᎥ𝒋ǩľḿꞑȯ𝘱𝑞𝗋𝘴'.encode('utf-8'),
                'label': 0
            }, {
                'x': 10,
                'y': 1,
                's': 'c',
                'label': 1
            }],
        }
        expected_vocabulary_contents = np.array(
            [b'a', u'ȟᎥ𝒋ǩľḿꞑȯ𝘱𝑞𝗋𝘴'.encode('utf-8'), b'c'], dtype=object)
        with _TestPipeline() as p:
            flat_data = p | 'CreateInputData' >> beam.Create(
                list(itertools.chain(*input_data_dict.values())))

            # Wrap each value in input_data_dict as a PCollection.
            input_data_pcoll_dict = {}
            for a, b in six.iteritems(input_data_dict):
                input_data_pcoll_dict[a] = p | a >> beam.Create(b)

            transform_fn_1, cache_output = (
                (flat_data, input_data_pcoll_dict, {}, input_metadata)
                | 'Analyze' >>
                tft_beam.AnalyzeDatasetWithCache(preprocessing_fn))
            _ = (cache_output
                 | 'WriteCache' >> analyzer_cache.WriteAnalysisCacheToFS(
                     p, self._cache_dir))

            transformed_dataset = (((input_data_pcoll_dict[span_1_key],
                                     input_metadata), transform_fn_1)
                                   |
                                   'Transform' >> tft_beam.TransformDataset())

            del input_data_pcoll_dict
            transformed_data, unused_transformed_metadata = transformed_dataset

            expected_transformed_data = [
                {
                    'x_mean': 5.0,
                    'x_min': -2.0,
                    'y_mean': 1.0,
                    'y_min': -4.0,
                    's_integerized': 0,
                },
                {
                    'x_mean': 5.0,
                    'x_min': -2.0,
                    'y_mean': 1.0,
                    'y_min': -4.0,
                    's_integerized': 2,
                },
            ]
            beam_test_util.assert_that(
                transformed_data,
                beam_test_util.equal_to(expected_transformed_data),
                label='first')

            transform_fn_dir = os.path.join(self.base_test_dir,
                                            'transform_fn_1')
            _ = transform_fn_1 | tft_beam.WriteTransformFn(transform_fn_dir)

            for key in input_data_dict:
                self.assertIn(key, cache_output)
                self.assertEqual(7, len(cache_output[key]))

        tf_transform_output = tft.TFTransformOutput(transform_fn_dir)
        vocab1_path = tf_transform_output.vocabulary_file_by_name('vocab1')
        self.AssertVocabularyContents(vocab1_path,
                                      expected_vocabulary_contents)

        # 6 from analyzing 2 spans, and 2 from transform.
        self.assertEqual(_get_counter_value(p.metrics, 'num_instances'), 8)
        self.assertEqual(
            _get_counter_value(p.metrics, 'cache_entries_decoded'), 0)
        self.assertEqual(
            _get_counter_value(p.metrics, 'cache_entries_encoded'), 14)
        self.assertEqual(_get_counter_value(p.metrics, 'saved_models_created'),
                         2)

        with _TestPipeline() as p:
            flat_data = p | 'CreateInputData' >> beam.Create(
                list(itertools.chain(*input_data_dict.values())))

            # Wrap each value in input_data_dict as a PCollection.
            input_data_pcoll_dict = {}
            for a, b in six.iteritems(input_data_dict):
                input_data_pcoll_dict[a] = p | a >> beam.Create(b)

            input_cache = p | analyzer_cache.ReadAnalysisCacheFromFS(
                self._cache_dir, list(input_data_dict.keys()))

            transform_fn_2, second_output_cache = (
                (flat_data, input_data_pcoll_dict, input_cache, input_metadata)
                | 'AnalyzeAgain' >>
                (tft_beam.AnalyzeDatasetWithCache(preprocessing_fn)))
            _ = (second_output_cache
                 | 'WriteCache' >> analyzer_cache.WriteAnalysisCacheToFS(
                     p, self._cache_dir))

            dot_string = nodes.get_dot_graph(
                [analysis_graph_builder._ANALYSIS_GRAPH]).to_string()
            self.WriteRenderedDotFile(dot_string)

            transformed_dataset = (
                ((input_data_dict[span_1_key], input_metadata), transform_fn_2)
                | 'TransformAgain' >> tft_beam.TransformDataset())
            transformed_data, unused_transformed_metadata = transformed_dataset
            beam_test_util.assert_that(
                transformed_data,
                beam_test_util.equal_to(expected_transformed_data),
                label='second')

            transform_fn_dir = os.path.join(self.base_test_dir,
                                            'transform_fn_2')
            _ = transform_fn_2 | tft_beam.WriteTransformFn(transform_fn_dir)

        tf_transform_output = tft.TFTransformOutput(transform_fn_dir)
        vocab1_path = tf_transform_output.vocabulary_file_by_name('vocab1')
        self.AssertVocabularyContents(vocab1_path,
                                      expected_vocabulary_contents)

        self.assertFalse(second_output_cache)

        # Only 2 from transform.
        self.assertEqual(_get_counter_value(p.metrics, 'num_instances'), 2)
        self.assertEqual(
            _get_counter_value(p.metrics, 'cache_entries_decoded'), 14)
        self.assertEqual(
            _get_counter_value(p.metrics, 'cache_entries_encoded'), 0)

        # The root CreateSavedModel is optimized away because the data doesn't get
        # processed at all (only cache).
        self.assertEqual(_get_counter_value(p.metrics, 'saved_models_created'),
                         1)
Example #20
0
    def test_single_phase_mixed_analyzer_run_once(self):
        span_0_key = 'span-0'
        span_1_key = 'span-1'

        def preprocessing_fn(inputs):

            integerized_s = tft.compute_and_apply_vocabulary(inputs['s'])

            _ = tft.bucketize(inputs['x'], 2, name='bucketize')

            return {
                'integerized_s':
                integerized_s,
                'x_min':
                tft.min(inputs['x'], name='x') + tf.zeros_like(inputs['x']),
                'x_mean':
                tft.mean(inputs['x'], name='x') + tf.zeros_like(inputs['x']),
                'y_min':
                tft.min(inputs['y'], name='y') + tf.zeros_like(inputs['y']),
                'y_mean':
                tft.mean(inputs['y'], name='y') + tf.zeros_like(inputs['y']),
            }

        # Run AnalyzeAndTransform on some input data and compare with expected
        # output.
        input_data = [{'x': 12, 'y': 1, 's': 'd'}, {'x': 10, 'y': 1, 's': 'c'}]
        input_metadata = dataset_metadata.DatasetMetadata(
            schema_utils.schema_from_feature_spec({
                'x':
                tf.io.FixedLenFeature([], tf.float32),
                'y':
                tf.io.FixedLenFeature([], tf.float32),
                's':
                tf.io.FixedLenFeature([], tf.string),
            }))
        input_data_dict = {
            span_0_key: [{
                'x': -2,
                'y': 1,
                's': 'b',
            }, {
                'x': 4,
                'y': -4,
                's': 'b',
            }],
            span_1_key:
            input_data,
        }

        with _TestPipeline() as p:
            flat_data = p | 'CreateInputData' >> beam.Create(
                list(itertools.chain(*input_data_dict.values())))
            cache_dict = {
                span_0_key: {
                    b'__v0__CacheableCombineAccumulate[x_1/mean_and_var]-.\xc4t>ZBv\xea\xa5SU\xf4\x065\xc6\x1c\x81W\xf9\x1b':
                    p | 'CreateA' >> beam.Create([b'[2.0, 1.0, 9.0, 0.0]']),
                    b'__v0__CacheableCombineAccumulate[x/x]-\x95\xc5w\x88\x85\x8b5V\xc9\x00\xe0\x0f\x03\x1a\xdaL\x9d\xd5\xb3\xe3':
                    p | 'CreateB' >> beam.Create([b'[2.0, 4.0]']),
                    b'__v0__CacheableCombineAccumulate[y_1/mean_and_var]-E^\xb7VZ\xeew4rm\xab\xa3\xa4k|J\x80ck\x16':
                    p | 'CreateC' >> beam.Create([b'[2.0, -1.5, 6.25, 0.0]']),
                    b'__v0__CacheableCombineAccumulate[y/y]-\xdf\x1ey\x03\x1c\x96\xd5'
                    b' e\x9bJ\xa1\xd2\xfc\x9c\x03\x0fM \xdb':
                    p | 'CreateD' >> beam.Create([b'[4.0, 1.0]']),
                },
                span_1_key: {},
            }

            transform_fn, cache_output = (
                (flat_data, input_data_dict, cache_dict, input_metadata)
                | 'Analyze' >>
                tft_beam.AnalyzeDatasetWithCache(preprocessing_fn))
            _ = (cache_output | 'WriteCache' >>
                 analyzer_cache.WriteAnalysisCacheToFS(p, self._cache_dir))

            transformed_dataset = (
                ((input_data_dict[span_1_key], input_metadata), transform_fn)
                | 'Transform' >> tft_beam.TransformDataset())

            dot_string = nodes.get_dot_graph(
                [analysis_graph_builder._ANALYSIS_GRAPH]).to_string()
            self.WriteRenderedDotFile(dot_string)

            # The output cache should not have entries for the cache that is present
            # in the input cache.
            self.assertEqual(len(cache_output[span_0_key]),
                             len(cache_output[span_1_key]) - 4)

            transformed_data, unused_transformed_metadata = transformed_dataset

            expected_transformed = [
                {
                    'x_mean': 6.0,
                    'x_min': -2.0,
                    'y_mean': -0.25,
                    'y_min': -4.0,
                    'integerized_s': 1,
                },
                {
                    'x_mean': 6.0,
                    'x_min': -2.0,
                    'y_mean': -0.25,
                    'y_min': -4.0,
                    'integerized_s': 2,
                },
            ]
            beam_test_util.assert_that(
                transformed_data,
                beam_test_util.equal_to(expected_transformed))

            transform_fn_dir = os.path.join(self.base_test_dir, 'transform_fn')
            _ = transform_fn | tft_beam.WriteTransformFn(transform_fn_dir)

        # 4 from analyzing 2 spans, and 2 from transform.
        self.assertEqual(_get_counter_value(p.metrics, 'num_instances'), 6)
        self.assertEqual(
            _get_counter_value(p.metrics, 'cache_entries_decoded'), 4)
        self.assertEqual(
            _get_counter_value(p.metrics, 'cache_entries_encoded'), 8)
        self.assertEqual(_get_counter_value(p.metrics, 'saved_models_created'),
                         2)
Example #21
0
def transform_data(train_data_file, test_data_file, working_dir):
    """Transform the data and write out as a TFRecord of Example protos.

  Read in the data using the CSV reader, and transform it using a
  preprocessing pipeline that scales numeric data and converts categorical data
  from strings to int64 value indices, by creating a vocabulary for each
  category.

  Args:
    train_data_file: File containing training data
    test_data_file: File containing test data
    working_dir: Directory to write transformed data and metadata to
  """
    def preprocessing_fn(inputs):
        """Preprocess input columns into transformed columns."""
        # Since we are modifying some features and leaving others unchanged, we
        # start by setting `outputs` to a copy of `inputs`.
        outputs = inputs.copy()

        # Scale numeric columns to have range [0, 1].
        for key in NUMERIC_FEATURE_KEYS:
            outputs[key] = tft.scale_to_0_1(inputs[key])

        for key in OPTIONAL_NUMERIC_FEATURE_KEYS:
            # This is a SparseTensor because it is optional. Here we fill in a default
            # value when it is missing.
            sparse = tf.sparse.SparseTensor(inputs[key].indices,
                                            inputs[key].values,
                                            [inputs[key].dense_shape[0], 1])
            dense = tf.sparse.to_dense(sp_input=sparse, default_value=0.)
            # Reshaping from a batch of vectors of size 1 to a batch of scalars.
            dense = tf.squeeze(dense, axis=1)
            outputs[key] = tft.scale_to_0_1(dense)

        # For all categorical columns except the label column, we generate a
        # vocabulary but do not modify the feature.  This vocabulary is instead
        # used in the trainer, by means of a feature column, to convert the feature
        # from a string to an integer id.
        for key in CATEGORICAL_FEATURE_KEYS:
            outputs[key] = tft.compute_and_apply_vocabulary(tf.strings.strip(
                inputs[key]),
                                                            num_oov_buckets=1,
                                                            vocab_filename=key)

        # For the label column we provide the mapping from string to index.
        table_keys = ['>50K', '<=50K']
        initializer = tf.lookup.KeyValueTensorInitializer(
            keys=table_keys,
            values=tf.cast(tf.range(len(table_keys)), tf.int64),
            key_dtype=tf.string,
            value_dtype=tf.int64)
        table = tf.lookup.StaticHashTable(initializer, default_value=-1)
        # Remove trailing periods for test data when the data is read with tf.data.
        label_str = tf.strings.regex_replace(inputs[LABEL_KEY], r'\.', '')
        label_str = tf.strings.strip(label_str)
        data_labels = table.lookup(label_str)
        transformed_label = tf.one_hot(indices=data_labels,
                                       depth=len(table_keys),
                                       on_value=1.0,
                                       off_value=0.0)
        outputs[LABEL_KEY] = tf.reshape(transformed_label,
                                        [-1, len(table_keys)])

        return outputs

    # The "with" block will create a pipeline, and run that pipeline at the exit
    # of the block.
    with beam.Pipeline() as pipeline:
        with tft_beam.Context(temp_dir=tempfile.mkdtemp()):
            # Create a TFXIO to read the census data with the schema. To do this we
            # need to list all columns in order since the schema doesn't specify the
            # order of columns in the csv.
            # We first read CSV files and use BeamRecordCsvTFXIO whose .BeamSource()
            # accepts a PCollection[bytes] because we need to patch the records first
            # (see "FixCommasTrainData" below). Otherwise, tfxio.CsvTFXIO can be used
            # to both read the CSV files and parse them to TFT inputs:
            # csv_tfxio = tfxio.CsvTFXIO(...)
            # raw_data = (pipeline | 'ToRecordBatches' >> csv_tfxio.BeamSource())
            csv_tfxio = tfxio.BeamRecordCsvTFXIO(
                physical_format='text',
                column_names=ORDERED_CSV_COLUMNS,
                schema=SCHEMA)

            # Read in raw data and convert using CSV TFXIO.  Note that we apply
            # some Beam transformations here, which will not be encoded in the TF
            # graph since we don't do them from within tf.Transform's methods
            # (AnalyzeDataset, TransformDataset etc.).  These transformations are just
            # to get data into a format that the CSV TFXIO can read, in particular
            # removing spaces after commas.
            raw_data = (pipeline
                        | 'ReadTrainData' >> beam.io.ReadFromText(
                            train_data_file, coder=beam.coders.BytesCoder())
                        | 'FixCommasTrainData' >>
                        beam.Map(lambda line: line.replace(b', ', b','))
                        | 'DecodeTrainData' >> csv_tfxio.BeamSource())

            # Combine data and schema into a dataset tuple.  Note that we already used
            # the schema to read the CSV data, but we also need it to interpret
            # raw_data.
            raw_dataset = (raw_data, csv_tfxio.TensorAdapterConfig())
            transformed_dataset, transform_fn = (
                raw_dataset
                | tft_beam.AnalyzeAndTransformDataset(preprocessing_fn))
            transformed_data, transformed_metadata = transformed_dataset
            transformed_data_coder = tft.coders.ExampleProtoCoder(
                transformed_metadata.schema)

            _ = (transformed_data
                 | 'EncodeTrainData' >> beam.Map(transformed_data_coder.encode)
                 | 'WriteTrainData' >> beam.io.WriteToTFRecord(
                     os.path.join(working_dir,
                                  TRANSFORMED_TRAIN_DATA_FILEBASE)))

            # Now apply transform function to test data.  In this case we remove the
            # trailing period at the end of each line, and also ignore the header line
            # that is present in the test data file.
            raw_test_data = (pipeline
                             | 'ReadTestData' >> beam.io.ReadFromText(
                                 test_data_file,
                                 skip_header_lines=1,
                                 coder=beam.coders.BytesCoder())
                             | 'FixCommasTestData' >>
                             beam.Map(lambda line: line.replace(b', ', b','))
                             | 'RemoveTrailingPeriodsTestData' >>
                             beam.Map(lambda line: line[:-1])
                             | 'DecodeTestData' >> csv_tfxio.BeamSource())

            raw_test_dataset = (raw_test_data, csv_tfxio.TensorAdapterConfig())

            transformed_test_dataset = ((raw_test_dataset, transform_fn)
                                        | tft_beam.TransformDataset())
            # Don't need transformed data schema, it's the same as before.
            transformed_test_data, _ = transformed_test_dataset

            _ = (
                transformed_test_data
                | 'EncodeTestData' >> beam.Map(transformed_data_coder.encode)
                | 'WriteTestData' >> beam.io.WriteToTFRecord(
                    os.path.join(working_dir, TRANSFORMED_TEST_DATA_FILEBASE)))

            # Will write a SavedModel and metadata to working_dir, which can then
            # be read by the tft.TFTransformOutput class.
            _ = (transform_fn
                 |
                 'WriteTransformFn' >> tft_beam.WriteTransformFn(working_dir))
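# A hedged sketch of the alternative mentioned in the comments above: when the
# CSV records need no patching, tfxio.CsvTFXIO can read and parse the files in
# one step. The exact constructor arguments are an assumption here; check the
# tfx_bsl version in use. train_data_file, ORDERED_CSV_COLUMNS and SCHEMA are
# the same names used in the example above.
csv_tfxio_direct = tfxio.CsvTFXIO(
    file_pattern=train_data_file,
    column_names=ORDERED_CSV_COLUMNS,
    schema=SCHEMA)
with beam.Pipeline() as pipeline:
    raw_data = pipeline | 'CsvToRecordBatches' >> csv_tfxio_direct.BeamSource()
    # The (data, tensor-adapter config) tuple feeds AnalyzeAndTransformDataset
    # exactly as in the example above.
    raw_dataset = (raw_data, csv_tfxio_direct.TensorAdapterConfig())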
Example #22
0
def transform_data(input_handle,
                   outfile_prefix,
                   working_dir,
                   schema_file,
                   transform_dir=None,
                   max_rows=None,
                   pipeline_args=None):
  """The main tf.transform method which analyzes and transforms data.

  Args:
    input_handle: BigQuery table name to process specified as DATASET.TABLE or
      path to csv file with input data.
    outfile_prefix: Filename prefix for emitted transformed examples
    working_dir: Directory in which transformed examples and transform function
      will be emitted.
    schema_file: A file path that contains a text-serialized TensorFlow
      metadata schema of the input data.
    transform_dir: Directory in which the transform output is located. If
      provided, this will load the transform_fn from disk instead of computing
      it over the data. Hint: this is useful for transforming eval data.
    max_rows: Number of rows to query from BigQuery
    pipeline_args: additional DataflowRunner or DirectRunner args passed to the
      beam pipeline.
  """

  def transform_ngrams(input, ngram_range):
    """ helper function to transform ngrams and print output. """
    # this print statement causes output to concat itself!
    # input = tf.Print(input, [input], "raw input:", first_n=-1, summarize=100)

    transformed = transform.ngrams(
      tf.string_split(input, delimiter=" "),
      ngram_range=ngram_range,
      separator=' ')

    # SparseTensor basically cannot be printed because it's made up of 3
    # tensors. We can use this trick to print the values column, but without the index
    # it's not too meaningful.
    #
    # values = tf.Print(transformed.values, [transformed.values], "ngram output:")
    # transformed = tf.SparseTensor(
    #       indices=transformed.indices,
    #       values=values,
    #       dense_shape=transformed.dense_shape)
    return transformed

  def preprocessing_fn(inputs):
    """tf.transform's callback function for preprocessing inputs.
    https://cloud.google.com/solutions/machine-learning/data-preprocessing-for-ml-with-tf-transform-pt2

    Args:
      inputs: map from feature keys to raw not-yet-transformed features.

    Returns:
      Map from string feature key to transformed feature operations.
    """
    outputs = {}
    for key in taxi.DENSE_FLOAT_FEATURE_KEYS:
      print('processing key', key)
      print('input:', inputs[key])
      # Preserve this feature as a dense float, setting nan's to the mean.
      outputs[taxi.transformed_name(key)] = transform.scale_to_z_score(
          _fill_in_missing(inputs[key]))

    for key in taxi.VOCAB_FEATURE_KEYS:
      # Build a vocabulary for this feature.
      outputs[
          taxi.transformed_name(key)] = transform.compute_and_apply_vocabulary(
              _fill_in_missing(inputs[key]),
              top_k=taxi.VOCAB_SIZE,
              num_oov_buckets=taxi.OOV_SIZE)

    # for key in taxi.FEATURE_NGRAM:
    #   # Extract ngrams and build a vocab.
    #   outputs[
    #       taxi.transformed_name(key)] = transform.compute_and_apply_vocabulary(
    #           transform.ngrams(
    #             tf.string_split(_fill_in_missing(inputs[key])),
    #             ngram_range=taxi.NGRAM_RANGE,
    #             separator=' '),
    #           top_k=512,
    #           num_oov_buckets=taxi.OOV_SIZE)

    for key in taxi.FEATURE_NGRAM:
      # Extract ngrams and build a vocab.
      outputs[
          taxi.transformed_name(key)] = transform.compute_and_apply_vocabulary(
            transform_ngrams(_fill_in_missing(inputs[key]), taxi.NGRAM_RANGE),
            top_k=taxi.VOCAB_SIZE,
            num_oov_buckets=taxi.OOV_SIZE)

    for key in taxi.BUCKET_FEATURE_KEYS:
      outputs[taxi.transformed_name(key)] = transform.bucketize(
          _fill_in_missing(inputs[key]), taxi.FEATURE_BUCKET_COUNT)

    for key in taxi.CATEGORICAL_FEATURE_KEYS:
      outputs[taxi.transformed_name(key)] = _fill_in_missing(inputs[key])

    # Was this passenger a big tipper?
    taxi_fare = _fill_in_missing(inputs[taxi.FARE_KEY])
    tips = _fill_in_missing(inputs[taxi.LABEL_KEY])
    outputs[taxi.transformed_name(taxi.LABEL_KEY)] = tf.where(
        tf.is_nan(taxi_fare),
        tf.cast(tf.zeros_like(taxi_fare), tf.int64),
        # Test if the tip was > 20% of the fare.
        tf.cast(
            tf.greater(tips, tf.multiply(taxi_fare, tf.constant(0.2))),
            tf.int64))

    return outputs

  schema = taxi.read_schema(schema_file)
  raw_feature_spec = taxi.get_raw_feature_spec(schema)
  raw_schema = dataset_schema.from_feature_spec(raw_feature_spec)
  raw_data_metadata = dataset_metadata.DatasetMetadata(raw_schema)

  with beam.Pipeline(argv=pipeline_args) as pipeline:
    with tft_beam.Context(temp_dir=working_dir):
      if input_handle.lower().endswith('csv'):
        csv_coder = taxi.make_csv_coder(schema, input_handle.lower())
        raw_data = (
            pipeline
            | 'ReadFromText' >> beam.io.ReadFromText(
                input_handle, skip_header_lines=1))
        decode_transform = beam.Map(csv_coder.decode)
      else:
        query = taxi.make_sql(input_handle, max_rows, for_eval=False)
        raw_data = (
            pipeline
            | 'ReadBigQuery' >> beam.io.Read(
                beam.io.BigQuerySource(query=query, use_standard_sql=True)))
        decode_transform = beam.Map(
            taxi.clean_raw_data_dict, raw_feature_spec=raw_feature_spec)

      if transform_dir is None:
        decoded_data = raw_data | 'DecodeForAnalyze' >> decode_transform
        transform_fn = (
            (decoded_data, raw_data_metadata) |
            ('Analyze' >> tft_beam.AnalyzeDataset(preprocessing_fn)))

        _ = (
            transform_fn
            | ('WriteTransformFn' >>
               tft_beam.WriteTransformFn(working_dir)))
      else:
        transform_fn = pipeline | tft_beam.ReadTransformFn(transform_dir)

      # Shuffling the data before materialization will improve training
      # effectiveness downstream. Here we shuffle the raw_data (as opposed to
      # decoded data) since it has a compact representation.
      shuffled_data = raw_data | 'RandomizeData' >> beam.transforms.Reshuffle()

      decoded_data = shuffled_data | 'DecodeForTransform' >> decode_transform
      (transformed_data, transformed_metadata) = (
          ((decoded_data, raw_data_metadata), transform_fn)
          | 'Transform' >> tft_beam.TransformDataset())

      coder = example_proto_coder.ExampleProtoCoder(transformed_metadata.schema)
      _ = (
          transformed_data
          | 'SerializeExamples' >> beam.Map(coder.encode)
          | 'WriteExamples' >> beam.io.WriteToTFRecord(
              os.path.join(working_dir, outfile_prefix), file_name_suffix='.gz')
      )
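# A hedged usage sketch for transform_data above; all paths and table names are
# hypothetical placeholders. The first call computes and writes the transform,
# the second reuses it via transform_dir so eval data is transformed consistently.
transform_data(
    input_handle='my_dataset.taxi_train',     # hypothetical BigQuery table
    outfile_prefix='train_transformed',
    working_dir='/tmp/taxi_working_dir',      # hypothetical path
    schema_file='/tmp/taxi_schema.pbtxt',     # hypothetical path
    pipeline_args=[])
transform_data(
    input_handle='my_dataset.taxi_eval',      # hypothetical BigQuery table
    outfile_prefix='eval_transformed',
    working_dir='/tmp/taxi_working_dir',
    schema_file='/tmp/taxi_schema.pbtxt',
    transform_dir='/tmp/taxi_working_dir',    # reuse the train transform_fn
    pipeline_args=[])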
Example #23
0
def transform_data(input_handle,
                   outfile_prefix,
                   working_dir,
                   schema_file,
                   transform_dir=None,
                   max_rows=None,
                   pipeline_args=None):
    """The main tf.transform method which analyzes and transforms data.

  Args:
    input_handle: BigQuery table name to process specified as DATASET.TABLE or
      path to csv file with input data.
    outfile_prefix: Filename prefix for emitted transformed examples
    working_dir: Directory in which transformed examples and transform function
      will be emitted.
    schema_file: An file path that contains a text-serialized TensorFlow
      metadata schema of the input data.
    transform_dir: Directory in which the transform output is located. If
      provided, this will load the transform_fn from disk instead of computing
      it over the data. Hint: this is useful for transforming eval data.
    max_rows: Number of rows to query from BigQuery
    pipeline_args: additional DataflowRunner or DirectRunner args passed to the
      beam pipeline.
  """
    def preprocessing_fn(inputs):
        """tf.transform's callback function for preprocessing inputs.

    Args:
      inputs: map from feature keys to raw not-yet-transformed features.

    Returns:
      Map from string feature key to transformed feature operations.
    """
        outputs = {}
        for key in taxi.DENSE_FLOAT_FEATURE_KEYS:
            # Preserve this feature as a dense float, setting nan's to the mean.
            outputs[taxi.transformed_name(key)] = transform.scale_to_z_score(
                _fill_in_missing(inputs[key]))

        for key in taxi.VOCAB_FEATURE_KEYS:
            # Build a vocabulary for this feature.
            outputs[taxi.transformed_name(
                key)] = transform.compute_and_apply_vocabulary(
                    _fill_in_missing(inputs[key]),
                    top_k=taxi.VOCAB_SIZE,
                    num_oov_buckets=taxi.OOV_SIZE)

        for key in taxi.BUCKET_FEATURE_KEYS:
            outputs[taxi.transformed_name(key)] = transform.bucketize(
                _fill_in_missing(inputs[key]), taxi.FEATURE_BUCKET_COUNT)

        for key in taxi.CATEGORICAL_FEATURE_KEYS:
            outputs[taxi.transformed_name(key)] = _fill_in_missing(inputs[key])

        # Was this passenger a big tipper?
        taxi_fare = _fill_in_missing(inputs[taxi.FARE_KEY])
        tips = _fill_in_missing(inputs[taxi.LABEL_KEY])
        outputs[taxi.transformed_name(taxi.LABEL_KEY)] = tf.where(
            tf.is_nan(taxi_fare),
            tf.cast(tf.zeros_like(taxi_fare), tf.int64),
            # Test if the tip was > 20% of the fare.
            tf.cast(tf.greater(tips, tf.multiply(taxi_fare, tf.constant(0.2))),
                    tf.int64))

        return outputs

    schema = taxi.read_schema(schema_file)
    raw_feature_spec = taxi.get_raw_feature_spec(schema)
    raw_schema = dataset_schema.from_feature_spec(raw_feature_spec)
    raw_data_metadata = dataset_metadata.DatasetMetadata(raw_schema)

    with beam.Pipeline(argv=pipeline_args) as pipeline:
        with tft_beam.Context(temp_dir=working_dir):
            if input_handle.lower().endswith('csv'):
                csv_coder = taxi.make_csv_coder(schema)
                raw_data = (pipeline
                            | 'ReadFromText' >> beam.io.ReadFromText(
                                input_handle, skip_header_lines=1))
                decode_transform = beam.Map(csv_coder.decode)
            else:
                query = taxi.make_sql(input_handle, max_rows, for_eval=False)
                raw_data = (pipeline
                            | 'ReadBigQuery' >> beam.io.Read(
                                beam.io.BigQuerySource(query=query,
                                                       use_standard_sql=True)))
                decode_transform = beam.Map(taxi.clean_raw_data_dict,
                                            raw_feature_spec=raw_feature_spec)

            if transform_dir is None:
                decoded_data = raw_data | 'DecodeForAnalyze' >> decode_transform
                transform_fn = (
                    (decoded_data, raw_data_metadata) |
                    ('Analyze' >> tft_beam.AnalyzeDataset(preprocessing_fn)))

                _ = (transform_fn
                     | ('WriteTransformFn' >>
                        tft_beam.WriteTransformFn(working_dir)))
            else:
                transform_fn = pipeline | tft_beam.ReadTransformFn(
                    transform_dir)

            # Shuffling the data before materialization will improve Training
            # effectiveness downstream. Here we shuffle the raw_data (as opposed to
            # decoded data) since it has a compact representation.
            shuffled_data = (
                raw_data | 'RandomizeData' >> beam.transforms.Reshuffle())

            decoded_data = shuffled_data | 'DecodeForTransform' >> decode_transform
            (transformed_data, transformed_metadata) = (
                ((decoded_data, raw_data_metadata), transform_fn)
                | 'Transform' >> tft_beam.TransformDataset())

            coder = example_proto_coder.ExampleProtoCoder(
                transformed_metadata.schema)
            _ = (transformed_data
                 | 'SerializeExamples' >> beam.Map(coder.encode)
                 | 'WriteExamples' >> beam.io.WriteToTFRecord(
                     os.path.join(working_dir, outfile_prefix),
                     file_name_suffix='.gz'))
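The transform_dir argument hints at the intended two-pass usage: analyze and write the transform function on the training data first, then reuse it when materializing eval data. A hypothetical invocation (dataset names, paths, and runner flags are placeholders):

# Hypothetical usage; dataset names, paths and runner flags are placeholders.
transform_data(
    input_handle='my_dataset.taxi_train',
    outfile_prefix='train_transformed',
    working_dir='gs://my-bucket/taxi/train',
    schema_file='gs://my-bucket/taxi/schema.pbtxt',
    pipeline_args=['--runner=DirectRunner'])

# Second pass: reuse the transform_fn written above instead of re-analyzing.
transform_data(
    input_handle='my_dataset.taxi_eval',
    outfile_prefix='eval_transformed',
    working_dir='gs://my-bucket/taxi/eval',
    schema_file='gs://my-bucket/taxi/schema.pbtxt',
    transform_dir='gs://my-bucket/taxi/train',
    pipeline_args=['--runner=DirectRunner'])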
Example #24
0
def transform_data(train_data_file, test_data_file, working_dir):
    """Transform the data and write out as a TFRecord of Example protos.
    Read in the data using the parquet io, and transform it using a
    preprocessing pipeline that scales numeric data and converts categorical data
    from strings to int64 values indices, by creating a vocabulary for each
    category.
    Args:
      train_data_file: File containing training data
      test_data_file: File containing test data
      feature_config: named tuple with feature types
      working_dir: Directory to write transformed data and metadata to
    """

    numerical_feats = [
        "startCountTotal", "purchaseCountTotal", "globalStartCountTotal",
        "globalPurchaseCountTotal"
    ]

    categorical_feats = ["country", "sourceGameId", "platform"]

    def preprocessing_fn(inputs):
        """Preprocess input columns into transformed columns."""
        outputs = {}

        for key in numerical_feats:
            outputs[key] = tf.cast(tft.bucketize(inputs[key], 20),
                                   tf.float32) / 20.0 - 0.5

        outputs["campaignCost_mod"] = inputs["campaignCost"] / 100.0

        inputs["game_zone"] = tf.string_join(
            [inputs["sourceGameId"], inputs["zone"]], separator="_")
        inputs["game_campaignId"] = tf.string_join(
            [inputs["sourceGameId"], inputs["campaignId"]], separator="_")

        for key in categorical_feats + ["game_zone", "game_campaignId"]:
            vocab = tft.vocabulary(inputs[key],
                                   vocab_filename=key,
                                   frequency_threshold=100)
            outputs[key] = tft.apply_vocabulary(inputs[key],
                                                vocab,
                                                default_value=0)

        outputs["label"] = inputs["label"]
        outputs["key"] = inputs["key"]

        return outputs

    # Input schema definition
    RAW_DATA_METADATA = gather_raw_metadata(
        numerical_feats + ["campaignCost"],
        categorical_feats + ["zone", "campaignId", "key"])

    # pipeline args to read from gcs, currently unused because reading local file
    pipeline_args = [
        '--runner=DirectRunner',
        '--project=unity-ads-ds-prd',
        #     '--staging_location=gs://unity-ads-ds-prd-users/villew/promo/staging',
        #     '--temp_location=gs://unity-ads-ds-prd-users/villew/promo/temp',
        '--job_name=transform-promo-data-to-tf-records'
    ]
    pipeline_options = PipelineOptions(pipeline_args)
    pipeline_options.view_as(SetupOptions).save_main_session = True

    # create a beam pipeline
    with beam.Pipeline(options=pipeline_options) as pipeline:
        with tft_beam.Context(temp_dir=tempfile.mkdtemp()):
            raw_data = (
                pipeline
                | 'ReadTrainData' >> beam.io.ReadFromParquet(train_data_file))

            # Combine data and schema into a dataset tuple.
            raw_dataset = (raw_data, RAW_DATA_METADATA)
            transformed_dataset, transform_fn = (
                raw_dataset
                | tft_beam.AnalyzeAndTransformDataset(preprocessing_fn))
            transformed_data, transformed_metadata = transformed_dataset
            transformed_data_coder = tft.coders.ExampleProtoCoder(
                transformed_metadata.schema)

            # write to tf record
            _ = (transformed_data
                 | 'EncodeTrainData' >> beam.Map(transformed_data_coder.encode)
                 | 'WriteTrainData' >> beam.io.WriteToTFRecord(
                     os.path.join(working_dir, "train_tfrecord")))

            # Now apply transform function to test data.
            raw_test_data = (
                pipeline
                | 'ReadTestData' >> beam.io.ReadFromParquet(test_data_file))

            raw_test_dataset = (raw_test_data, RAW_DATA_METADATA)

            transformed_test_dataset = ((raw_test_dataset, transform_fn)
                                        | tft_beam.TransformDataset())

            # Don't need transformed data schema, it's the same as before.
            transformed_test_data, _ = transformed_test_dataset

            _ = (transformed_test_data
                 | 'EncodeTestData' >> beam.Map(transformed_data_coder.encode)
                 | 'WriteTestData' >> beam.io.WriteToTFRecord(
                     os.path.join(working_dir, "test_tfrecord")))

            # Will write a SavedModel and metadata to working_dir, which can then
            # be read by the tft.TFTransformOutput class.
            _ = (transform_fn
                 |
                 'WriteTransformFn' >> tft_beam.WriteTransformFn(working_dir))
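The example relies on a gather_raw_metadata helper that is not shown. A plausible sketch, assuming all listed columns are scalar (the dtypes and shapes are guesses, and the real helper may also declare columns such as 'label'):

import tensorflow as tf
from tensorflow_transform.tf_metadata import dataset_metadata
from tensorflow_transform.tf_metadata import dataset_schema


def gather_raw_metadata(numeric_keys, string_keys):
    """Builds raw-data metadata from fixed-length feature specs.

    Sketch only: the real helper is not shown and may use different dtypes,
    shapes, or additionally declare columns such as 'label'.
    """
    feature_spec = {key: tf.io.FixedLenFeature([], tf.float32)
                    for key in numeric_keys}
    feature_spec.update({key: tf.io.FixedLenFeature([], tf.string)
                         for key in string_keys})
    return dataset_metadata.DatasetMetadata(
        dataset_schema.from_feature_spec(feature_spec))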
Example #25
0
def build_pipeline(df: pd.DataFrame, job_label: str, runner: str, project: str,
                   region: str, output_dir: str, compression: str,
                   num_shards: int, dataflow_options: dict,
                   integer_label: bool) -> beam.Pipeline:
    """Runs TFRecorder Beam Pipeline.

  Args:
    df: Pandas DataFrame
    job_label: User description for the beam job.
    runner: Beam Runner: (e.g. DataflowRunner, DirectRunner).
    project: GCP project ID (if DataflowRunner)
    region: GCP compute region (if DataflowRunner)
    output_dir: GCS or Local Path for output.
    compression: gzip or None.
    num_shards: Number of shards.
    dataflow_options: Dataflow Runner Options (optional)
    integer_label: Flags if label is already an integer.

  Returns:
    beam.Pipeline

  Note: These inputs must be validated upstream (by client.create_tfrecord())
  """

    job_name = _get_job_name(job_label)
    job_dir = _get_job_dir(output_dir, job_name)
    options = _get_pipeline_options(runner, job_name, job_dir, project, region,
                                    dataflow_options)

    #with beam.Pipeline(runner, options=options) as p:
    p = beam.Pipeline(options=options)
    with tft_beam.Context(temp_dir=os.path.join(job_dir, 'tft_tmp')):

        converter = tft.coders.CsvCoder(constants.IMAGE_CSV_COLUMNS,
                                        constants.IMAGE_CSV_METADATA.schema)

        extract_images_fn = beam_image.ExtractImagesDoFn(
            constants.IMAGE_URI_KEY)
        flatten_rows = ToCSVRows()

        # Each element in the image_csv_data PCollection will be a dict
        # including the image_csv_columns and the image features created from
        # extract_images_fn.
        image_csv_data = (
            p
            | 'ReadFromDataFrame' >> beam.Create(df.values.tolist())
            | 'ToCSVRows' >> beam.ParDo(flatten_rows)
            | 'DecodeCSV' >> beam.Map(converter.decode)
            | 'ReadImage' >> beam.ParDo(extract_images_fn))

        # Split dataset into train and validation.
        train_data, val_data, test_data, discard_data = (
            image_csv_data | 'SplitDataset' >> beam.Partition(
                _partition_fn, len(constants.SPLIT_VALUES)))

        train_dataset = (train_data, constants.RAW_METADATA)
        val_dataset = (val_data, constants.RAW_METADATA)
        test_dataset = (test_data, constants.RAW_METADATA)

        # TensorFlow Transform applied to all datasets.
        preprocessing_fn = functools.partial(_preprocessing_fn,
                                             integer_label=integer_label)
        transformed_train_dataset, transform_fn = (
            train_dataset
            | 'AnalyzeAndTransformTrain' >>
            tft_beam.AnalyzeAndTransformDataset(preprocessing_fn))

        transformed_train_data, transformed_metadata = transformed_train_dataset
        transformed_data_coder = tft.coders.ExampleProtoCoder(
            transformed_metadata.schema)

        transformed_val_data, _ = (
            (val_dataset, transform_fn)
            | 'TransformVal' >> tft_beam.TransformDataset())

        transformed_test_data, _ = (
            (test_dataset, transform_fn)
            | 'TransformTest' >> tft_beam.TransformDataset())

        # Sinks for TFRecords and metadata.
        tfr_writer = functools.partial(_get_write_to_tfrecord,
                                       output_dir=job_dir,
                                       compress=compression,
                                       num_shards=num_shards)

        _ = (transformed_train_data
             | 'EncodeTrainData' >> beam.Map(transformed_data_coder.encode)
             | 'WriteTrainData' >> tfr_writer(prefix='train'))

        _ = (transformed_val_data
             | 'EncodeValData' >> beam.Map(transformed_data_coder.encode)
             | 'WriteValData' >> tfr_writer(prefix='val'))

        _ = (transformed_test_data
             | 'EncodeTestData' >> beam.Map(transformed_data_coder.encode)
             | 'WriteTestData' >> tfr_writer(prefix='test'))

        _ = (discard_data
             | 'DiscardDataWriter' >> beam.io.WriteToText(
                 os.path.join(job_dir, 'discarded-data')))

        # Output transform function and metadata
        _ = (transform_fn
             | 'WriteTransformFn' >> tft_beam.WriteTransformFn(job_dir))

        # Output metadata schema
        _ = (transformed_metadata
             | 'WriteMetadata' >> tft_beam.WriteMetadata(job_dir, pipeline=p))

    return p
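beam.Partition above dispatches each row with a _partition_fn that is not shown. A rough sketch, assuming each decoded element is a dict whose 'split' column holds one of the names in constants.SPLIT_VALUES (the column name and the default values below are assumptions):

def _partition_fn(element, num_partitions,
                  split_values=('TRAIN', 'VALIDATION', 'TEST', 'DISCARD')):
    """Maps an element to a partition index based on its split column.

    Sketch only: TFRecorder's real partition logic is not shown here. Assumes
    the element dict carries a 'split' value matching one of split_values; the
    call site fixes num_partitions to len(constants.SPLIT_VALUES).
    """
    del num_partitions  # Supplied by beam.Partition; unused in this sketch.
    split = element.get('split')
    if isinstance(split, bytes):
        split = split.decode('utf-8')
    if split in split_values:
        return split_values.index(split)
    # Anything unrecognized falls into the last ("discard") partition.
    return len(split_values) - 1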
Example #26
0
def transform_data(bq_table,
                   step,
                   schema_file,
                   working_dir,
                   outfile_prefix,
                   max_rows=None,
                   transform_dir=None,
                   pipeline_args=None):
    """Analyzes and transforms BigQuery data with tf.transform.

    Args:
      bq_table: BigQuery table to read, specified as DATASET.TABLE; passed to
        sql_queries.get_train_test_sql_query together with step and max_rows.
      step: Split selector passed to sql_queries.get_train_test_sql_query to
        build the query.
      schema_file: A file path that contains a text-serialized TensorFlow
        metadata schema of the input data.
      working_dir: Directory in which transformed examples and the transform
        function will be emitted.
      outfile_prefix: Filename prefix for emitted transformed examples.
      max_rows: Number of rows to query from BigQuery.
      transform_dir: Directory in which the transform output is located. If
        provided, this will load the transform_fn from disk instead of
        computing it over the data. Hint: this is useful for transforming eval
        data.
      pipeline_args: additional DataflowRunner or DirectRunner args passed to
        the beam pipeline.
    """

    def preprocessing_fn(inputs):
        """tf.transform's callback function for preprocessing inputs.

        Args:
          inputs: map from feature keys to raw not-yet-transformed features.

        Returns:
          Map from string feature key to transformed feature operations.
        """
        outputs = {}
        for key in my_metadata.NUMERIC_FEATURE_KEYS:
            # Preserve this feature as a dense float, setting nan's to the mean.
            outputs[my_metadata.transformed_name(key)] = (
                transform.scale_to_z_score(_fill_in_missing(inputs[key])))

        for key in my_metadata.VOCAB_FEATURE_KEYS:
            # Build a vocabulary for this feature.
            outputs[my_metadata.transformed_name(key)] = transform.compute_and_apply_vocabulary(
                _fill_in_missing(inputs[key]),
                vocab_filename=my_metadata.transformed_name(key),
                num_oov_buckets=my_metadata.OOV_SIZE,
                top_k=my_metadata.VOCAB_SIZE
            )

        for key, hash_buckets in my_metadata.HASH_STRING_FEATURE_KEYS.items():
            outputs[my_metadata.transformed_name(key)] = transform.hash_strings(
                _fill_in_missing(inputs[key]),
                hash_buckets=hash_buckets
            )

        for key, nb_buckets in my_metadata.TO_BE_BUCKETIZED_FEATURE.items():
            outputs[my_metadata.transformed_name(key + '_bucketized')] = (
                transform.bucketize(_fill_in_missing(inputs[key]), nb_buckets))

        # Was this passenger a big tipper?
        taxi_fare = _fill_in_missing(inputs[my_metadata.FARE_KEY])
        tips = _fill_in_missing(inputs[my_metadata.LABEL_KEY])
        outputs[my_metadata.transformed_name(my_metadata.LABEL_KEY)] = tf.where(
            tf.is_nan(taxi_fare),
            tf.cast(tf.zeros_like(taxi_fare), tf.int64),
            # Test if the tip was > 20% of the fare.
            tf.cast(
                tf.greater(tips, tf.multiply(taxi_fare, tf.constant(0.2))),
                tf.int64))

        return outputs

    schema = my_metadata.read_schema(schema_file)
    raw_feature_spec = my_metadata.get_raw_feature_spec(schema)
    raw_schema = dataset_schema.from_feature_spec(raw_feature_spec)
    raw_data_metadata = dataset_metadata.DatasetMetadata(raw_schema)

    with beam.Pipeline(argv=pipeline_args) as pipeline:
        with tft_beam.Context(temp_dir=working_dir):
            query = sql_queries.get_train_test_sql_query(bq_table, step, max_rows)
            raw_data = (
                pipeline
                | 'ReadBigQuery' >> beam.io.Read(
                    beam.io.BigQuerySource(query=query, use_standard_sql=True))
                | 'CleanData' >> beam.Map(
                    my_metadata.clean_raw_data_dict,
                    raw_feature_spec=raw_feature_spec))

            if transform_dir is None:
                transform_fn = (
                    (raw_data, raw_data_metadata)
                    | 'Analyze' >> tft_beam.AnalyzeDataset(preprocessing_fn))

                _ = (
                    transform_fn
                    | 'WriteTransformFn' >> tft_beam.WriteTransformFn(working_dir))
            else:
                transform_fn = pipeline | tft_beam.ReadTransformFn(transform_dir)

            # Shuffling the data before materialization will improve Training
            # effectiveness downstream.
            shuffled_data = raw_data | 'RandomizeData' >> beam.transforms.Reshuffle()

            (transformed_data, transformed_metadata) = (
                ((shuffled_data, raw_data_metadata), transform_fn)
                | 'Transform' >> tft_beam.TransformDataset())

            coder = example_proto_coder.ExampleProtoCoder(
                transformed_metadata.schema)
            _ = (
                transformed_data
                | 'SerializeExamples' >> beam.Map(coder.encode)
                | 'WriteExamples' >> beam.io.WriteToTFRecord(
                    os.path.join(working_dir, outfile_prefix)))
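Downstream, the artifacts written by WriteTransformFn can be loaded with tft.TFTransformOutput to parse the materialized TFRecords. A minimal sketch (paths, prefix, and batch size are placeholders; pass compression_type='GZIP' to TFRecordDataset if the examples were written with a .gz suffix):

import os

import tensorflow as tf
import tensorflow_transform as tft

# Placeholders: point these at the working_dir / outfile_prefix used above.
working_dir = '/path/to/working_dir'
outfile_prefix = 'transformed_examples'

tf_transform_output = tft.TFTransformOutput(working_dir)
feature_spec = tf_transform_output.transformed_feature_spec()


def _parse(serialized):
    return tf.io.parse_single_example(serialized, feature_spec)


dataset = (
    tf.data.TFRecordDataset(
        tf.io.gfile.glob(os.path.join(working_dir, outfile_prefix + '*')))
    .map(_parse)
    .batch(32))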