Example #1
    def _saved_model_loader(self) -> saved_transform_io_v2.SavedModelLoader:
        """A `saved_transform_io_v2.SavedModelLoader`."""
        if self._saved_model_loader_value is None:
            self._saved_model_loader_value = saved_transform_io_v2.SavedModelLoader(
                self._tft_output.transform_savedmodel_dir)
            self._loaded_saved_model_graph = ops.get_default_graph()

        # TODO(b/160294509): Use tf.compat.v1 when we stop supporting TF 1.15.
        if ops.executing_eagerly_outside_functions():
            return self._saved_model_loader_value
        else:
            assert not self._exported_as_v1
            # TODO(b/149997088): Raise an exception once we no longer support using
            # the Keras layer with estimator based Trainer.
            tf.compat.v1.logging.warning(
                'Loading a TF2 SavedModel but eager mode '
                'seems disabled.')
            # If exported as TF2 SavedModel but not invoked in eager mode,
            # re-initialize the saved_model_loader_value as __init__ could have been
            # called in a different graph context.
            default_graph = ops.get_default_graph()
            if (self._loaded_saved_model_graph is None
                    or self._loaded_saved_model_graph is not default_graph):
                self._saved_model_loader_value = saved_transform_io_v2.SavedModelLoader(
                    self._tft_output.transform_savedmodel_dir)
                self._loaded_saved_model_graph = default_graph
            return self._saved_model_loader_value
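The property above rebuilds its cached loader whenever the default graph has changed since the loader was created. A minimal sketch of that per-graph caching pattern, with a hypothetical make_loader factory standing in for the SavedModelLoader constructor:

import tensorflow as tf

class _PerGraphCache:
    """Caches the result of a factory, rebuilding it per default graph."""

    def __init__(self, make_loader):
        self._make_loader = make_loader  # hypothetical factory callable
        self._cached = None
        self._cached_graph = None

    @property
    def value(self):
        default_graph = tf.compat.v1.get_default_graph()
        # Rebuild if never built, or if built under a different graph context.
        if self._cached is None or self._cached_graph is not default_graph:
            self._cached = self._make_loader()
            self._cached_graph = default_graph
        return self._cached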
Example #2
  def test_table_roundtrip(self, exported_in_tf1):
    input_specs = {'input': tf.TensorSpec([], dtype=tf.string)}

    def preprocessing_fn(inputs):
      table_keys = ['cat', 'dog', 'giraffe']
      initializer = tf.lookup.KeyValueTensorInitializer(
          keys=table_keys,
          values=tf.cast(tf.range(len(table_keys)), tf.int64),
          key_dtype=tf.string,
          value_dtype=tf.int64)
      table = tf.lookup.StaticHashTable(initializer, default_value=-1)
      return {'output': table.lookup(inputs['input'])}

    export_path = _create_test_saved_model(
        exported_in_tf1,
        input_specs,
        preprocessing_fn,
        base_dir=self.get_temp_dir())

    # Using a computed input gives confidence that the graphs are fused.
    input_string = tf.constant('dog')
    inputs = {'input': input_string}
    saved_model_loader = saved_transform_io_v2.SavedModelLoader(export_path)
    outputs = saved_model_loader.apply_transform_model(inputs)
    self.assertEqual(1, outputs['output'].numpy())
Example #3
    def test_sparse_roundtrip(self, exported_in_tf1):
        input_specs = {
            'input': tf.SparseTensorSpec([None, None, None], dtype=tf.float32)
        }

        def foo(inputs):
            return {'output': inputs['input'] / 5.0}

        export_path = _create_test_saved_model(exported_in_tf1, input_specs,
                                               foo)

        indices = np.array([[3, 2, 0], [4, 5, 1]], dtype=np.int64)
        values = np.array([1.0, 2.0], dtype=np.float32)
        shape = np.array([7, 9, 2], dtype=np.int64)
        input_sparse = tf.SparseTensor(indices=indices,
                                       values=values,
                                       dense_shape=shape)

        # Using a computed input gives confidence that the graphs are fused
        inputs = {'input': input_sparse * 10}
        saved_model_loader = saved_transform_io_v2.SavedModelLoader(
            export_path)
        outputs = saved_model_loader.apply_transform_model(inputs)
        result = outputs['output']
        self.assertIsInstance(result, tf.SparseTensor)

        # indices and shape unchanged; values multiplied by 10 and divided by 5
        self.assertEqual(indices.tolist(), result.indices.numpy().tolist())
        self.assertEqual([2.0, 4.0], result.values.numpy().tolist())
        self.assertEqual(shape.tolist(), result.dense_shape.numpy().tolist())
Example #4
    def test_table_roundtrip(self):
        export_path = os.path.join(tempfile.mkdtemp(), 'export')

        with tf.compat.v1.Graph().as_default():
            with tf.compat.v1.Session().as_default() as session:
                input_string = tf.compat.v1.placeholder(tf.string)
                # Map string through a table, in this case based on a constant tensor.
                table_keys = ['cat', 'dog', 'giraffe']
                initializer = tf.lookup.KeyValueTensorInitializer(
                    keys=table_keys,
                    values=tf.cast(tf.range(len(table_keys)), tf.int64),
                    key_dtype=tf.string,
                    value_dtype=tf.int64)
                table = tf.lookup.StaticHashTable(initializer,
                                                  default_value=-1)

                output = table.lookup(input_string)
                inputs = {'input': input_string}
                outputs = {'output': output}
                saved_transform_io.write_saved_transform_from_session(
                    session, inputs, outputs, export_path)

        # Using a computed input gives confidence that the graphs are fused.
        input_string = tf.constant('dog')
        inputs = {'input': input_string}
        saved_model_loader = saved_transform_io_v2.SavedModelLoader(
            export_path)
        outputs = saved_model_loader.apply_v1_transform_model_in_v2(inputs)
        self.assertEqual(1, outputs['output'].numpy())
Example #5
    def test_ragged_with_unfed(self, exported_in_tf1):
        input_specs = {
            'x': tf.RaggedTensorSpec([
                None,
                None,
            ], dtype=tf.float32),
            'y': tf.RaggedTensorSpec([
                None,
            ], dtype=tf.float32)
        }

        def preprocessing_fn(inputs):
            output = (inputs['x'] - 2.0) / 5.0
            return {
                'x_scaled': output,
                'x_in': inputs['x'],
                'y': inputs['y'] + 1
            }

        export_path = _create_test_saved_model(exported_in_tf1,
                                               input_specs,
                                               preprocessing_fn,
                                               base_dir=self.get_temp_dir())
        saved_model_loader = saved_transform_io_v2.SavedModelLoader(
            export_path)

        # Missing 'y'.
        input_features = {'x': tf.ragged.constant([[1237.0]], ragged_rank=1)}
        transformed_features = (
            saved_model_loader.apply_transform_model(input_features))
        self.assertCountEqual(['x_in', 'x_scaled'], list(transformed_features))
        self.assertAllEqual(transformed_features['x_scaled'].numpy(),
                            [[247.0]])
        self.assertAllEqual(transformed_features['x_in'].numpy(), [[1237.0]])
Example #6
    def __init__(self, tft_output):
        super(TransformFeaturesLayer, self).__init__(trainable=False)
        self._tft_output = tft_output
        # TODO(b/160294509): Use tf.compat.v1 when we stop supporting TF 1.15.
        if ops.executing_eagerly_outside_functions():
            _check_tensorflow_version()
            self._saved_model_loader = saved_transform_io_v2.SavedModelLoader(
                tft_output.transform_savedmodel_dir)
            # The model must be tracked by assigning to an attribute of the Keras
            # layer. Hence, we track the attributes of _saved_model_loader here as
            # well.
            self._saved_model_loader_tracked_dict = self._saved_model_loader.__dict__
        else:
            self._saved_model_loader = None

        # TODO(b/162055065): This is needed because otherwise we'd get an error in
        # some cases:
        # ValueError: Your Layer or Model is in an invalid state. This can happen
        # if you are interleaving estimator/non-estimator models or interleaving
        # models/layers made in tf.compat.v1.Graph.as_default() with models/layers
        # created outside of it. Converting a model to an estimator (via
        # model_to_estimator) invalidates all models/layers made before the
        # conversion (even if they were not the model converted to an estimator).
        # Similarly, making a layer or a model inside a tf.compat.v1.Graph
        # invalidates all layers/models you previously made outside of the graph.
        if (not self._saved_model_loader
                or self._saved_model_loader.load_v2_in_compat):
            self._originally_built_as_v1 = True
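The tracking comment above reflects a general Keras rule: trackable objects become dependencies of a layer only when assigned to one of its attributes. A minimal sketch of the pattern, assuming a SavedModel exported with a 'serving_default' signature (the class name and path are hypothetical):

import tensorflow as tf

class LoadedModelLayer(tf.keras.layers.Layer):
    """Wraps a loaded SavedModel so Keras tracks its variables and assets."""

    def __init__(self, saved_model_dir):
        super().__init__(trainable=False)
        # Attribute assignment registers the loaded object (and its variables,
        # tables and asset files) as a tracked dependency of this layer.
        self._loaded = tf.saved_model.load(saved_model_dir)

    def call(self, inputs):
        # SavedModel signatures are invoked with keyword arguments.
        return self._loaded.signatures['serving_default'](**inputs)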
Example #7
    def test_ragged_roundtrip(self, exported_in_tf1):
        if not hasattr(meta_graph_pb2.TensorInfo, 'CompositeTensor'):
            self.skipTest('This version of TensorFlow does not support '
                          'CompositeTensors in TensorInfo.')
        input_specs = {
            'input':
            tf.RaggedTensorSpec(shape=[None, None],
                                dtype=tf.float32,
                                ragged_rank=1,
                                row_splits_dtype=tf.int64)
        }

        def foo(inputs):
            return {'output': inputs['input'] / 2.0}

        export_path = _create_test_saved_model(exported_in_tf1, input_specs,
                                               foo)

        splits = np.array([0, 2, 3], dtype=np.int64)
        values = np.array([1.0, 2.0, 4.0], dtype=np.float32)
        input_ragged = tf.RaggedTensor.from_row_splits(values, splits)

        # Using a computed input gives confidence that the graphs are fused
        inputs = {'input': input_ragged * 10}
        saved_model_loader = saved_transform_io_v2.SavedModelLoader(
            export_path)
        outputs = saved_model_loader.apply_transform_model(inputs)
        result = outputs['output']
        self.assertIsInstance(result, tf.RaggedTensor)

        # row_splits unchanged; values multiplied by 10 and divided by 2
        self.assertAllEqual(splits, result.row_splits)
        self.assertEqual([5.0, 10.0, 20.0], result.values.numpy().tolist())
Example #8
    def test_stale_asset_collections_are_cleaned(self):
        exported_in_tf_1 = False
        vocabulary_file = os.path.join(tempfile.mkdtemp(), 'asset')
        file_io.write_string_to_file(vocabulary_file, 'foo bar baz')

        input_specs = {'input': tf.TensorSpec([], dtype=tf.string)}

        def foo(inputs):
            initializer = tf.lookup.TextFileInitializer(
                vocabulary_file,
                key_dtype=tf.string,
                key_index=tf.lookup.TextFileIndex.WHOLE_LINE,
                value_dtype=tf.int64,
                value_index=tf.lookup.TextFileIndex.LINE_NUMBER)
            table = tf.lookup.StaticHashTable(initializer, default_value=12)
            return {'output': table.lookup(inputs['input'])}

        export_path = _create_test_saved_model(exported_in_tf_1, input_specs,
                                               foo)

        # Load it and save it again repeatedly, verifying that the asset collections
        # remain valid.
        for it in [1, 2, 3]:
            input_string = tf.constant('dog')
            inputs = {'input': input_string}
            saved_model_loader = saved_transform_io_v2.SavedModelLoader(
                export_path)
            outputs = saved_model_loader.apply_transform_model(inputs)
            self.assertEqual(12, outputs['output'])

            new_export_path = os.path.join(tempfile.mkdtemp(),
                                           'export_' + str(it))
            tf.saved_model.save(saved_model_loader._imported, new_export_path)
            shutil.rmtree(export_path)
            export_path = new_export_path
Example #9
    def test_ragged_roundtrip(self):
        if not hasattr(meta_graph_pb2.TensorInfo, 'CompositeTensor'):
            self.skipTest('This version of TensorFlow does not support '
                          'CompositeTensors in TensorInfo.')
        export_path = os.path.join(tempfile.mkdtemp(), 'export')

        with tf.compat.v1.Graph().as_default():
            with tf.compat.v1.Session().as_default() as session:
                input_float = tf.compat.v1.ragged.placeholder(tf.float32,
                                                              ragged_rank=1,
                                                              value_shape=[])
                output = input_float / 2.0
                inputs = {'input': input_float}
                outputs = {'output': output}
                saved_transform_io.write_saved_transform_from_session(
                    session, inputs, outputs, export_path)

        splits = np.array([0, 2, 3], dtype=np.int64)
        values = np.array([1.0, 2.0, 4.0], dtype=np.float32)
        input_ragged = tf.RaggedTensor.from_row_splits(values, splits)

        # Using a computed input gives confidence that the graphs are fused
        inputs = {'input': input_ragged * 10}
        saved_model_loader = saved_transform_io_v2.SavedModelLoader(
            export_path)
        outputs = saved_model_loader.apply_v1_transform_model_in_v2(inputs)
        result = outputs['output']
        self.assertIsInstance(result, tf.RaggedTensor)

        # row_splits unchanged; values multiplied by 10 and divided by 2
        self.assertAllEqual(splits, result.row_splits)
        self.assertEqual([5.0, 10.0, 20.0], result.values.numpy().tolist())
Example #10
    def test_sparse_roundtrip(self):
        export_path = os.path.join(tempfile.mkdtemp(), 'export')

        with tf.compat.v1.Graph().as_default():
            with tf.compat.v1.Session().as_default() as session:
                input_float = tf.compat.v1.sparse_placeholder(tf.float32)
                output = input_float / 5.0
                inputs = {'input': input_float}
                outputs = {'output': output}
                saved_transform_io.write_saved_transform_from_session(
                    session, inputs, outputs, export_path)

        indices = np.array([[3, 2, 0], [4, 5, 1]], dtype=np.int64)
        values = np.array([1.0, 2.0], dtype=np.float32)
        shape = np.array([7, 9, 2], dtype=np.int64)
        input_sparse = tf.SparseTensor(indices=indices,
                                       values=values,
                                       dense_shape=shape)

        # Using a computed input gives confidence that the graphs are fused
        inputs = {'input': input_sparse * 10}
        saved_model_loader = saved_transform_io_v2.SavedModelLoader(
            export_path)
        outputs = saved_model_loader.apply_v1_transform_model_in_v2(inputs)
        result = outputs['output']
        self.assertIsInstance(result, tf.SparseTensor)

        # indices and shape unchanged; values multiplied by 10 and divided by 5
        self.assertEqual(indices.tolist(), result.indices.numpy().tolist())
        self.assertEqual([2.0, 4.0], result.values.numpy().tolist())
        self.assertEqual(shape.tolist(), result.dense_shape.numpy().tolist())
Example #11
 def __init__(self, tft_output):
     super(TransformFeaturesLayer, self).__init__(trainable=False)
     self._check_tensorflow_version()
     self._tft_output = tft_output
     self._saved_model_loader = saved_transform_io_v2.SavedModelLoader(
         tft_output.transform_savedmodel_dir)
     # The model must be tracked by assigning to an attribute of the Keras layer.
     # Hence, we track the attributes of _saved_model_loader here as well.
     self._saved_model_loader_tracked_dict = self._saved_model_loader.__dict__
Example #12
    def setUpClass(cls):
        test_case.skip_if_not_tf2('Tensorflow 2.x required.')
        input_specs = {
            'x': tf.TensorSpec([
                None,
            ], dtype=tf.float32)
        }

        def foo(inputs):
            output = (inputs['x'] - 2.0) / 5.0
            return {'x_scaled': output}

        cls._saved_model_path_v1 = _create_test_saved_model(
            True, input_specs, foo, 'export_v1')
        cls._saved_model_loader_v1 = saved_transform_io_v2.SavedModelLoader(
            cls._saved_model_path_v1)
        cls._saved_model_path_v2 = _create_test_saved_model(
            False, input_specs, foo, 'export_v2')
        cls._saved_model_loader_v2 = saved_transform_io_v2.SavedModelLoader(
            cls._saved_model_path_v2)
Example #13
 def __init__(self, tft_output):
   super(TransformFeaturesLayer, self).__init__(trainable=False)
   self._tft_output = tft_output
   # TODO(b/160294509): Use tf.compat.v1 when we stop supporting TF 1.15.
   if ops.executing_eagerly_outside_functions():
     self._check_tensorflow_version()
     self._saved_model_loader = saved_transform_io_v2.SavedModelLoader(
         tft_output.transform_savedmodel_dir)
     # The model must be tracked by assigning to an attribute of the Keras
     # layer. Hence, we track the attributes of _saved_model_loader here as
     # well.
     self._saved_model_loader_tracked_dict = self._saved_model_loader.__dict__
Example #14
    def benchmarkTF2RunMetagraphDoFnAtTFLevel(self):
        """Benchmark RunMetaGraphDoFn at the TF level for TFT's TF2 implementation.

    Benchmarks the parts of RunMetaGraphDoFn that involve feeding and
    fetching from the TFT SavedModel. Records the wall time taken.

    Note that this benchmark necessarily duplicates code directly from TFT
    since it's benchmarking the low-level internals of TFT, which are not
    exposed for use in this way.
    """
        common_variables = _get_common_variables(self._dataset,
                                                 force_tf_compat_v1=False)
        tensor_adapter_config = common_variables.tfxio.TensorAdapterConfig()

        # This block copied from _GraphStateV2.__init__
        saved_model_loader = saved_transform_io_v2.SavedModelLoader(
            self._dataset.tft_saved_model_path(force_tf_compat_v1=False))
        callable_get_outputs = saved_model_loader.apply_transform_model
        # We ignore the schema, and assume there are no excluded outputs.
        outputs_tensor_keys = set(saved_model_loader.structured_outputs.keys())
        saved_model_loader.finalize(
            tensor_adapter_config.tensor_representations.keys(),
            outputs_tensor_keys)

        batch_size, batched_records = _get_batched_records(
            self._dataset,
            force_tf_compat_v1=False,
            max_num_examples=self._max_num_examples())

        input_tensor_adapter = tensor_adapter.TensorAdapter(
            tensor_adapter_config)

        # This block copied from _RunMetaGraphDoFn._handle_batch
        start = time.time()
        for batch in batched_records:
            feed_dict = input_tensor_adapter.ToBatchTensors(
                batch, produce_eager_tensors=True)
            _ = callable_get_outputs(feed_dict)
        end = time.time()
        delta = end - start

        self.report_benchmark(
            iters=1,
            wall_time=delta,
            extras={
                "batch_size":
                batch_size,
                "num_examples":
                self._dataset.num_examples(limit=self._max_num_examples())
            })
Example #15
    def test_dense_roundtrip(self, exported_in_tf1):
        input_specs = {'input': tf.TensorSpec([], dtype=tf.float32)}

        def foo(inputs):
            return {'output': inputs['input'] / 5.0}

        export_path = _create_test_saved_model(exported_in_tf1, input_specs,
                                               foo)

        # Using a computed input gives confidence that the graphs are fused.
        input_float = tf.constant(25.0) * 2
        inputs = {'input': input_float}
        saved_model_loader = saved_transform_io_v2.SavedModelLoader(
            export_path)
        outputs = saved_model_loader.apply_transform_model(inputs)
        # (25 * 2) / 5 = 10
        self.assertEqual(10.0, outputs['output'].numpy())
Example #16
    def test_finalize(self):
        input_keys = ['x']
        output_keys = ['x_scaled']

        input_specs = {
            'x': tf.TensorSpec([
                None,
            ], dtype=tf.float32),
            'y': tf.TensorSpec([
                None,
            ], dtype=tf.float32)
        }

        def preprocessing_fn(inputs):
            output = (inputs['x'] - 2.0) / 5.0
            return {
                'x_scaled': output,
                'x_in': inputs['x'],
                'y': inputs['y'] + 1
            }

        export_path = _create_test_saved_model(False,
                                               input_specs,
                                               preprocessing_fn,
                                               base_dir=self.get_temp_dir())
        saved_model_loader = saved_transform_io_v2.SavedModelLoader(
            export_path)

        input_features = {'x': tf.constant([1237.0])}  # tf.float32
        transformed_features = (
            saved_model_loader.apply_transform_model(input_features))
        self.assertCountEqual(['x_in', 'x_scaled'], list(transformed_features))
        self.assertAllEqual(transformed_features['x_scaled'].numpy(), [247.0])
        self.assertAllEqual(transformed_features['x_in'].numpy(), [1237.0])

        # Since `finalize` is not thread-safe it is not recommended to call it after
        # `apply_transform_model` has already been invoked. This is only for unit
        # testing behavior differences.
        saved_model_loader.finalize(input_keys, output_keys)
        transformed_features = (
            saved_model_loader.apply_transform_model(input_features))
        self.assertEqual(['x_scaled'], list(transformed_features))
        self.assertAllEqual(transformed_features['x_scaled'].numpy(), [247.0])
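A minimal sketch of the recommended order for the same model: call finalize once, before the first apply_transform_model call, so unfed inputs and unwanted outputs are pruned up front:

loader = saved_transform_io_v2.SavedModelLoader(export_path)
loader.finalize(['x'], ['x_scaled'])  # input keys, then output keys
features = loader.apply_transform_model({'x': tf.constant([1237.0])})
assert list(features) == ['x_scaled']  # (1237.0 - 2.0) / 5.0 == 247.0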
Example #17
    def test_dense_roundtrip(self):
        export_path = os.path.join(tempfile.mkdtemp(), 'export')

        with tf.compat.v1.Graph().as_default():
            with tf.compat.v1.Session().as_default() as session:
                input_float = tf.compat.v1.placeholder(tf.float32)
                # show that unrelated & unmapped placeholders do not interfere
                tf.compat.v1.placeholder(tf.int64)
                output = input_float / 5.0
                inputs = {'input': input_float}
                outputs = {'output': output}
                saved_transform_io.write_saved_transform_from_session(
                    session, inputs, outputs, export_path)

        # Using a computed input gives confidence that the graphs are fused.
        input_float = tf.constant(25.0) * 2
        inputs = {'input': input_float}
        saved_model_loader = saved_transform_io_v2.SavedModelLoader(
            export_path)
        outputs = saved_model_loader.apply_v1_transform_model_in_v2(inputs)
        # (25 * 2) / 5 = 10
        self.assertEqual(10.0, outputs['output'].numpy())
Example #18
    def transform_raw_features(self, raw_features, drop_unused_features=False):
        """Takes a dict of tensors representing raw features and transforms them.

    Takes a dictionary of `Tensor`s or `SparseTensor`s that represent the raw
    features, and applies the transformation defined by tf.Transform.

    By default it returns all transformed features defined by tf.Transform. To
    only return features transformed from the given 'raw_features', set
    `drop_unused_features` to True.

    Args:
      raw_features: A dict whose keys are feature names and values are `Tensor`s
        or `SparseTensor`s.
      drop_unused_features: If True, the result will be filtered. Only the
        features that are transformed from 'raw_features' will be included in
        the returned result. If a feature is transformed from multiple raw
        features (e.g., feature cross), it will only be included if all its base
        raw features are present in `raw_features`.

    Returns:
      A dict whose keys are feature names and values are `Tensor`s or
          `SparseTensor`s representing transformed features.
    """
        if ops.executing_eagerly_outside_functions():
            if self._v2_saved_model_loader is None:
                self._v2_saved_model_loader = saved_transform_io_v2.SavedModelLoader(
                    self.transform_savedmodel_dir)

        if (self._v2_saved_model_loader is not None
                and not self._v2_saved_model_loader.load_v2_in_compat):
            if not drop_unused_features:
                tf.compat.v1.logging.warning(
                    'Unused features are always dropped in the TF 2.x '
                    'implementation. Ignoring value of drop_unused_features.')
            return self._v2_saved_model_loader.apply_transform_model(
                raw_features)
        else:
            return self._transform_raw_features_compat_v1(
                raw_features, drop_unused_features)
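A minimal usage sketch of this method through tft.TFTransformOutput; the transform output directory is hypothetical, and note that in the TF 2.x code path unused features are always dropped:

import tensorflow as tf
import tensorflow_transform as tft

tft_output = tft.TFTransformOutput('/tmp/transform_output')  # hypothetical path
raw_features = {'x': tf.constant([1.0, 2.0])}
transformed = tft_output.transform_raw_features(
    raw_features, drop_unused_features=True)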
Example #19
 def setUpClass(cls):
     tft_test_case.skip_if_not_tf2('Tensorflow 2.x required.')
     cls._test_saved_model = _create_test_saved_model()
     cls._saved_model_loader = saved_transform_io_v2.SavedModelLoader(
         cls._test_saved_model)
Example #20
 def _get_saved_model_loader(self, exported_in_tf1):
   if exported_in_tf1:
     return saved_transform_io_v2.SavedModelLoader(self._saved_model_path_v1)
   return saved_transform_io_v2.SavedModelLoader(self._saved_model_path_v2)
Example #21
  def test_re_export_tf2_saved_model_to_tf1(self,
                                            preprocessing_fn_getter,
                                            expected_output,
                                            test_input,
                                            asset_file_contents=None):

    asset_file = None
    if asset_file_contents is not None:
      asset_file_path = os.path.join(
          tempfile.mkdtemp(dir=self.get_temp_dir()), 'asset')
      file_io.write_string_to_file(asset_file_path, asset_file_contents)
      asset_file = tf.constant(asset_file_path)

    input_specs = {'input': tf.TensorSpec([], dtype=tf.string)}
    export_path = _create_test_saved_model(
        False,
        input_specs,
        preprocessing_fn_getter(asset_file),
        base_dir=self.get_temp_dir())

    if asset_file is not None:
      os.remove(asset_file.numpy())
    new_export_path = os.path.join(
        tempfile.mkdtemp(dir=self.get_temp_dir()), 'export_v1')

    builder = tf.compat.v1.saved_model.builder.SavedModelBuilder(
        new_export_path)
    # TODO(b/175844561): Investigate why the variable names need to be different
    # for the two graph and session contexts below.
    with tf.compat.v1.Graph().as_default() as g1:
      saved_model_loader = saved_transform_io_v2.SavedModelLoader(export_path)
      if asset_file_contents is not None:
        self.assertEqual(
            1, len(g1.get_collection(tf.compat.v1.GraphKeys.ASSET_FILEPATHS)))
      with tf.compat.v1.Session().as_default() as s1:
        inputs = {'input': tf.compat.v1.placeholder(tf.string)}
        outputs = saved_model_loader.apply_transform_model(inputs)
        predict_signature_def = (
            tf.compat.v1.saved_model.signature_def_utils.predict_signature_def(
                inputs, outputs))
        builder.add_meta_graph_and_variables(
            s1, ['graph_tag'],
            signature_def_map={'graph_signature': predict_signature_def},
            assets_collection=tf.compat.v1.get_collection(
                tf.compat.v1.GraphKeys.ASSET_FILEPATHS),
            main_op=tf.compat.v1.tables_initializer())
    builder.save()

    shutil.rmtree(export_path)

    with tf.compat.v1.Graph().as_default() as g2:
      with tf.compat.v1.Session().as_default() as s2:
        meta_graph_def = tf.compat.v1.saved_model.loader.load(
            s2, ['graph_tag'], new_export_path)
        signature = meta_graph_def.signature_def['graph_signature']
        output = s2.run(
            g2.get_tensor_by_name(signature.outputs['output'].name),
            feed_dict={
                g2.get_tensor_by_name(signature.inputs['input'].name):
                    test_input
            })
        self.assertEqual(expected_output, output)
        if asset_file_contents is not None:
          self.assertEqual(
              1, len(g2.get_collection(tf.compat.v1.GraphKeys.ASSET_FILEPATHS)))