Example no. 1
  def test_saving_with_dense_features(self):
    cols = [
        tf.feature_column.numeric_column('a'),
        tf.feature_column.indicator_column(
            tf.feature_column.categorical_column_with_vocabulary_list(
                'b', ['one', 'two']))
    ]
    input_layers = {
        'a': keras.layers.Input(shape=(1,), name='a'),
        'b': keras.layers.Input(shape=(1,), name='b', dtype='string')
    }

    fc_layer = dense_features.DenseFeatures(cols)(input_layers)
    output = keras.layers.Dense(10)(fc_layer)

    model = keras.models.Model(input_layers, output)

    model.compile(
        loss=keras.losses.MSE,
        optimizer='rmsprop',
        metrics=[keras.metrics.categorical_accuracy])

    config = model.to_json()
    loaded_model = model_config.model_from_json(config)

    inputs_a = np.arange(10).reshape(10, 1)
    inputs_b = np.arange(10).reshape(10, 1).astype('str')

    with self.cached_session():
      # Initialize tables for V1 lookup.
      if not tf.executing_eagerly():
        self.evaluate(tf.compat.v1.tables_initializer())

      self.assertLen(loaded_model.predict({'a': inputs_a, 'b': inputs_b}), 10)
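
These snippets are lifted from the Keras test suite, so the module aliases they use (`np`, `keras`, `model_config`, `dense_features`, `ksfc`, `input_layer_lib`, `functional`, `core`) are imported elsewhere in the original test files. A hedged sketch of the imports they appear to assume — exact module paths vary across TF/Keras versions, so treat these as a best guess rather than the canonical header:

```python
# Best-guess imports for the snippets in this listing; the paths follow the
# standalone Keras 2.x layout and may differ in other TF/Keras versions.
import numpy as np
import tensorflow as tf

import keras
from keras.engine import functional
from keras.engine import input_layer as input_layer_lib
from keras.feature_column import dense_features
from keras.feature_column import sequence_feature_column as ksfc
from keras.layers import core
from keras.saving import model_config
```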
Example no. 2
  def test_nested_layers(self):

    class MyLayer(keras.layers.Layer):

      def __init__(self, sublayers, **kwargs):
        super(MyLayer, self).__init__(**kwargs)
        self.sublayers = sublayers

      def get_config(self):
        config = super(MyLayer, self).get_config()
        config['sublayers'] = self.sublayers
        return config

    layer = MyLayer([keras.layers.Dense(2, name='MyDense'),
                     RegisteredSubLayer(name='MySubLayer')])
    model = keras.Sequential([keras.Input([None]), layer])
    model_json = model.to_json()

    self.assertIn('Foo>RegisteredSubLayer', model_json)

    loaded_model = model_config.model_from_json(
        model_json, custom_objects={'MyLayer': MyLayer})
    loaded_layer = loaded_model.layers[0]
    self.assertIsInstance(loaded_layer.sublayers[0], keras.layers.Dense)
    self.assertEqual(loaded_layer.sublayers[0].name, 'MyDense')
    self.assertIsInstance(loaded_layer.sublayers[1], RegisteredSubLayer)
    self.assertEqual(loaded_layer.sublayers[1].name, 'MySubLayer')
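
`RegisteredSubLayer` is defined elsewhere in the original test module; the `'Foo>RegisteredSubLayer'` assertion suggests it is registered under a `Foo` package. A minimal sketch of how such a layer could be declared, assuming `tf.keras.utils.register_keras_serializable`:

```python
import tensorflow as tf

# Registering under package 'Foo' makes the layer serialize by the name
# 'Foo>RegisteredSubLayer', so it round-trips through to_json() /
# model_from_json() without being listed in custom_objects.
@tf.keras.utils.register_keras_serializable(package='Foo')
class RegisteredSubLayer(tf.keras.layers.Layer):
  pass
```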
Example no. 3
    def testCompositeTypeSpecArgWithoutDtype(self):
        for assign_variant_dtype in [False, True]:
            # Create a Keras Input
            spec = TwoTensorsSpecNoOneDtype(
                (1, 2, 3),
                tf.float32,
                (1, 2, 3),
                tf.int64,
                assign_variant_dtype=assign_variant_dtype,
            )
            x = input_layer_lib.Input(type_spec=spec)

            def lambda_fn(tensors):
                return tf.cast(tensors.x, tf.float64) + tf.cast(
                    tensors.y, tf.float64)

            # Verify you can construct and use a model w/ this input
            model = functional.Functional(x, core.Lambda(lambda_fn)(x))

            # And that the model works
            two_tensors = TwoTensors(
                tf.ones((1, 2, 3)) * 2.0, tf.ones((1, 2, 3)))
            self.assertAllEqual(model(two_tensors), lambda_fn(two_tensors))

            # Test serialization / deserialization
            model = functional.Functional.from_config(model.get_config())
            self.assertAllEqual(model(two_tensors), lambda_fn(two_tensors))
            model = model_config.model_from_json(model.to_json())
            self.assertAllEqual(model(two_tensors), lambda_fn(two_tensors))
Example no. 4
    def test_saving_with_sequence_features(self):
        cols = [
            tf.feature_column.sequence_numeric_column('a'),
            tf.feature_column.indicator_column(
                tf.feature_column.
                sequence_categorical_column_with_vocabulary_list(
                    'b', ['one', 'two']))
        ]
        input_layers = {
            'a':
            keras.layers.Input(shape=(None, 1), sparse=True, name='a'),
            'b':
            keras.layers.Input(shape=(None, 1),
                               sparse=True,
                               name='b',
                               dtype='string')
        }

        fc_layer, _ = ksfc.SequenceFeatures(cols)(input_layers)
        # TODO(tibell): Figure out the right dtype and apply masking.
        # sequence_length_mask = array_ops.sequence_mask(sequence_length)
        # x = keras.layers.GRU(32)(fc_layer, mask=sequence_length_mask)
        x = keras.layers.GRU(32)(fc_layer)
        output = keras.layers.Dense(10)(x)

        model = keras.models.Model(input_layers, output)

        model.compile(loss=keras.losses.MSE,
                      optimizer='rmsprop',
                      metrics=[keras.metrics.categorical_accuracy])

        config = model.to_json()
        loaded_model = model_config.model_from_json(config)

        batch_size = 10
        timesteps = 1

        values_a = np.arange(10, dtype=np.float32)
        indices_a = np.zeros((10, 3), dtype=np.int64)
        indices_a[:, 0] = np.arange(10)
        inputs_a = tf.SparseTensor(indices_a, values_a,
                                   (batch_size, timesteps, 1))

        values_b = np.zeros(10, dtype=str)
        indices_b = np.zeros((10, 3), dtype=np.int64)
        indices_b[:, 0] = np.arange(10)
        inputs_b = tf.SparseTensor(indices_b, values_b,
                                   (batch_size, timesteps, 1))

        with self.cached_session():
            # Initialize tables for V1 lookup.
            if not tf.executing_eagerly():
                self.evaluate(tf.compat.v1.tables_initializer())

            self.assertLen(
                loaded_model.predict({
                    'a': inputs_a,
                    'b': inputs_b
                }, steps=1), batch_size)
Example no. 5
  def test_json_serialization(self):
    inputs = keras.Input(shape=(4,), dtype='uint8')
    outputs = tf.cast(inputs, 'float32') / 4.
    model = model_config.model_from_json(
        keras.Model(inputs, outputs).to_json())
    self.assertAllEqual(
        self.evaluate(model(np.array([0, 64, 128, 192], np.uint8))),
        [0., 16., 32., 48.])
    model.summary()
Example no. 6
def load_from_saved_model(saved_model_path, custom_objects=None):
    """Loads a keras Model from a SavedModel created by `export_saved_model()`.

    This function reinstantiates model state by:
    1) loading model topology from json (this will eventually come
       from metagraph).
    2) loading model weights from checkpoint.

    Example:

    ```python
    import tensorflow as tf

    # Create a tf.keras model.
    model = tf.keras.Sequential()
    model.add(tf.keras.layers.Dense(1, input_shape=[10]))
    model.summary()

    # Save the tf.keras model in the SavedModel format.
    path = '/tmp/simple_keras_model'
    tf.keras.experimental.export_saved_model(model, path)

    # Load the saved keras model back.
    new_model = tf.keras.experimental.load_from_saved_model(path)
    new_model.summary()
    ```

    Args:
      saved_model_path: a string specifying the path to an existing SavedModel.
      custom_objects: Optional dictionary mapping names (strings) to custom
        classes or functions to be considered during deserialization.

    Returns:
      a keras.Model instance.
    """
    warnings.warn(
        '`tf.keras.experimental.load_from_saved_model` is deprecated '
        'and will be removed in a future version. '
        'Please switch to `tf.keras.models.load_model`.',
        stacklevel=2)
    # restore model topology from json string
    model_json_filepath = tf.io.gfile.join(
        tf.compat.as_bytes(saved_model_path),
        tf.compat.as_bytes(tf.saved_model.ASSETS_DIRECTORY),
        tf.compat.as_bytes(SAVED_MODEL_FILENAME_JSON))
    with tf.io.gfile.GFile(model_json_filepath, 'r') as f:
        model_json = f.read()
    model = model_config.model_from_json(model_json,
                                         custom_objects=custom_objects)

    # restore model weights
    checkpoint_prefix = tf.io.gfile.join(
        tf.compat.as_text(saved_model_path),
        tf.compat.as_text(tf.saved_model.VARIABLES_DIRECTORY),
        tf.compat.as_text(tf.saved_model.VARIABLES_FILENAME))
    model.load_weights(checkpoint_prefix)
    return model
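
As the deprecation warning notes, the supported path in current releases is `tf.keras.models.load_model`. A minimal sketch of that replacement flow, with an illustrative path:

```python
import tensorflow as tf

# Build and save a small model in the SavedModel format.
model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=[10])])
model.save('/tmp/simple_keras_model')  # illustrative path

# Load it back with the non-deprecated API; pass custom_objects here if the
# model uses custom layers or functions.
new_model = tf.keras.models.load_model('/tmp/simple_keras_model')
new_model.summary()
```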
Example no. 7
    def testTypeSpecArg(self):
        # Create a Keras Input
        x = input_layer_lib.Input(type_spec=tf.TensorSpec((7, 32), tf.float32))
        self.assertAllEqual(x.shape.as_list(), [7, 32])

        # Verify you can construct and use a model w/ this input
        model = functional.Functional(x, x * 2.0)
        self.assertAllEqual(model(tf.ones(x.shape)), tf.ones(x.shape) * 2.0)

        # Test serialization / deserialization
        model = functional.Functional.from_config(model.get_config())
        self.assertAllEqual(model(tf.ones(x.shape)), tf.ones(x.shape) * 2.0)

        model = model_config.model_from_json(model.to_json())
        self.assertAllEqual(model(tf.ones(x.shape)), tf.ones(x.shape) * 2.0)
Example no. 8
    def testCompositeTypeSpecArg(self):
        # Create a Keras Input
        rt = tf.RaggedTensor.from_row_splits(values=[3, 1, 4, 1, 5, 9, 2, 6],
                                             row_splits=[0, 4, 4, 7, 8, 8])
        x = input_layer_lib.Input(type_spec=rt._type_spec)

        # Verify you can construct and use a model w/ this input
        model = functional.Functional(x, x * 2)

        # And that the model works
        rt = tf.RaggedTensor.from_row_splits(values=[3, 21, 4, 1, 53, 9, 2, 6],
                                             row_splits=[0, 4, 4, 7, 8, 8])
        self.assertAllEqual(model(rt), rt * 2)

        # Test serialization / deserialization
        model = functional.Functional.from_config(model.get_config())
        self.assertAllEqual(model(rt), rt * 2)
        model = model_config.model_from_json(model.to_json())
        self.assertAllEqual(model(rt), rt * 2)