Example #1
  def test_sequential_model_saving_without_input_shape(self):
    saved_model_dir = self._save_model_dir()
    save_format = test_utils.get_save_format()
    with self.cached_session():
      model = keras.models.Sequential()
      model.add(keras.layers.Dense(2))
      model.add(keras.layers.RepeatVector(3))
      model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
      model.compile(
          loss=keras.losses.MSE,
          optimizer='rmsprop',
          metrics=[
              keras.metrics.categorical_accuracy,
              keras.metrics.CategoricalAccuracy(name='cat_acc')
          ],
          weighted_metrics=[
              keras.metrics.categorical_accuracy,
              keras.metrics.CategoricalAccuracy(name='cat_acc2')
          ],
          sample_weight_mode='temporal')
      x = np.random.random((1, 3))
      y = np.random.random((1, 3, 3))
      model.train_on_batch(x, y)

      out = model.predict(x)
      model.save(saved_model_dir, save_format=save_format)

      new_model = keras.models.load_model(saved_model_dir)

      self._assert_same_weights_and_metrics(model, new_model)

      out2 = new_model.predict(x)
      self.assertAllClose(out, out2, atol=1e-05)
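All of the examples in this listing are methods of a Keras model-saving test case and rely on shared imports and scaffolding that the snippets do not show. A minimal sketch of the assumed setup follows; the internal Keras module paths have moved between releases, so treat them as assumptions rather than the exact header of the original test file.

import collections
import os
import shutil
import tempfile
import warnings

import h5py
import numpy as np
import tensorflow.compat.v2 as tf

import keras
# The helpers referenced in the snippets (test_utils, generic_utils,
# optimizer_v1, optimizers, functional, model_architectures) live in
# Keras-internal modules whose exact paths differ across versions
# (e.g. keras.testing_infra.test_utils in newer releases); they are
# listed here only as assumptions.

Each test method additionally belongs to a subclass of tf.test.TestCase, which provides self.cached_session(), self.assertAllClose(), and the other assertion helpers used throughout.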
Example #2
  def _assert_same_weights_and_metrics(self, model, loaded_model):
    """Checks that the loaded weights and metrics are the same as the original.

    Args:
      model: original model
      loaded_model: loaded model
    """
    self.assertAllClose(model.weights, loaded_model.weights)

    if loaded_model.optimizer:
      if test_utils.get_save_format() == 'tf':
        # TODO(b/153110928): Keras TF format doesn't restore optimizer weights
        # currently.
        return
      self.assertAllClose(model.optimizer.weights,
                          loaded_model.optimizer.weights)

    # In V1/Graph mode, the model isn't built, so the metrics are not loaded
    # immediately (requires model to be called on some data before building
    # metrics).
    check_metrics = tf.__internal__.tf2.enabled() and tf.executing_eagerly()

    if check_metrics:
      self.assertAllEqual([m.name for m in model.metrics],
                          [m.name for m in loaded_model.metrics])
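Every example calls self._save_model_dir(), a small helper on the test case that the snippets do not define. A minimal sketch of such a helper, assuming the standard tf.test.TestCase utilities (the default directory name is an assumption):

  def _save_model_dir(self, dirname='saved_model'):
    # Allocate a fresh temporary directory for this test and register a
    # cleanup so it is removed again when the test finishes.
    temp_dir = self.get_temp_dir()  # provided by tf.test.TestCase
    self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
    return os.path.join(temp_dir, dirname)

Because the helper returns a path that does not exist yet, the same call works for both the HDF5 format (which writes a single file) and the SavedModel format (which writes a directory).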
Example #3
  def test_functional_model_with_getitem_op_layer(self):
    inp = keras.Input(shape=(8))

    out = inp[:]
    model = keras.Model(
        inputs=[inp],
        outputs=out)
    batch_size = 7
    x = tf.stack([
        tf.range(8) for _ in range(batch_size)])
    args = [x]
    expected = x[:]

    self.assertAllEqual(model(args), expected)
    self.assertAllEqual(model.predict(args, batch_size=batch_size), expected)

    # Make sure it can be successfully saved and loaded.
    save_format = test_utils.get_save_format()
    saved_model_dir = self._save_model_dir()
    keras.models.save_model(model, saved_model_dir, save_format=save_format)

    loaded_model = keras.models.load_model(saved_model_dir)

    self.assertAllEqual(loaded_model(args), expected)
    self.assertAllEqual(loaded_model.predict(args, batch_size=batch_size),
                        expected)
Example #4
    def test_basic_saving_and_loading(self, model_fn):
        save_format = test_utils.get_save_format()
        custom_objects = self.get_custom_objects()
        if "subclassed_in_functional" in model_fn.__name__:
            subclass_custom_objects = {
                "MySubclassModel": model_architectures.MySubclassModel,
            }
            custom_objects.update(subclass_custom_objects)
        elif "subclassed" in model_fn.__name__ and save_format == "h5":
            self.skipTest(
                "Saving the model to HDF5 format requires the model to be "
                "a Functional model or a Sequential model."
            )

        saved_model_dir = self._save_model_dir()
        model_data = model_fn()
        model = model_data.model
        x_test, y_test = self.get_test_data(
            model_data.input_shape, model_data.target_shape
        )
        model.compile("rmsprop", "mse")
        model.train_on_batch(x_test, y_test)

        # Save model.
        out1 = model.predict(x_test)
        keras.models.save_model(model, saved_model_dir, save_format=save_format)
        # Load model.
        loaded_model = keras.models.load_model(
            saved_model_dir, custom_objects=custom_objects
        )
        out2 = loaded_model.predict(x_test)

        self.assertAllClose(out1, out2, atol=1e-05)
Example #5
    def test_weight_loading(self):
        saved_model_dir = self._save_model_dir()
        save_format = test_utils.get_save_format()
        with self.cached_session():
            a = keras.layers.Input(shape=(2, ))
            x = keras.layers.Dense(3)(a)
            b = keras.layers.Dense(1)(x)
            model = keras.models.Model(a, b)

            x = np.random.random((3, 2))
            ref_y = model.predict(x)
            weights = model.get_weights()
            model.set_weights(weights)
            y = model.predict(x)
            self.assertAllClose(ref_y, y)

            with self.assertRaises(ValueError):
                model.set_weights(weights[1:])
            with self.assertRaises(ValueError):
                model.set_weights(weights[::-1])

            model.save_weights(saved_model_dir, save_format=save_format)
            model.load_weights(saved_model_dir)
            y = model.predict(x)
            self.assertAllClose(ref_y, y)
Example #6
  def test_sequential_model_saving_2(self):
    saved_model_dir = self._save_model_dir()
    save_format = test_utils.get_save_format()

    with tf.Graph().as_default(), self.cached_session():
      # Test with a custom optimizer and a custom loss.

      class CustomOp(optimizer_v1.RMSprop):
        pass

      def custom_loss(y_true, y_pred):
        return keras.losses.mse(y_true, y_pred)

      model = keras.models.Sequential()
      model.add(keras.layers.Dense(2, input_shape=(3,)))
      model.add(keras.layers.Dense(3))
      model.compile(loss=custom_loss, optimizer=CustomOp(), metrics=['acc'])

      x = np.random.random((1, 3))
      y = np.random.random((1, 3))
      model.train_on_batch(x, y)

      out = model.predict(x)
      keras.models.save_model(model, saved_model_dir, save_format=save_format)

      new_model = keras.models.load_model(
          saved_model_dir,
          custom_objects={'CustomOp': CustomOp,
                          'custom_loss': custom_loss})
      self._assert_same_weights_and_metrics(model, new_model)

      out2 = new_model.predict(x)
      self.assertAllClose(out, out2, atol=1e-05)
Example #7
  def test_nested_model_weight_loading(self):
    save_format = test_utils.get_save_format()
    saved_model_dir = self._save_model_dir()

    batch_size = 5
    shape = (None, None, 3)

    with self.cached_session():
      def gen_model():

        def seq_model():
          model = keras.models.Sequential([
              keras.layers.Conv2D(3, 1, input_shape=shape),
              keras.layers.BatchNormalization()])
          return model

        x = inner_inputs = keras.layers.Input((None, None, 3))
        x = seq_model()(x)
        x = seq_model()(x)
        inner_model = keras.models.Model(inner_inputs, x)

        inputs = keras.layers.Input(shape)
        return keras.models.Model(inputs, inner_model(inputs))

      model = gen_model()
      x = np.random.random((batch_size, 1, 1, 3))
      ref_y = model.predict(x)

      model.save_weights(saved_model_dir, save_format=save_format)

      model = gen_model()
      model.load_weights(saved_model_dir)
      y = model.predict(x)

      self.assertAllClose(y, ref_y)
Example #8
  def test_warning_when_saving_invalid_custom_mask_layer(self):

    class MyMasking(keras.layers.Layer):

      def call(self, inputs):
        return inputs

      def compute_mask(self, inputs, mask=None):
        mask = tf.not_equal(inputs, 0)
        return mask

    class MyLayer(keras.layers.Layer):

      def call(self, inputs, mask=None):
        return tf.identity(inputs)

    samples = np.random.random((2, 2))
    model = keras.Sequential([MyMasking(), MyLayer()])
    model.predict(samples)
    with warnings.catch_warnings(record=True) as w:
      model.save(self._save_model_dir(), test_utils.get_save_format())
    self.assertIn(generic_utils.CustomMaskWarning,
                  {warning.category for warning in w})

    # Test that setting up a custom mask correctly does not issue a warning.
    class MyCorrectMasking(keras.layers.Layer):

      def call(self, inputs):
        return inputs

      def compute_mask(self, inputs, mask=None):
        mask = tf.not_equal(inputs, 0)
        return mask

      # This get_config doesn't actually do anything because our mask is
      # static and doesn't need any external information to work. We do need a
      # dummy get_config method to prevent the warning from appearing, however.
      def get_config(self, *args, **kwargs):
        return {}

    model = keras.Sequential([MyCorrectMasking(), MyLayer()])
    model.predict(samples)
    with warnings.catch_warnings(record=True) as w:
      model.save(self._save_model_dir(), test_utils.get_save_format())
    self.assertNotIn(generic_utils.CustomMaskWarning,
                     {warning.category for warning in w})
Example #9
  def test_saving_without_compilation(self):
    saved_model_dir = self._save_model_dir()
    save_format = test_utils.get_save_format()
    model = keras.models.Sequential()
    model.add(keras.layers.Dense(2, input_shape=(3,)))
    model.add(keras.layers.Dense(3))
    model.compile(loss='mse', optimizer='sgd', metrics=['acc'])

    keras.models.save_model(model, saved_model_dir, save_format=save_format)
    model = keras.models.load_model(saved_model_dir)
Example #10
  def test_saving_right_after_compilation(self):
    saved_model_dir = self._save_model_dir()
    save_format = test_utils.get_save_format()
    with self.cached_session():
      model = keras.models.Sequential()
      model.add(keras.layers.Dense(2, input_shape=(3,)))
      model.add(keras.layers.Dense(3))
      model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
      if not tf.compat.v1.executing_eagerly_outside_functions():
        model._make_train_function()
      keras.models.save_model(model, saved_model_dir, save_format=save_format)
      model = keras.models.load_model(saved_model_dir)
Example #11
  def test_saving_with_tf_optimizer(self):
    saved_model_dir = self._save_model_dir()
    save_format = test_utils.get_save_format()

    model = keras.models.Sequential()
    model.add(keras.layers.Dense(2, input_shape=(3,)))
    model.add(keras.layers.Dense(3))
    model.compile(loss='mse',
                  optimizer=tf.compat.v1.train.AdadeltaOptimizer(0.1),
                  metrics=['acc'])

    keras.models.save_model(model, saved_model_dir, save_format=save_format)
    model = keras.models.load_model(saved_model_dir)
Example #12
  def test_save_and_load(self):
    saved_model_dir = self._save_model_dir()
    save_format = test_utils.get_save_format()
    save_kwargs = test_utils.get_save_kwargs()

    if ((save_format == 'h5' or not save_kwargs.get('save_traces', True)) and
        test_utils.get_model_type() == 'subclass'):
      # HDF5 format currently does not allow saving subclassed models.
      # When saving with `save_traces=False`, the subclassed model must have a
      # get_config/from_config, which the autogenerated model does not have.
      return

    with self.cached_session():
      model = test_utils.get_model_from_layers(
          [keras.layers.Dense(2),
           keras.layers.RepeatVector(3),
           keras.layers.TimeDistributed(keras.layers.Dense(3))],
          input_shape=(3,))
      model.compile(
          loss=keras.losses.MSE,
          optimizer=keras.optimizers.optimizer_v2.rmsprop.RMSprop(lr=0.0001),
          metrics=[
              keras.metrics.categorical_accuracy,
              keras.metrics.CategoricalCrossentropy(
                  name='cce', label_smoothing=tf.constant(0.2)),
          ],
          weighted_metrics=[
              keras.metrics.categorical_crossentropy,
              keras.metrics.CategoricalCrossentropy(
                  name='cce', label_smoothing=tf.constant(0.2)),
          ],
          sample_weight_mode='temporal')

      x = np.random.random((1, 3))
      y = np.random.random((1, 3, 3))
      model.train_on_batch(x, y)

      out = model.predict(x)
      keras.models.save_model(
          model, saved_model_dir, save_format=save_format,
          **save_kwargs)

      loaded_model = keras.models.load_model(saved_model_dir)
      self._assert_same_weights_and_metrics(model, loaded_model)

      out2 = loaded_model.predict(x)
      self.assertAllClose(out, out2, atol=1e-05)

      eval_out = model.evaluate(x, y)
      eval_out2 = loaded_model.evaluate(x, y)
      self.assertArrayNear(eval_out, eval_out2, 0.001)
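The save_traces keyword exercised here is an option of Keras SavedModel export: with save_traces=False the traced call functions are not serialized, so loading depends on each custom object providing get_config/from_config (which is why the autogenerated subclassed model is skipped above). A minimal usage sketch, where the path and MyLayer are hypothetical stand-ins:

# Hypothetical path and custom layer class, for illustration only.
model.save('/tmp/my_model', save_format='tf', save_traces=False)
restored = keras.models.load_model(
    '/tmp/my_model', custom_objects={'MyLayer': MyLayer})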
Example #13
  def test_primitive_attrs_contain_no_extraneous_strings(self):
    if h5py is None:
      self.skipTest('h5py required to run this test')

    saved_model_dir = self._save_model_dir()
    save_format = test_utils.get_save_format()
    model = keras.models.Sequential()
    model.add(keras.layers.Dense(1, input_shape=[2]))
    model.save(saved_model_dir, save_format=save_format)
    if save_format in ['tf', 'tensorflow']:
      return

    h5file = h5py.File(saved_model_dir, 'r')
    self.assertRegex(h5file.attrs['keras_version'], r'^[\d]+\.[\d]+\.[\S]+$')
Example #14
  def test_saving_constant_initializer_with_numpy(self):
    saved_model_dir = self._save_model_dir()
    save_format = test_utils.get_save_format()

    model = keras.models.Sequential()
    model.add(
        keras.layers.Dense(
            2,
            input_shape=(3,),
            kernel_initializer=keras.initializers.Constant(np.ones((3, 2)))))
    model.add(keras.layers.Dense(3))
    model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
    keras.models.save_model(model, saved_model_dir, save_format=save_format)
    model = keras.models.load_model(saved_model_dir)
Example #15
  def test_model_saving_to_pre_created_h5py_file(self):
    saved_model_dir = self._save_model_dir()
    save_format = test_utils.get_save_format()
    with tf.Graph().as_default(), self.cached_session():
      inputs = keras.Input(shape=(3,))
      x = keras.layers.Dense(2)(inputs)
      outputs = keras.layers.Dense(3)(x)

      model = keras.Model(inputs, outputs)
      model.compile(
          loss=keras.losses.MSE,
          optimizer=optimizer_v1.Adam(),
          metrics=[
              keras.metrics.categorical_accuracy,
              keras.metrics.CategoricalAccuracy()
          ])
      x = np.random.random((1, 3))
      y = np.random.random((1, 3))
      model.train_on_batch(x, y)

      out = model.predict(x)

      keras.models.save_model(model, saved_model_dir, save_format=save_format)
      loaded_model = keras.models.load_model(saved_model_dir)
      out1 = loaded_model.predict(x)
      self.assertAllClose(out, out1, atol=1e-05)
      if save_format in ['tf', 'tensorflow']:
        return

      # Test h5 format specifically
      fd, fname = tempfile.mkstemp('.h5')
      with h5py.File(fname, mode='r+') as h5file:
        keras.models.save_model(model, h5file)
        loaded_model = keras.models.load_model(h5file)
        out2 = loaded_model.predict(x)
      self.assertAllClose(out, out2, atol=1e-05)

      # Test non-default options in h5
      with h5py.File(
          '_', driver='core', mode='w', backing_store=False) as h5file:
        keras.models.save_model(model, h5file)
        loaded_model = keras.models.load_model(h5file)
        out2 = loaded_model.predict(x)
      self.assertAllClose(out, out2, atol=1e-05)

      # Cleanup
      os.close(fd)
      os.remove(fname)
Example #16
  def test_save_uncompiled_model_with_optimizer(self):
    with self.cached_session() as session:
      saved_model_dir = self._save_model_dir()
      save_format = test_utils.get_save_format()
      model = keras.models.Sequential([keras.layers.Dense(1, input_shape=(3,))])
      # Set the model's optimizer but don't compile. This can happen if the
      # model is trained with a custom training loop.
      model.optimizer = keras.optimizers.optimizer_v2.rmsprop.RMSprop(lr=0.0001)
      if not tf.executing_eagerly():
        session.run([v.initializer for v in model.variables])
      model.save(saved_model_dir, save_format=save_format)

      if save_format in ['tf', 'tensorflow']:
        loaded = keras.models.load_model(saved_model_dir)
        self.assertIsInstance(
            loaded.optimizer,
            keras.optimizers.optimizer_v2.optimizer_v2.OptimizerV2)
Example #17
  def test_shared_objects_wrapper(self):
    """Tests that shared layers wrapped with `Wrapper` restore correctly."""
    input_ = keras.Input(shape=(1,))
    unwrapped = keras.layers.Layer(name='unwrapped')
    wrapped = keras.layers.Wrapper(unwrapped, name='wrapped')
    model = keras.Model(inputs=input_,
                        outputs=[unwrapped(input_), wrapped(input_)])

    # Test recreating directly from config
    config = model.get_config()
    loaded = keras.Model.from_config(config)
    self.assertIs(loaded.layers[1], loaded.layers[2].layer)

    # Test saving and loading to disk
    save_format = test_utils.get_save_format()
    saved_model_dir = self._save_model_dir()
    keras.models.save_model(model, saved_model_dir, save_format=save_format)
    loaded = keras.models.load_model(saved_model_dir)
    self.assertIs(loaded.layers[1], loaded.layers[2].layer)
Example #18
  def test_multi_output_metrics_name_stay_same(self, fit):
    """Tests that metric names don't change with each save/load cycle.

    e.g. "head_0_accuracy" should not become "head_0_head_0_accuracy" after
    saving and loading a model.

    Arguments:
      fit: Whether the model should be fit before saving.
    """
    # This doesn't work at all, so we can't check whether metric names are
    # correct.
    if not tf.executing_eagerly() and not fit:
      self.skipTest('b/181767784')

    input_ = keras.Input((4,))
    model = keras.Model(
        input_,
        [keras.layers.Softmax(name='head_0')(keras.layers.Dense(3)(input_)),
         keras.layers.Softmax(name='head_1')(keras.layers.Dense(5)(input_))])
    metric = keras.metrics.BinaryAccuracy()
    model.compile(optimizer='rmsprop',
                  loss='mse',
                  metrics={'head_0': [metric, 'accuracy']})

    x = np.random.rand(2, 4)
    y = {'head_0': np.random.randint(2, size=(2, 3)),
         'head_1': np.random.randint(2, size=(2, 5))}

    # Make sure metric prefixing works the same regardless of whether the user
    # has fit the model before saving.
    if fit:
      model.fit(x, y, verbose=0)

    # Save and reload.
    save_format = test_utils.get_save_format()
    saved_model_dir = self._save_model_dir()
    keras.models.save_model(model, saved_model_dir, save_format=save_format)
    loaded = keras.models.load_model(saved_model_dir)

    # Make sure the metrics names from the model before saving match the loaded
    # model.
    self.assertSequenceEqual(model.metrics_names, loaded.metrics_names)
Example #19
  def test_sequential_model_saving_without_compile(self):
    saved_model_dir = self._save_model_dir()
    save_format = test_utils.get_save_format()
    with self.cached_session():
      model = keras.models.Sequential()
      model.add(keras.layers.Dense(2, input_shape=(3,)))
      model.add(keras.layers.RepeatVector(3))
      model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))

      x = np.random.random((1, 3))
      out = model.predict(x)

      # Save the model without any compilation or training.
      keras.models.save_model(model, saved_model_dir, save_format=save_format)

      new_model = keras.models.load_model(saved_model_dir)
      self._assert_same_weights_and_metrics(model, new_model)

      out2 = new_model.predict(x)
      self.assertAllClose(out, out2, atol=1e-05)
Example #20
  def test_model_saving_to_new_dir_path(self):
    saved_model_dir = os.path.join(self._save_model_dir(), 'newdir',
                                   'saved_model')
    save_format = test_utils.get_save_format()

    with self.cached_session():
      model = keras.models.Sequential()
      model.add(keras.layers.Dense(2, input_shape=(3,)))
      model.add(keras.layers.RepeatVector(3))
      model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))

      x = np.random.random((1, 3))
      out = model.predict(x)

      keras.models.save_model(model, saved_model_dir, save_format=save_format)

      new_model = keras.models.load_model(saved_model_dir)
      self._assert_same_weights_and_metrics(model, new_model)

      out2 = new_model.predict(x)
      self.assertAllClose(out, out2, atol=1e-05)
Example #21
  def test_saving_lambda_numpy_array_arguments(self):
    saved_model_dir = self._save_model_dir()
    save_format = test_utils.get_save_format()

    if h5py is None:
      self.skipTest('h5py required to run this test')

    mean = np.random.random((4, 2, 3))
    std = np.abs(np.random.random((4, 2, 3))) + 1e-5
    inputs = keras.layers.Input(shape=(4, 2, 3))
    output = keras.layers.Lambda(lambda image, mu, std: (image - mu) / std,
                                 arguments={'mu': mean, 'std': std})(inputs)
    model = keras.models.Model(inputs, output)
    model.compile(loss='mse', optimizer='sgd', metrics=['acc'])

    keras.models.save_model(model, saved_model_dir, save_format=save_format)

    model = keras.models.load_model(saved_model_dir)

    self.assertAllClose(mean, model.layers[1].arguments['mu'])
    self.assertAllClose(std, model.layers[1].arguments['std'])
Example #22
  def test_saving_model_with_long_weights_names(self):
    saved_model_dir = self._save_model_dir()
    save_format = test_utils.get_save_format()

    with self.cached_session():
      x = keras.Input(shape=(2,), name='nested_model_input')
      f = x
      for i in range(4):
        f = keras.layers.Dense(2, name='nested_model_dense_%d' % (i,))(f)
      # This layer name will make the `weights_name`
      # HDF5 attribute blow out of proportion.
      f = keras.layers.Dense(2, name='nested_model_output' + ('x' * (2**14)))(f)
      nested_model = keras.Model(inputs=[x], outputs=[f], name='nested_model')

      x = keras.Input(shape=(2,), name='outer_model_input')
      f = nested_model(x)
      f = keras.layers.Dense(2, name='outer_model_output')(f)

      model = keras.Model(inputs=[x], outputs=[f])
      model.compile(loss='mse', optimizer='adam', metrics=['acc'])

      x = np.random.random((1, 2))
      y = np.random.random((1, 2))
      model.train_on_batch(x, y)
      out = model.predict(x)

      keras.models.save_model(model, saved_model_dir, save_format=save_format)
      model = keras.models.load_model(saved_model_dir)

      if save_format in ['h5', 'hdf5', 'keras']:
        # Check that the HDF5 file contains a chunked array
        # of weight names.
        with h5py.File(saved_model_dir, 'r') as h5file:
          num_weight_arrays = len(
              [attr for attr in h5file['model_weights']['nested_model'].attrs
               if attr.startswith('weight_names')])
        # The chunking of the weight names array should have happened.
        self.assertGreater(num_weight_arrays, 0)
      out2 = model.predict(x)
      self.assertAllClose(out, out2, atol=1e-05)
Example #23
    def test_load_weights_from_saved_model(self):
        save_path = self._save_model_dir()
        save_format = test_utils.get_save_format()

        if save_format == "h5" and test_utils.get_model_type() == "subclass":
            # TODO(b/173646281): HDF5 format currently does not allow saving
            # subclassed models.
            return

        with self.cached_session():
            model = test_utils.get_small_mlp(1, 4, input_dim=3)
            data = np.random.random((1, 3))
            labels = np.random.random((1, 4))
            model.compile(loss="mse", optimizer="rmsprop")
            model.fit(data, labels)
            model.save(save_path, save_format=save_format)
            new_model = test_utils.get_small_mlp(1, 4, input_dim=3)
            if test_utils.get_model_type() == "subclass":
                # Call on test data to build the model.
                new_model.predict(data)
            new_model.load_weights(save_path)
            self.assertAllClose(model.weights, new_model.weights)
Example #24
  def test_custom_functional_registered(self):

    def _get_cls_definition():
      class CustomModel(keras.Model):

        def c(self):
          return 'c'

      return CustomModel

    cls = _get_cls_definition()
    self.assertEqual(cls.__bases__[0], keras.Model)

    with self.cached_session() as sess:
      input_ = keras.layers.Input(shape=(1,))
      output = keras.layers.Dense(1)(input_)
      model = cls(input_, output)
      # `cls` now inherits from `Functional` class.
      self.assertEqual(cls.__bases__[0], functional.Functional)

      if not tf.executing_eagerly():
        sess.run([v.initializer for v in model.variables])

      save_format = test_utils.get_save_format()
      saved_model_dir = self._save_model_dir()
      keras.models.save_model(model, saved_model_dir, save_format=save_format)

    loaded_model = keras.models.load_model(
        saved_model_dir, custom_objects={'CustomModel': cls})
    self.assertIsInstance(loaded_model, cls)

    # Check with "new" `CustomModel` class definition.
    new_cls = _get_cls_definition()
    # The new `CustomModel` class is *not* derived from `Functional`.
    self.assertEqual(new_cls.__bases__[0], keras.Model)
    reloaded_model = keras.models.load_model(
        saved_model_dir, custom_objects={'CustomModel': new_cls})
    self.assertIsInstance(reloaded_model, new_cls)
Example #25
  def test_functional_model_with_custom_loss_and_metric(self):
    def _make_model():
      inputs = keras.Input(shape=(4,))
      x = keras.layers.Dense(8, activation='relu')(inputs)
      outputs = keras.layers.Dense(3, activation='softmax')(x)
      model = keras.Model(inputs=inputs, outputs=outputs)
      custom_loss = keras.layers.Lambda(lambda x: keras.backend.sum(x * x))(x)
      model.add_loss(custom_loss)
      model.add_metric(custom_loss, aggregation='mean', name='custom_loss')
      return model

    saved_model_dir = self._save_model_dir()
    save_format = test_utils.get_save_format()

    with self.cached_session():
      model = _make_model()
      model.compile(
          loss=keras.losses.SparseCategoricalCrossentropy(),
          optimizer=optimizers.gradient_descent_v2.SGD(),
          metrics=[keras.metrics.SparseCategoricalCrossentropy()])
      x = np.random.normal(size=(32, 4))
      y = np.random.randint(0, 3, size=32)
      model.train_on_batch(x, y)
      evaluation_results = model.evaluate(x, y)
      # Save and reload model.
      model.save(saved_model_dir, save_format=save_format)
      del model  # Prevent misuse.
      loaded_model = keras.models.load_model(saved_model_dir)
      loaded_model_eval_results = loaded_model.evaluate(x, y)
      # Assert all evaluation results are the same.
      self.assertAllClose(evaluation_results, loaded_model_eval_results, 1e-9)
      # Check correctness of the loss calculation.
      self.assertAllGreater(evaluation_results, 0.)
      evaluation_results = dict(
          zip(loaded_model.metrics_names, evaluation_results))
      self.assertNear(
          evaluation_results['sparse_categorical_crossentropy'] +
          evaluation_results['custom_loss'], evaluation_results['loss'], 1e-6)
Example #26
  def test_saving_model_with_long_layer_names(self):
    saved_model_dir = self._save_model_dir()
    save_format = test_utils.get_save_format()
    with self.cached_session():
      # This layer name will make the `layers_name` HDF5 attribute blow
      # out of proportion. Note that it fits into the internal HDF5
      # attribute memory limit on its own but because h5py converts
      # the list of layer names into numpy array, which uses the same
      # amount of memory for every item, it increases the memory
      # requirements substantially.
      x = keras.Input(shape=(2,), name='input_' + ('x' * (2**15)))
      f = x
      for i in range(4):
        f = keras.layers.Dense(2, name='dense_%d' % (i,))(f)
      model = keras.Model(inputs=[x], outputs=[f])
      model.compile(
          'adam', loss=keras.losses.MeanSquaredError(), metrics=['acc'])

      x = np.random.random((1, 2))
      y = np.random.random((1, 2))
      model.train_on_batch(x, y)
      out = model.predict(x)

      keras.models.save_model(model, saved_model_dir, save_format=save_format)
      model = keras.models.load_model(saved_model_dir)

      if save_format in ['tf', 'tensorflow']:
        return
      # Check that the HDF5 file contains a chunked array
      # of layer names.
      with h5py.File(saved_model_dir, 'r') as h5file:
        num_names_arrays = len([attr for attr in h5file['model_weights'].attrs
                                if attr.startswith('layer_names')])
      # The chunking of layer names array should have happened.
      self.assertGreater(num_names_arrays, 0)
      out2 = model.predict(x)
      self.assertAllClose(out, out2, atol=1e-05)
Example #27
  def test_shared_objects(self):
    class OuterLayer(keras.layers.Layer):

      def __init__(self, inner_layer):
        super(OuterLayer, self).__init__()
        self.inner_layer = inner_layer

      def call(self, inputs):
        return self.inner_layer(inputs)

      def get_config(self):
        return {
            'inner_layer': generic_utils.serialize_keras_object(
                self.inner_layer)
        }

      @classmethod
      def from_config(cls, config):
        return cls(generic_utils.deserialize_keras_object(
            config['inner_layer']))

    class InnerLayer(keras.layers.Layer):

      def __init__(self):
        super(InnerLayer, self).__init__()
        self.v = self.add_weight(name='v', shape=[], dtype=tf.float32)

      def call(self, inputs):
        return self.v + inputs

      @classmethod
      def from_config(cls, config):
        return cls()

    # Create a model with 2 output layers that share the same inner layer.
    inner_layer = InnerLayer()
    outer_layer_1 = OuterLayer(inner_layer)
    outer_layer_2 = OuterLayer(inner_layer)
    input_ = keras.Input(shape=(1,))
    model = keras.Model(
        inputs=input_, outputs=[outer_layer_1(input_), outer_layer_2(input_)])

    # Changes to the shared layer should affect both outputs.
    model.layers[1].inner_layer.v.assign(5)
    self.assertAllEqual(model(1), [6.0, 6.0])
    model.layers[1].inner_layer.v.assign(3)
    self.assertAllEqual(model(1), [4.0, 4.0])

    # After loading, changes to the shared layer should still affect both
    # outputs.
    def _do_assertions(loaded):
      loaded.layers[1].inner_layer.v.assign(5)
      self.assertAllEqual(loaded(1), [6.0, 6.0])
      loaded.layers[1].inner_layer.v.assign(3)
      self.assertAllEqual(loaded(1), [4.0, 4.0])
      loaded.layers[2].inner_layer.v.assign(5)
      self.assertAllEqual(loaded(1), [6.0, 6.0])
      loaded.layers[2].inner_layer.v.assign(3)
      self.assertAllEqual(loaded(1), [4.0, 4.0])

    # We'd like to make sure we only attach shared object IDs when strictly
    # necessary, so we'll recursively traverse the generated config to count
    # whether we have the exact number we expect.
    def _get_all_keys_recursive(dict_or_iterable):
      if isinstance(dict_or_iterable, dict):
        for key in dict_or_iterable.keys():
          yield key
        for key in _get_all_keys_recursive(dict_or_iterable.values()):
          yield key
      elif isinstance(dict_or_iterable, str):
        return
      else:
        try:
          for item in dict_or_iterable:
            for key in _get_all_keys_recursive(item):
              yield key
        # Not an iterable or dictionary
        except TypeError:
          return

    with generic_utils.CustomObjectScope({
        'OuterLayer': OuterLayer, 'InnerLayer': InnerLayer}):

      # Test saving and loading to disk
      save_format = test_utils.get_save_format()
      saved_model_dir = self._save_model_dir()
      keras.models.save_model(model, saved_model_dir, save_format=save_format)
      loaded = keras.models.load_model(saved_model_dir)
      _do_assertions(loaded)

      # Test recreating directly from config
      config = model.get_config()
      key_count = collections.Counter(_get_all_keys_recursive(config))
      self.assertEqual(key_count[generic_utils.SHARED_OBJECT_KEY], 2)
      loaded = keras.Model.from_config(config)
      _do_assertions(loaded)