Example #1
 def test_save_load_hdf5_pathlib(self):
     if sys.version_info < (3, 6):
         self.skipTest(
             'pathlib is only available for python version >= 3.6')
     path = pathlib.Path(self.get_temp_dir()) / 'model'
     save.save_model(self.model, path, save_format='h5')
     save.load_model(path)
Example #2
    def test_save_model_with_dynamic_loss_scaling(self, strategy_fn, h5=False):
        # TODO(reedwm): Support and test saving model with a mixed_[b]float16 policy
        # as well.
        strategy = strategy_fn()
        if (isinstance(strategy, tf.distribute.MirroredStrategy)
                and not tf.executing_eagerly()):
            # TODO(b/121381184): Enable running the test in this case.
            return

        # Create and run model.
        with strategy.scope():
            x = layers.Input(shape=(2, ), batch_size=2, dtype=tf.float32)
            y = mp_test_util.MultiplyLayer()(x)
            model = models.Model(inputs=x, outputs=y)

            opt = gradient_descent.SGD(1.)
            opt = loss_scale_optimizer.LossScaleOptimizer(
                opt, initial_scale=1., dynamic_growth_steps=2.)
            model.compile(optimizer=opt,
                          loss='mse',
                          run_eagerly=test_utils.should_run_eagerly())
        # Run for 3 steps (6 examples with a batch size of 2)
        model.fit(np.ones((6, 2)), np.zeros((6, 2)), batch_size=2)
        self.assertEqual(backend.get_value(opt.loss_scale), 2)
        self.assertEqual(backend.get_value(opt.dynamic_counter), 1)
        (weight, ) = model.trainable_weights
        orig_weight = backend.get_value(weight)

        # Save model weights.
        save_path = os.path.join(self.get_temp_dir(), 'model')
        model.save(save_path, save_format='h5' if h5 else 'tf')

        # Run model again for 1 step (2 examples with a batch size of 2)
        model.fit(np.ones((2, 2)), np.zeros((2, 2)), batch_size=2)
        new_weight = backend.get_value(weight)
        self.assertNotEqual(new_weight, orig_weight)
        self.assertEqual(backend.get_value(opt.loss_scale), 4)
        self.assertEqual(backend.get_value(opt.dynamic_counter), 0)

        # Load model weights and ensure loss scale weights are restored.
        model = save.load_model(
            save_path,
            custom_objects={'MultiplyLayer': mp_test_util.MultiplyLayer})
        (weight, ) = model.trainable_weights
        loaded_weight = backend.get_value(weight)
        self.assertEqual(loaded_weight, orig_weight)
        # Currently the loss scale isn't always saved when the model is saved with
        # Model.save(). So we assert the loss scale either has the value when it was
        # saved, or the value it was initialized with.
        # TODO(reedwm): Always save/restore the loss scale with Model.save().
        self.assertIn(backend.get_value(model.optimizer.loss_scale), (1, 2))
        self.assertIn(backend.get_value(model.optimizer.dynamic_counter),
                      (0, 1))

        # Test optimizer attributes and type
        self.assertEqual(model.optimizer.initial_scale, 1.)
        self.assertEqual(model.optimizer.dynamic_growth_steps, 2.)
        self.assertEqual(type(model.optimizer),
                         loss_scale_optimizer.LossScaleOptimizer)
Example #3
 def _load_and_run_model(self,
                         distribution,
                         saved_dir,
                         predict_dataset,
                         output_name='output_1'):
     restored_keras_model = save.load_model(saved_dir)
     return restored_keras_model.predict(predict_dataset,
                                         steps=test_base.PREDICT_STEPS)
Example #4
    def _test_saving(self, model, dataset, save_format, use_regularizer):
        # Save and load model, asserting variable does not change
        save_path = os.path.join(self.get_temp_dir(), "model")
        model.save(save_path, save_format=save_format)
        model = save.load_model(save_path)
        (layer,) = (
            layer
            for layer in model.layers
            if "MultiplyLayer" in layer.__class__.__name__
        )
        expected = 1 - 2**-14
        if use_regularizer:
            expected -= 2 * 2**-14
        self.assertEqual(backend.eval(layer.v), expected)

        # Continue training, and assert variable is correct value
        model.fit(dataset)
        new_expected = expected - 2**-14
        if use_regularizer:
            new_expected -= 2 * 2**-14
        self.assertEqual(backend.eval(layer.v), new_expected)

        # Load saved model again, and assert variable is previous value
        model = save.load_model(save_path)
        (layer,) = (
            layer
            for layer in model.layers
            if "MultiplyLayer" in layer.__class__.__name__
        )
        self.assertEqual(backend.eval(layer.v), expected)

        # Ensure various dtype-related aspects of the layer are correct
        self.assertEqual(layer.dtype, "float32")
        self.assertEqual(layer.dtype_policy.name, "mixed_float16")
        self.assertEqual(layer.v.dtype, "float32")
        self.assertEqual(layer(np.ones((2, 1))).dtype, "float16")

        self.assertEqual(type(model.dtype_policy), policy.Policy)
        self.assertEqual(
            layer.get_config()["dtype"],
            {"class_name": "Policy", "config": {"name": "mixed_float16"}},
        )
Example #5
    def _test_saving(self, model, dataset, save_format, use_regularizer):
        # Save and load model, asserting variable does not change
        save_path = os.path.join(self.get_temp_dir(), 'model')
        model.save(save_path, save_format=save_format)
        model = save.load_model(save_path)
        (layer, ) = (layer for layer in model.layers
                     if 'MultiplyLayer' in layer.__class__.__name__)
        expected = 1 - 2**-14
        if use_regularizer:
            expected -= 2 * 2**-14
        self.assertEqual(backend.eval(layer.v), expected)

        # Continue training, and assert variable is correct value
        model.fit(dataset)
        new_expected = expected - 2**-14
        if use_regularizer:
            new_expected -= 2 * 2**-14
        self.assertEqual(backend.eval(layer.v), new_expected)

        # Load saved model again, and assert variable is previous value
        model = save.load_model(save_path)
        (layer, ) = (layer for layer in model.layers
                     if 'MultiplyLayer' in layer.__class__.__name__)
        self.assertEqual(backend.eval(layer.v), expected)

        # Ensure various dtype-related aspects of the layer are correct
        self.assertEqual(layer.dtype, 'float32')
        self.assertEqual(
            get_layer_policy.get_layer_policy(layer).name, 'mixed_float16')
        self.assertEqual(layer.v.dtype, 'float32')
        self.assertEqual(layer(np.ones((2, 1))).dtype, 'float16')

        # Loading a model always loads with a v2 Policy, even if saved with a
        # PolicyV1.
        self.assertEqual(type(model.dtype_policy), policy.Policy)
        self.assertEqual(layer.get_config()['dtype'], {
            'class_name': 'Policy',
            'config': {
                'name': 'mixed_float16'
            }
        })
Example #6
 def test_restore_old_saved_model(self):
   saved_model_dir = os.path.join(
       flags.FLAGS['test_srcdir'].value,
       'org_keras/keras',
       'mixed_precision/testdata/lso_savedmodel_tf2.2')
   # saved_model_dir = test.test_src_dir_path(
   #     'python/keras/mixed_precision/testdata/'
   #     'lso_savedmodel_tf2.2')
   model = save.load_model(saved_model_dir)
   expected_kernel = np.array([[9.229685, 10.901115], [10.370763, 9.757362]])
   self.assertAllClose(backend.eval(model.weights[0]), expected_kernel)
   self.assertEqual(type(model.optimizer),
                    loss_scale_optimizer.LossScaleOptimizer)
Example #7
 def test_state_saving_and_loading(self):
   with self.cached_session():
     input_data = np.random.random((1, 2))
     rff_layer = kernel_layers.RandomFourierFeatures(output_dim=10, scale=3.0)
     inputs = input_layer.Input((2,))
     outputs = rff_layer(inputs)
     model = training.Model(inputs, outputs)
     output_data = model.predict(input_data)
     temp_dir = self.get_temp_dir()
     self.addCleanup(shutil.rmtree, temp_dir)
     saved_model_dir = os.path.join(temp_dir, 'rff_model')
     model.save(saved_model_dir)
     new_model = save.load_model(saved_model_dir)
     new_output_data = new_model.predict(input_data)
     self.assertAllClose(output_data, new_output_data, atol=1e-4)
Example #8
  def test_saving_model_with_custom_object(self):
    with generic_utils.custom_object_scope(), self.cached_session():

      @generic_utils.register_keras_serializable()
      class CustomLoss(losses.MeanSquaredError):
        pass

      model = sequential.Sequential(
          [core.Dense(units=1, input_shape=(1,))])
      model.compile(optimizer='sgd', loss=CustomLoss())
      model.fit(np.zeros([10, 1]), np.zeros([10, 1]))

      temp_dir = self.get_temp_dir()
      filepath = os.path.join(temp_dir, 'saving')
      model.save(filepath)

      # Make sure the model can be correctly loaded back.
      _ = save.load_model(filepath, compile=True)
Example #9
def deserialize_model_from_bytecode(serialized_model):
    """Reconstruct a Model from the output of `serialize_model_as_bytecode`.

    Args:
        serialized_model: (np.array) return value from
          `serialize_model_as_bytecode`.

    Returns:
        keras.Model: Keras Model instance.
    """
    temp_dir = f"ram://{uuid.uuid4()}"
    b = io.BytesIO(serialized_model)
    with tarfile.open(fileobj=b, mode="r") as archive:
        for name in archive.getnames():
            dest_path = tf.io.gfile.join(temp_dir, name)
            member = archive.getmember(name)
            tf.io.gfile.makedirs(os.path.dirname(dest_path))
            if member.isfile():
                with tf.io.gfile.GFile(dest_path, "wb") as f:
                    f.write(archive.extractfile(name).read())
    model = save_module.load_model(temp_dir)
    tf.io.gfile.rmtree(temp_dir)
    return model
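
A minimal round-trip sketch for the helper above, shown only as an illustration: it builds the tar bytes that deserialize_model_from_bytecode expects by archiving a SavedModel written with the TF-Keras save_format='tf' path, then rebuilds the model from those bytes. The archiving code is an assumption about the layout the helper expects, not the actual serialize_model_as_bytecode referenced in its docstring.

import io
import os
import tarfile
import tempfile

import numpy as np
from tensorflow.keras import layers, models

# Small model to round-trip (illustrative only).
inputs = layers.Input(shape=(4,))
model = models.Model(inputs, layers.Dense(1)(inputs))

with tempfile.TemporaryDirectory() as tmp:
    export_dir = os.path.join(tmp, 'saved')
    model.save(export_dir, save_format='tf')  # write a SavedModel directory
    buf = io.BytesIO()
    with tarfile.open(fileobj=buf, mode='w') as archive:
        # Store each file under its path relative to the SavedModel root,
        # which is the layout the extraction loop above reproduces.
        for root, _, files in os.walk(export_dir):
            for fname in files:
                full = os.path.join(root, fname)
                archive.add(full, arcname=os.path.relpath(full, export_dir))
    serialized = buf.getvalue()

restored = deserialize_model_from_bytecode(serialized)
# The rebuilt model should match the original's predictions.
np.testing.assert_allclose(
    model.predict(np.ones((2, 4))),
    restored.predict(np.ones((2, 4))),
    atol=1e-6)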
Example #10
 def test_save_load_hdf5_pathlib(self):
   path = pathlib.Path(self.get_temp_dir()) / 'model'
   save.save_model(self.model, path, save_format='h5')
   save.load_model(path)
Example #11
 def test_load_file_not_found(self):
   path = pathlib.Path(self.get_temp_dir()) / 'does_not_exist'
   with self.assertRaisesRegex(IOError, 'No file or directory found at'):
     save.load_model(path)
Example #12
 def test_save_load_tf_string(self):
   path = os.path.join(self.get_temp_dir(), 'model')
   save.save_model(self.model, path, save_format='tf')
   save.load_model(path)