Example #1
def test_saving_sequential_model(self):
    with self.cached_session():
      model = keras.models.Sequential()
      model.add(keras.layers.Dense(2, input_shape=(3,)))
      model.add(keras.layers.RepeatVector(3))
      model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
      model.compile(
          loss=keras.losses.MSE,
          optimizer=rmsprop.RMSprop(lr=0.0001),
          metrics=[keras.metrics.categorical_accuracy],
          sample_weight_mode='temporal',
          run_eagerly=testing_utils.should_run_eagerly(),
          run_distributed=testing_utils.should_run_distributed())
      x = np.random.random((1, 3))
      y = np.random.random((1, 3, 3))
      model.train_on_batch(x, y)

      ref_y = model.predict(x)

      saved_model_dir = self._save_model_dir()
      keras_saved_model.export_saved_model(model, saved_model_dir)

      loaded_model = keras_saved_model.load_from_saved_model(saved_model_dir)
      y = loaded_model.predict(x)
      self.assertAllClose(ref_y, y, atol=1e-05)
Example #2
    def test_fit_generator_method(self):
        model = testing_utils.get_small_mlp(num_hidden=3,
                                            num_classes=4,
                                            input_dim=2)
        model.compile(loss='mse',
                      optimizer=rmsprop.RMSprop(1e-3),
                      metrics=['mae',
                               metrics_module.CategoricalAccuracy()])

        model.fit_generator(custom_generator(),
                            steps_per_epoch=5,
                            epochs=1,
                            verbose=1,
                            max_queue_size=10,
                            workers=4,
                            use_multiprocessing=True)
        model.fit_generator(custom_generator(),
                            steps_per_epoch=5,
                            epochs=1,
                            verbose=1,
                            max_queue_size=10,
                            use_multiprocessing=False)
        model.fit_generator(custom_generator(),
                            steps_per_epoch=5,
                            epochs=1,
                            verbose=1,
                            max_queue_size=10,
                            use_multiprocessing=False,
                            validation_data=custom_generator(),
                            validation_steps=10)
        model.fit_generator(custom_generator(),
                            steps_per_epoch=5,
                            validation_data=custom_generator(),
                            validation_steps=1,
                            workers=0)
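Note: the `custom_generator` helper used by this and several later examples is
not shown on this page. A minimal sketch consistent with how it is called
(batches sized for the `get_small_mlp(num_hidden=3, num_classes=4, input_dim=2)`
models, with `mode=3` also yielding sample weights) might look like the
following; the batch size and sample count are assumptions:

import numpy as np

def custom_generator(mode=2):
    batch_size = 10
    num_samples = 50
    x = np.random.random((num_samples, 2))
    y = np.random.random((num_samples, 4))
    w = np.random.random((num_samples,))
    i = 0
    while True:
        start = (i * batch_size) % num_samples
        end = start + batch_size
        i += 1
        if mode == 1:
            yield x[start:end]                    # inputs only
        elif mode == 2:
            yield x[start:end], y[start:end]      # inputs and targets
        else:
            yield x[start:end], y[start:end], w[start:end]  # plus sample weights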
Example #3
  def testMinimizeSparseResourceVariableCentered(self):
    # TODO(tanzheny, omalleyt): Fix test in eager mode.
    with ops.Graph().as_default():
      for dtype in _DATA_TYPES:
        if test_util.is_xla_enabled() and dtype.is_complex:
          self.skipTest("b/143578550")
        var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
        x = constant_op.constant([[4.0], [5.0]], dtype=dtype)

        def loss():
          pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)  # pylint: disable=cell-var-from-loop
          return pred * pred

        # loss = lambda: pred * pred  # pylint: disable=cell-var-from-loop
        sgd_op = rmsprop.RMSprop(
            learning_rate=1.0, rho=0.0, momentum=0.0, epsilon=1.0,
            centered=True).minimize(
                loss, var_list=[var0])
        self.evaluate(variables.global_variables_initializer())
        # Fetch params to validate initial values
        self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))
        # Run 1 step of sgd
        self.evaluate(sgd_op)
        # Validate updated params
        self.assertAllCloseAccordingToType([[-111, -138]],
                                           self.evaluate(var0),
                                           atol=0.01)
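The expected values follow from a single centered-RMSprop step. With
var0 = [1, 2] and x = [4, 5]^T, pred = 14 and the gradient of pred^2 is
g = 2 * 14 * [4, 5] = [112, 140]. Because rho = 0, the accumulators after one
step are rms = g^2 and mg = g, so the denominator rms - mg^2 + epsilon
collapses to epsilon = 1 and the update is exactly lr * g. A quick numpy check
of the asserted values:

import numpy as np

g = np.array([112.0, 140.0])            # 2 * pred * x with pred = 14
rms = g ** 2                            # rho = 0: accumulator is just g^2
mg = g                                  # centered: mean-gradient accumulator
denom = np.sqrt(rms - mg ** 2 + 1.0)    # epsilon = 1.0 -> denom == 1
print(np.array([1.0, 2.0]) - 1.0 * g / denom)   # [-111. -138.]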
Example #4
  def test_model_methods_with_eager_tensors_single_io(self):
    if not context.executing_eagerly():
      # Only test V2 Function and V2 Eager modes, as V1 Graph mode with
      # symbolic tensors has different requirements.
      return

    model = testing_utils.get_small_mlp(10, 4, 3)

    optimizer = rmsprop.RMSprop(learning_rate=0.001)
    loss = 'mse'
    metrics = ['mae', metrics_module.CategoricalAccuracy()]
    model.compile(
        optimizer,
        loss,
        metrics=metrics,
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())

    inputs = array_ops.zeros(shape=(10, 3))
    targets = array_ops.zeros(shape=(10, 4))

    model.fit(inputs, targets, epochs=1, batch_size=2, verbose=0)
    model.fit(inputs, targets, epochs=1, batch_size=3, verbose=0, shuffle=False)
    model.fit(inputs, targets, epochs=1, batch_size=4, verbose=0,
              validation_data=(inputs, targets))
    model.evaluate(inputs, targets, batch_size=2, verbose=0)
    model.predict(inputs, batch_size=2)
    model.train_on_batch(inputs, targets)
    model.test_on_batch(inputs, targets)
Example #5
    def test_model_fit_and_validation_with_missing_arg_errors(self):
        model = testing_utils.get_small_mlp(10, 4, 3)
        model.compile(optimizer=rmsprop.RMSprop(learning_rate=0.001),
                      loss='mse',
                      run_eagerly=True)

        x = array_ops.zeros(shape=(10, 3))
        y = array_ops.zeros(shape=(10, 4))
        dataset = dataset_ops.Dataset.from_tensor_slices(
            (x, y)).repeat(10).batch(5)
        validation_dataset = dataset_ops.Dataset.from_tensor_slices(
            (x, y)).repeat().batch(5)  # Infinite dataset.

        model.fit(dataset, epochs=1, verbose=0)

        # `validation_steps` is required when `validation_data` is an
        # infinite dataset.
        with self.assertRaises(ValueError):
            model.fit(dataset,
                      steps_per_epoch=2,
                      epochs=1,
                      verbose=0,
                      validation_data=validation_dataset)
Example #6
  def test_batchnorm_non_trainable_with_tf_function(self):
    inputs = keras.Input((3,))
    bn = normalization_v2.BatchNormalization()
    outputs = bn(inputs)
    model = keras.Model(inputs, outputs)
    loss_fn = keras.losses.MeanSquaredError()
    optimizer = rmsprop_v2.RMSprop()

    @def_function.function()
    def train_step(x, y):
      with backprop.GradientTape() as tape:
        y_pred = model(x, training=True)
        loss = loss_fn(y, y_pred)
      grads = tape.gradient(loss, model.trainable_weights)
      optimizer.apply_gradients(zip(grads, model.trainable_weights))
      return loss

    @def_function.function()
    def test_step(x, y):
      y_pred = model(x, training=False)
      loss = loss_fn(y, y_pred)
      return loss

    train_step(np.random.random((100, 3)), np.random.random((100, 3)))

    test_data = np.random.random((10, 3))
    test_targets = np.random.random((10, 3))
    test_loss = test_step(test_data, test_targets)

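    # Flipping `trainable` to False is special-cased by BatchNormalization:
    # the layer then runs in inference mode (frozen moving statistics) even
    # when called with training=True, so the next train-step loss should
    # match the test-step loss computed above on the same data.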
    bn.trainable = False
    train_loss = train_step(test_data, test_targets)
    if context.executing_eagerly():
      self.assertAlmostEqual(test_loss.numpy(), train_loss.numpy())
Example #7
  def test_saving_functional_model(self):
    with self.cached_session():
      inputs = keras.layers.Input(shape=(3,))
      x = keras.layers.Dense(2)(inputs)
      output = keras.layers.Dense(3)(x)

      model = keras.models.Model(inputs, output)
      model.compile(
          loss=keras.losses.MSE,
          optimizer=rmsprop.RMSprop(lr=0.0001),
          metrics=[keras.metrics.categorical_accuracy],
          run_eagerly=testing_utils.should_run_eagerly(),
          run_distributed=testing_utils.should_run_distributed())
      x = np.random.random((1, 3))
      y = np.random.random((1, 3))
      model.train_on_batch(x, y)

      ref_y = model.predict(x)

      saved_model_dir = self._save_model_dir()
      keras_saved_model.export_saved_model(model, saved_model_dir)
      loaded_model = keras_saved_model.load_from_saved_model(saved_model_dir)

      y = loaded_model.predict(x)
      self.assertAllClose(ref_y, y, atol=1e-05)
Example #8
    def test_sequence_input_to_fit_eval_predict(self):
        val_data = np.ones([10, 10], np.float32), np.ones([10, 1], np.float32)

        class CustomSequence(keras.utils.Sequence):
            def __getitem__(self, idx):
                return np.ones([10, 10], np.float32), np.ones([10, 1],
                                                              np.float32)

            def __len__(self):
                return 2

        model = testing_utils.get_small_mlp(num_hidden=10,
                                            num_classes=1,
                                            input_dim=10)

        model.compile(rmsprop.RMSprop(0.001), 'binary_crossentropy')
        model.fit(CustomSequence(), validation_data=val_data, epochs=2)
        model.evaluate(CustomSequence())
        model.predict(CustomSequence())

        with self.assertRaisesRegexp(ValueError,
                                     '`y` argument is not supported'):
            model.fit(CustomSequence(), y=np.ones([10, 1]))

        with self.assertRaisesRegexp(
                ValueError, '`sample_weight` argument is not supported'):
            model.fit(CustomSequence(), sample_weight=np.ones([10, 1]))
Example #9
 def test_dynamic_layer_error(self):
   with self.assertRaisesRegexp(TypeError,
                                'attempting to use Python control flow'):
     model = testing_utils.get_model_from_layers([DynamicLayer()],
                                                 input_shape=(3,))
     model.compile(rmsprop.RMSprop(0.001), loss='mse')
     model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))
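`DynamicLayer` is defined elsewhere in the original test file; its essential
property is a `call` that uses Python control flow over its inputs, which only
works eagerly. A minimal sketch along those lines (the squaring loop is an
assumption):

class DynamicLayer(keras.layers.Layer):

    def __init__(self, dynamic=False, **kwargs):
        # `dynamic=True` tells Keras the layer must be run eagerly.
        super(DynamicLayer, self).__init__(dynamic=dynamic, **kwargs)

    def call(self, inputs):
        # Python-level iteration over a tensor: valid only in eager mode.
        return array_ops.stack([math_ops.square(sample) for sample in inputs])

    def compute_output_shape(self, input_shape):
        return input_shape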
Example #10
    def test_generator_methods_with_sample_weights(self):
        model = testing_utils.get_small_mlp(num_hidden=3,
                                            num_classes=4,
                                            input_dim=2)
        model.compile(loss='mse',
                      optimizer=rmsprop.RMSprop(1e-3),
                      metrics=['mae',
                               metrics_module.CategoricalAccuracy()],
                      run_eagerly=testing_utils.should_run_eagerly(),
                      run_distributed=testing_utils.should_run_distributed())

        model.fit_generator(custom_generator(mode=3),
                            steps_per_epoch=5,
                            epochs=1,
                            verbose=1,
                            max_queue_size=10,
                            use_multiprocessing=False)
        model.fit_generator(custom_generator(mode=3),
                            steps_per_epoch=5,
                            epochs=1,
                            verbose=1,
                            max_queue_size=10,
                            use_multiprocessing=False,
                            validation_data=custom_generator(mode=3),
                            validation_steps=10)
        model.predict_generator(custom_generator(mode=3),
                                steps=5,
                                max_queue_size=10,
                                use_multiprocessing=False)
        model.evaluate_generator(custom_generator(mode=3),
                                 steps=5,
                                 max_queue_size=10,
                                 use_multiprocessing=False)
Example #11
    def test_training_with_sequences(self):
        class DummySequence(keras.utils.Sequence):
            def __getitem__(self, idx):
                return np.zeros([10, 2]), np.ones([10, 4])

            def __len__(self):
                return 10

        model = testing_utils.get_small_mlp(num_hidden=3,
                                            num_classes=4,
                                            input_dim=2)
        model.compile(loss='mse', optimizer=rmsprop.RMSprop(1e-3))

        model.fit_generator(DummySequence(),
                            steps_per_epoch=10,
                            validation_data=custom_generator(),
                            validation_steps=1,
                            max_queue_size=10,
                            workers=0,
                            use_multiprocessing=True)
        model.fit_generator(DummySequence(),
                            steps_per_epoch=10,
                            validation_data=custom_generator(),
                            validation_steps=1,
                            max_queue_size=10,
                            workers=0,
                            use_multiprocessing=False)
Example #12
    def test_train_sequential_with_distribution_strategy(
            self, distribution, cloning):
        keras_model = simple_sequential_model()
        keras_model.compile(
            loss='categorical_crossentropy',
            metrics=[keras.metrics.CategoricalAccuracy()],
            optimizer=rmsprop_keras.RMSprop(learning_rate=0.01),
            cloning=cloning)
        config = run_config_lib.RunConfig(tf_random_seed=_RANDOM_SEED,
                                          model_dir=self._base_dir,
                                          train_distribute=distribution)
        with self.cached_session():
            est_keras = keras_lib.model_to_estimator(keras_model=keras_model,
                                                     config=config)
            before_eval_results = est_keras.evaluate(
                input_fn=get_ds_test_input_fn, steps=1)
            est_keras.train(input_fn=get_ds_train_input_fn,
                            steps=_TRAIN_SIZE // 16)
            after_eval_results = est_keras.evaluate(
                input_fn=get_ds_test_input_fn, steps=1)
            self.assertLess(after_eval_results['loss'],
                            before_eval_results['loss'])

        tf.compat.v1.summary.FileWriterCache.clear()
        tf.compat.v1.gfile.DeleteRecursively(self._config.model_dir)
Example #13
    def test_evaluate_generator_method(self):
        model = testing_utils.get_small_mlp(num_hidden=3,
                                            num_classes=4,
                                            input_dim=2)
        model.compile(loss='mse',
                      optimizer=rmsprop.RMSprop(1e-3),
                      metrics=['mae',
                               metrics_module.CategoricalAccuracy()],
                      run_eagerly=testing_utils.should_run_eagerly(),
                      run_distributed=testing_utils.should_run_distributed())

        self._sleep_at_end = True
        model.evaluate_generator(custom_generator(),
                                 steps=5,
                                 max_queue_size=10,
                                 workers=2,
                                 verbose=1,
                                 use_multiprocessing=True)
        model.evaluate_generator(custom_generator(),
                                 steps=5,
                                 max_queue_size=10,
                                 use_multiprocessing=False)
        model.evaluate_generator(custom_generator(),
                                 steps=5,
                                 max_queue_size=10,
                                 use_multiprocessing=False,
                                 workers=0)
Example #14
    def testMinimizeSparseResourceVariable(self):
        # TODO(tanzheny, omalleyt): Fix test in eager mode.
        with ops.Graph().as_default():
            for dtype in _DATA_TYPES:
                var0 = variables.Variable([[1.0, 2.0]], dtype=dtype)
                x = constant_op.constant([[4.0], [5.0]], dtype=dtype)

                def loss():
                    pred = math_ops.matmul(
                        embedding_ops.embedding_lookup([var0], [0]), x)  # pylint: disable=cell-var-from-loop
                    return pred * pred

                sgd_op = rmsprop.RMSprop(learning_rate=1.0,
                                         rho=0.0,
                                         momentum=0.0,
                                         epsilon=0.0,
                                         centered=False).minimize(
                                             loss, var_list=[var0])
                self.evaluate(variables.global_variables_initializer())
                # Fetch params to validate initial values
                self.assertAllCloseAccordingToType([[1.0, 2.0]],
                                                   self.evaluate(var0))
                # Run 1 step of sgd
                self.evaluate(sgd_op)
                # Validate updated params
                self.assertAllCloseAccordingToType([[0., 1.]],
                                                   self.evaluate(var0),
                                                   atol=0.01)
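The arithmetic mirrors the centered variant above, but with epsilon = 0 and no
mean-gradient correction: after one step the accumulator is g^2, so the update
is lr * g / sqrt(g^2) = [1, 1] and var0 moves from [1, 2] to [0, 1], as
asserted:

import numpy as np

g = np.array([112.0, 140.0])
print(np.array([1.0, 2.0]) - g / np.sqrt(g ** 2))   # [0. 1.]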
Example #15
 def test_loss_correctness_clipvalue_zero(self):
     # Test that training loss is the same in eager and graph
     # (by comparing it to a reference value in a deterministic case)
     # And confirm that setting clipvalue to zero stops all training
     layers = [
         keras.layers.Dense(3, activation='relu',
                            kernel_initializer='ones'),
         keras.layers.Dense(2,
                            activation='softmax',
                            kernel_initializer='ones')
     ]
     model = testing_utils.get_model_from_layers(layers, input_shape=(4, ))
     model.compile(loss='sparse_categorical_crossentropy',
                   optimizer=rmsprop.RMSprop(learning_rate=0.001,
                                             clipvalue=0.0),
                   run_eagerly=testing_utils.should_run_eagerly(),
                   experimental_run_tf_function=testing_utils.
                   should_run_tf_function())
     x = np.ones((100, 4))
     np.random.seed(123)
     y = np.random.randint(0, 1, size=(100, 1))
     history = model.fit(x, y, epochs=3, batch_size=10)
     self.assertAlmostEqual(history.history['loss'][-3], 0.6931, 4)
     self.assertAlmostEqual(history.history['loss'][-2], 0.6931, 4)
     self.assertAlmostEqual(history.history['loss'][-1], 0.6931, 4)
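The reference value 0.6931 is ln(2): with all-ones kernels both softmax logits
are always equal, so the predicted distribution is uniform over the two classes
and the sparse categorical cross-entropy of either label is -ln(1/2) = ln(2)
~= 0.6931. Since clipvalue=0.0 clips every gradient to zero, no weight ever
changes and the loss stays at ln(2) for all three epochs.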
Example #16
    def test_generator_methods(self):
        model = testing_utils.get_small_mlp(10, 4, 3)
        optimizer = rmsprop.RMSprop(learning_rate=0.001)
        model.compile(optimizer,
                      loss='mse',
                      metrics=['mae',
                               metrics_module.CategoricalAccuracy()],
                      run_eagerly=True)

        x = np.random.random((10, 3))
        y = np.random.random((10, 4))

        def numpy_iterator():
            while True:
                yield x, y

        model.fit_generator(numpy_iterator(), steps_per_epoch=3, epochs=1)
        model.evaluate_generator(numpy_iterator(), steps=3)

        def inference_numpy_iterator():
            while True:
                yield x

        out = model.predict_generator(inference_numpy_iterator(), steps=3)
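        # 3 steps of batches of 10 samples each -> 30 rows of predictions.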
        self.assertEqual(out.shape, (30, 4))
Example #17
 def test_dynamic_layer(self):
   model = testing_utils.get_model_from_layers([DynamicLayer(dynamic=True)],
                                               input_shape=(3,))
   self.assertEqual(model.dynamic, True)
   model.compile(rmsprop.RMSprop(0.001), loss='mse')
   self.assertEqual(model.run_eagerly, True)
   model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))
Example #18
    def testMinimizeSparseResourceVariableCentered(self):
        for dtype in [dtypes.float32, dtypes.float64]:
            with self.cached_session():
                var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]],
                                                              dtype=dtype)
                x = constant_op.constant([[4.0], [5.0]], dtype=dtype)

                def loss():
                    pred = math_ops.matmul(
                        embedding_ops.embedding_lookup([var0], [0]), x)  # pylint: disable=cell-var-from-loop
                    return pred * pred

                # loss = lambda: pred * pred  # pylint: disable=cell-var-from-loop
                sgd_op = rmsprop.RMSprop(learning_rate=1.0,
                                         rho=0.0,
                                         momentum=0.0,
                                         epsilon=1.0,
                                         centered=True).minimize(
                                             loss, var_list=[var0])
                self.evaluate(variables.global_variables_initializer())
                # Fetch params to validate initial values
                self.assertAllCloseAccordingToType([[1.0, 2.0]],
                                                   self.evaluate(var0))
                # Run 1 step of sgd
                self.evaluate(sgd_op)
                # Validate updated params
                self.assertAllCloseAccordingToType([[-111, -138]],
                                                   self.evaluate(var0),
                                                   atol=0.01)
Example #19
 def test_invalid_forward_pass_in_eager_mode(self):
     inputs = keras.Input((3, ))
     outputs = InvalidLayer()(inputs)
     model = keras.Model(inputs, outputs)
     self.assertEqual(model._static_graph_friendly, False)
     if testing_utils.should_run_eagerly():
         model.compile(rmsprop.RMSprop(0.001), loss='mse', run_eagerly=True)
         with self.assertRaisesRegexp(ValueError,
                                      'You did something wrong!'):
             model.train_on_batch(np.random.random((2, 3)),
                                  np.random.random((2, 3)))
     else:
         with self.assertRaisesRegexp(
                 ValueError, 'only be successfully run in eager execution'):
             model.compile(rmsprop.RMSprop(0.001),
                           loss='mse',
                           run_eagerly=False)
Example #20
  def test_dynamic_layers_in_sequential_model(self):
    # Without input_shape argument
    model = keras.Sequential([DynamicLayer1(dynamic=True),
                              keras.layers.Dense(3),
                              DynamicLayer2(dynamic=True)])
    self.assertEqual(model.dynamic, True)
    model.compile(rmsprop.RMSprop(0.001), loss='mse')
    self.assertEqual(model.run_eagerly, True)
    model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))

    # With input_shape argument
    model = keras.Sequential([DynamicLayer1(dynamic=True, input_shape=(3,)),
                              DynamicLayer2(dynamic=True)])
    self.assertEqual(model.dynamic, True)
    model.compile(rmsprop.RMSprop(0.001), loss='mse')
    self.assertEqual(model.run_eagerly, True)
    model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))
Example #21
 def test_dynamic_layer_with_deferred_sequential_model(self):
   model = keras.Sequential(
       [DynamicLayer(dynamic=True),
        keras.layers.Dense(3)])
   self.assertEqual(model.dynamic, True)
   model.compile(rmsprop.RMSprop(0.001), loss='mse')
   self.assertEqual(model.run_eagerly, True)
   model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))
Example #22
 def test_control_flow_in_deferred_sequential_model(self):
     model = keras.Sequential(
         [ControlFlowLayer1(),
          keras.layers.Dense(3),
          ControlFlowLayer2()])
     model.compile(rmsprop.RMSprop(0.001), loss='mse')
     model.train_on_batch(np.random.random((2, 3)), np.random.random(
         (2, 3)))
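`ControlFlowLayer1` and `ControlFlowLayer2` are defined elsewhere in the
original test file; the point of the test is that layers whose `call` contains
Python `if`/`for` statements can still be used in a deferred Sequential model.
A sketch of what such layers could look like (the exact bodies are
assumptions):

class ControlFlowLayer1(keras.layers.Layer):
    """Layer with a data-dependent `if` in call."""

    def call(self, inputs):
        if math_ops.reduce_sum(inputs) > 0:
            return math_ops.sqrt(inputs)
        else:
            return math_ops.square(inputs)


class ControlFlowLayer2(keras.layers.Layer):
    """Layer with a static Python loop in call."""

    def call(self, inputs):
        samples = []
        for _ in range(2):
            samples.append(math_ops.square(inputs))
        return math_ops.add_n(samples)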
Example #23
  def test_save_load_h5(self, distribution):
    with self.cached_session():
      dataset = keras_test_lib.get_dataset(distribution)
      with distribution.scope():
        model = keras_test_lib.get_model()
        model.compile(rms_prop_keras.RMSprop(learning_rate=0.01), 'mse')
        model.fit(dataset, epochs=1, steps_per_epoch=1)

        weights_file = tempfile.mktemp('.h5')
        model.save_weights(weights_file)

        model_2 = keras_test_lib.get_model()
        model_2.compile(rms_prop_keras.RMSprop(learning_rate=0.01), 'mse')
        model_2.load_weights(weights_file)
        model_2.predict(
            keras_test_lib.get_predict_dataset(distribution), steps=2)
        model_2.fit(dataset, epochs=1, steps_per_epoch=1)
Example #24
 def test_dynamic_layer_error_running_in_graph_mode(self):
     with context.graph_mode():
         model = testing_utils.get_model_from_layers(
             [DynamicLayer(dynamic=True)], input_shape=(3, ))
         self.assertEqual(model.dynamic, True)
         # But then you cannot run the model since you're in a graph scope.
         with self.assertRaisesRegexp(ValueError,
                                      'You must enable eager execution'):
             model.compile(rmsprop.RMSprop(0.001), loss='mse')
Example #25
  def nested_dynamic_layers_in_eager_mode(self):
    inputs = keras.Input((3,))
    outputs = DynamicLayer1()(inputs)
    inner_model = keras.Model(inputs, outputs)

    inputs = keras.Input((3,))
    x = DynamicLayer2()(inputs)
    outputs = inner_model(x)

    model = keras.Model(inputs, outputs)
    self.assertEqual(model._static_graph_friendly, False)
    if testing_utils.should_run_eagerly():
      model.compile(rmsprop.RMSprop(0.001), loss='mse', run_eagerly=True)
      model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))
    else:
      with self.assertRaisesRegexp(
          ValueError, 'only be successfully run in eager execution'):
        model.compile(rmsprop.RMSprop(0.001), loss='mse', run_eagerly=False)
Example #26
    def testSparse(self):
        for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
            for centered in [False, True]:
                with ops.Graph().as_default(), self.cached_session(
                    use_gpu=True
                ):
                    var0_np = np.array(
                        [1.0, 1.0, 2.0], dtype=dtype.as_numpy_dtype
                    )
                    grads0_np = np.array(
                        [0.1, 0.0, 0.1], dtype=dtype.as_numpy_dtype
                    )
                    var1_np = np.array(
                        [3.0, 3.0, 4.0], dtype=dtype.as_numpy_dtype
                    )
                    grads1_np = np.array(
                        [0.01, 0.0, 0.01], dtype=dtype.as_numpy_dtype
                    )

                    var0 = variables.Variable(var0_np)
                    var1 = variables.Variable(var1_np)
                    var0_ref = variables.Variable(var0_np)
                    var1_ref = variables.Variable(var1_np)
                    grads0_np_indices = np.array([0, 2], dtype=np.int32)
                    grads0 = ops.IndexedSlices(
                        constant_op.constant(grads0_np[grads0_np_indices]),
                        constant_op.constant(grads0_np_indices),
                        constant_op.constant([3]),
                    )
                    grads1_np_indices = np.array([0, 2], dtype=np.int32)
                    grads1 = ops.IndexedSlices(
                        constant_op.constant(grads1_np[grads1_np_indices]),
                        constant_op.constant(grads1_np_indices),
                        constant_op.constant([3]),
                    )
                    opt = ConstrainedRMSprop(centered=centered)
                    update = opt.apply_gradients(
                        zip([grads0, grads1], [var0, var1])
                    )
                    opt_ref = rmsprop.RMSprop(centered=centered)
                    update_ref = opt_ref.apply_gradients(
                        zip([grads0, grads1], [var0_ref, var1_ref])
                    )
                    self.evaluate(variables.global_variables_initializer())

                    # Run 3 steps
                    for t in range(3):
                        update.run()
                        update_ref.run()

                        # Validate updated params
                        self.assertAllCloseAccordingToType(
                            self.evaluate(var0_ref), self.evaluate(var0)
                        )
                        self.assertAllCloseAccordingToType(
                            self.evaluate(var1_ref), self.evaluate(var1)
                        )
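`ConstrainedRMSprop` is defined elsewhere on the original page. Since the test
asserts that its sparse updates match stock `rmsprop.RMSprop` exactly, whatever
constraint it applies must be a no-op for these gradients; the simplest
subclass consistent with the assertions (purely illustrative) is:

class ConstrainedRMSprop(rmsprop.RMSprop):
    # Placeholder: any constraint added here must leave the testSparse
    # updates unchanged, or the equality assertions above would fail.
    pass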
Example #27
    def test_training_arg_in_defun(self):
        layer = self._get_layer_with_training_arg()
        model = testing_utils.get_model_from_layers([layer], input_shape=(1, ))
        model.compile(rmsprop.RMSprop(0.), loss='mae')
        history = model.fit(np.zeros((1, 1)), np.zeros((1, 1)))
        self.assertEqual(history.history['loss'][0], 1.)
        loss = model.evaluate(np.zeros((1, 1)), np.zeros((1, 1)))
        self.assertEqual(loss, 0.)

        # Test that the argument injection performed in `call` is not active
        # when the argument is passed explicitly.
        layer = self._get_layer_with_training_arg()
        inputs = keras.Input(shape=(1, ))
        # Pass `training` by name
        outputs = layer(inputs, training=False)
        model = keras.Model(inputs, outputs)
        model.compile(rmsprop.RMSprop(0.), loss='mae')
        history = model.fit(np.zeros((1, 1)), np.zeros((1, 1)))
        self.assertEqual(history.history['loss'][0], 0.)
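`self._get_layer_with_training_arg` is not shown here. For the assertions to
hold (loss 1.0 while fitting, 0.0 while evaluating, and 0.0 once
training=False is passed explicitly), the layer presumably returns ones when
training and zeros otherwise, with `call` wrapped in a `tf.function` (a
"defun"). A sketch under those assumptions:

def _get_layer_with_training_arg(self):

    class TrainingLayer(keras.layers.Layer):
        """Returns ones when training, zeros otherwise."""

        @def_function.function
        def call(self, inputs, training=None):
            if training is None:
                training = keras.backend.learning_phase()
            return tf_utils.smart_cond(
                training,
                lambda: array_ops.ones_like(inputs),
                lambda: array_ops.zeros_like(inputs))

    return TrainingLayer()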
Example #28
 def test_dynamic_layer_error(self):
     if testing_utils.get_model_type() == 'sequential':
         # TODO(scottzhu): Reenable this once sequential is moved to frozen_keras.
         self.skipTest(
             'Sequential model will check layer instance type and fail.')
     with self.assertRaisesRegexp(TypeError,
                                  'attempting to use Python control flow'):
         model = testing_utils.get_model_from_layers([DynamicLayer()],
                                                     input_shape=(3, ))
         model.compile(rmsprop.RMSprop(0.001), loss='mse')
         model.train_on_batch(np.random.random((2, 3)),
                              np.random.random((2, 3)))
Example #29
 def test_dynamic_layer(self):
     if testing_utils.get_model_type() == 'sequential':
         # TODO(scottzhu): Reenable this once sequential is moved to frozen_keras.
         self.skipTest(
             'Sequential model will check layer instance type and fail.')
     model = testing_utils.get_model_from_layers(
         [DynamicLayer(dynamic=True)], input_shape=(3, ))
     self.assertEqual(model.dynamic, True)
     model.compile(rmsprop.RMSprop(0.001), loss='mse')
     self.assertEqual(model.run_eagerly, True)
     model.train_on_batch(np.random.random((2, 3)), np.random.random(
         (2, 3)))
Example #30
    def test_sequential_model_saving(self):
        if h5py is None:
            self.skipTest('h5py required to run this test')

        with self.cached_session():
            model = keras.models.Sequential()
            model.add(keras.layers.Dense(2, input_shape=(3, )))
            model.add(keras.layers.RepeatVector(3))
            model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
            model.compile(loss=keras.losses.MSE,
                          optimizer=rmsprop.RMSprop(lr=0.0001),
                          metrics=[
                              keras.metrics.categorical_accuracy,
                              keras.metrics.CategoricalAccuracy()
                          ],
                          weighted_metrics=[
                              keras.metrics.categorical_accuracy,
                              keras.metrics.CategoricalAccuracy()
                          ],
                          sample_weight_mode='temporal')
            x = np.random.random((1, 3))
            y = np.random.random((1, 3, 3))
            model.train_on_batch(x, y)

            out = model.predict(x)
            fd, fname = tempfile.mkstemp('.h5')
            keras.models.save_model(model, fname)

            new_model = keras.models.load_model(fname)
            os.close(fd)
            os.remove(fname)

            out2 = new_model.predict(x)
            self.assertAllClose(out, out2, atol=1e-05)

            # test that new updates are the same with both models
            x = np.random.random((1, 3))
            y = np.random.random((1, 3, 3))
            model.train_on_batch(x, y)
            new_model.train_on_batch(x, y)

            x = np.random.random((1, 3))
            y = np.random.random((1, 3, 3))
            eval_out = model.evaluate(x, y)
            eval_out2 = new_model.evaluate(x, y)
            self.assertArrayNear(eval_out, eval_out2, 0.001)

            out = model.predict(x)
            out2 = new_model.predict(x)

            # TODO(b/120930751) This tolerance should be 1e-05,
            # very concerning that its not.
            self.assertAllClose(out, out2, atol=1e-03)