Example No. 1
  def testNumericEquivalenceForAmsgrad(self):
    if tf.executing_eagerly():
      self.skipTest(
          'v1 optimizer does not run in eager mode')
    np.random.seed(1331)
    with test_utils.use_gpu():
      train_samples = 20
      input_dim = 3
      num_classes = 2
      (x, y), _ = test_utils.get_test_data(
          train_samples=train_samples,
          test_samples=10,
          input_shape=(input_dim,),
          num_classes=num_classes)
      y = np_utils.to_categorical(y)

      num_hidden = 5
      model_k_v1 = test_utils.get_small_sequential_mlp(
          num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
      model_k_v2 = test_utils.get_small_sequential_mlp(
          num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
      model_k_v2.set_weights(model_k_v1.get_weights())
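      # Both models start from identical weights, so any divergence measured
      # below is attributable to the two Adam implementations.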

      opt_k_v1 = optimizer_v1.Adam(amsgrad=True)
      opt_k_v2 = adam.Adam(amsgrad=True)

      model_k_v1.compile(
          opt_k_v1,
          loss='categorical_crossentropy',
          metrics=[],
          run_eagerly=test_utils.should_run_eagerly())
      model_k_v2.compile(
          opt_k_v2,
          loss='categorical_crossentropy',
          metrics=[],
          run_eagerly=test_utils.should_run_eagerly())

      hist_k_v1 = model_k_v1.fit(x, y, batch_size=5, epochs=10, shuffle=False)
      hist_k_v2 = model_k_v2.fit(x, y, batch_size=5, epochs=10, shuffle=False)

      self.assertAllClose(model_k_v1.get_weights(), model_k_v2.get_weights())
      self.assertAllClose(opt_k_v1.get_weights(), opt_k_v2.get_weights())
      self.assertAllClose(hist_k_v1.history['loss'], hist_k_v2.history['loss'])
Example No. 2
 def test_string_input(self):
     seq = keras.Sequential(
         [
             keras.layers.InputLayer(input_shape=(1,), dtype=tf.string),
             keras.layers.Lambda(lambda x: x[0]),
         ]
     )
     seq.run_eagerly = test_utils.should_run_eagerly()
     preds = seq.predict([["tensorflow eager"]])
     self.assertEqual(preds.shape, (1,))
Example No. 3
 def test_activity_regularization(self, regularizer):
     (x_train, y_train), _ = self.get_data()
     model = self.create_model(activity_regularizer=regularizer)
     model.compile(
         loss="categorical_crossentropy",
         optimizer="sgd",
         run_eagerly=test_utils.should_run_eagerly(),
     )
     self.assertEqual(len(model.losses), 1)
     model.fit(x_train, y_train, batch_size=10, epochs=1, verbose=0)
Example No. 4
 def _get_simple_bias_model(self):
     model = test_utils.get_model_from_layers(
         [test_utils.Bias()], input_shape=(1,)
     )
     model.compile(
         keras.optimizers.optimizer_v2.gradient_descent.SGD(0.1),
         "mae",
         run_eagerly=test_utils.should_run_eagerly(),
     )
     return model
Example No. 5
    def test_GRU_runtime_with_mask(self):
        # Masking affects which backend is selected, based on whether the mask
        # is strictly right-padded.
        layer = keras.layers.GRU(self.rnn_state_size, return_runtime=True)

        inputs = keras.layers.Input(shape=[self.timestep, self.input_shape],
                                    dtype=tf.float32)
        masked_inputs = keras.layers.Masking()(inputs)
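        # Masking() derives the mask from all-zero timesteps; whether that mask
        # is strictly right-padded determines if the fused GPU kernel can run.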

        outputs, runtime = layer(masked_inputs)
        # Expand the runtime so that it is a 1D tensor instead of a scalar.
        # A TF model does not work with scalar model outputs, especially during
        # aggregation.
        runtime = keras.layers.Lambda(lambda x: tf.expand_dims(x, axis=-1))(
            runtime)
        model = keras.models.Model(inputs=inputs, outputs=[outputs, runtime])

        (x_train, y_train), _ = test_utils.get_test_data(
            train_samples=self.batch,
            test_samples=0,
            input_shape=(self.timestep, self.input_shape),
            num_classes=self.output_shape,
        )
        y_train = np_utils.to_categorical(y_train, self.output_shape)

        model.compile(
            optimizer="sgd",
            loss=["categorical_crossentropy", None],
            run_eagerly=test_utils.should_run_eagerly(),
        )

        model.fit(x_train, y_train)

        # Verify unpadded data.
        _, runtime_value = model.predict(x_train)
        if tf.test.is_gpu_available():
            self.assertEqual(runtime_value[0], gru_lstm_utils.RUNTIME_GPU)
        else:
            self.assertEqual(runtime_value[0], gru_lstm_utils.RUNTIME_CPU)

        # Update x/y to be right-padded by setting the last timestep to 0.
        x_train[:, -1, :] = 0
        y_train[:, -1] = 0
        _, runtime_value = model.predict(x_train)
        if tf.test.is_gpu_available():
            self.assertEqual(runtime_value[0], gru_lstm_utils.RUNTIME_GPU)
        else:
            self.assertEqual(runtime_value[0], gru_lstm_utils.RUNTIME_CPU)

        # Further update x/y to be mix-padded (masks in the middle), and verify
        # that only the CPU kernel can be selected.
        x_train[:, -3, :] = 0
        y_train[:, -3] = 0
        _, runtime_value = model.predict(x_train)
        self.assertEqual(runtime_value[0], gru_lstm_utils.RUNTIME_CPU)
Example No. 6
def get_compiled_multi_io_model_temporal(sample_weight_mode):
    model = get_multi_io_temporal_model()
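    # sample_weight_mode="temporal" makes fit() expect a 2D (sample, timestep)
    # weight array instead of a single weight per sample.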
    model.compile(
        optimizer=optimizer_v2.gradient_descent.SGD(0.1),
        loss="mae",
        metrics=[metrics.MeanAbsoluteError(name="mae")],
        weighted_metrics=[metrics.MeanAbsoluteError(name="mae_2")],
        sample_weight_mode=sample_weight_mode,
        run_eagerly=test_utils.should_run_eagerly(),
    )
    return model
Example No. 7
    def test_sequential_nesting(self):
        model = test_utils.get_small_sequential_mlp(4, 3)
        inner_model = test_utils.get_small_sequential_mlp(4, 5)
        model.add(inner_model)

        model.compile(loss='mse',
                      optimizer='rmsprop',
                      run_eagerly=test_utils.should_run_eagerly())
        x = np.random.random((2, 6))
        y = np.random.random((2, 5))
        model.fit(x, y, epochs=1)
Example No. 8
 def test_add_entropy_loss_on_functional_model(self):
   inputs = Input(shape=(1,))
   targets = Input(shape=(1,))
   outputs = test_utils.Bias()(inputs)
   model = Model([inputs, targets], outputs)
   model.add_loss(losses.binary_crossentropy(targets, outputs))
   model.compile('sgd', run_eagerly=test_utils.should_run_eagerly())
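    # The added cross-entropy loss depends on the Bias variable through
    # `outputs`, so training should not warn about missing gradients.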
   with tf.compat.v1.test.mock.patch.object(logging, 'warning') as mock_log:
     model.fit([self.x, self.y], batch_size=3, epochs=5)
     self.assertNotIn('Gradients do not exist for variables',
                      str(mock_log.call_args))
Example No. 9
 def _get_multiple_input_model(self, subclassed=True):
     if subclassed:
         model = MultiInputSubclassed()
     else:
         model = multi_input_functional()
     model.compile(
         keras.optimizers.optimizer_v2.gradient_descent.SGD(0.1),
         "mae",
         run_eagerly=test_utils.should_run_eagerly(),
     )
     return model
Example No. 10
    def test_restore_old_loss_scale_checkpoint(self):
        # Ensure a checkpoint from TF 2.2 can be loaded. The checkpoint format
        # of LossScaleOptimizer changed, but old checkpoints can still be
        # loaded.
        opt = gradient_descent.SGD(0.1, momentum=0.1)
        opt = loss_scale_optimizer.LossScaleOptimizer(opt)
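        # LossScaleOptimizer wraps the inner optimizer and dynamically scales
        # the loss to avoid float16 gradient underflow.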
        model = sequential.Sequential([core.Dense(2)])

        # The checkpoint and expected values were obtained from the program in
        # testdata/BUILD.
        ckpt_dir = os.path.join(
            flags.FLAGS["test_srcdir"].value,
            "org_keras/keras",
            "mixed_precision/testdata/lso_ckpt_tf2.2",
        )
        # ckpt_dir = test.test_src_dir_path(
        #     'python/keras/mixed_precision/testdata/lso_ckpt_tf2.2')
        model.load_weights(os.path.join(ckpt_dir, "ckpt"))
        model.compile(opt, "mse", run_eagerly=test_utils.should_run_eagerly())
        model(np.zeros((2, 2)))  # Create model weights
        opt._create_all_weights(model.weights)
        expected_kernel = np.array(
            [[9.229685, 10.901115], [10.370763, 9.757362]]
        )
        expected_slot = np.array([[10.049943, 9.917691], [10.049943, 9.917691]])
        self.assertAllClose(self.evaluate(model.weights[0]), expected_kernel)
        self.assertAllClose(
            self.evaluate(opt.get_slot(model.weights[0], "momentum")),
            expected_slot,
        )
        self.assertEqual(self.evaluate(opt.loss_scale), 32768)
        self.assertEqual(self.evaluate(opt.dynamic_counter), 1)

        # Check restoring works even after the model is compiled and the weights
        # have been created.
        model.fit(np.random.normal(size=(2, 2)), np.random.normal(size=(2, 2)))
        self.assertNotAllClose(self.evaluate(model.weights[0]), expected_kernel)
        self.assertNotAllClose(
            self.evaluate(opt.get_slot(model.weights[0], "momentum")),
            expected_slot,
        )
        model.load_weights(os.path.join(ckpt_dir, "ckpt"))
        self.assertAllClose(self.evaluate(model.weights[0]), expected_kernel)
        self.assertAllClose(
            self.evaluate(opt.get_slot(model.weights[0], "momentum")),
            expected_slot,
        )
        self.assertEqual(self.evaluate(opt.loss_scale), 32768)
        self.assertEqual(self.evaluate(opt.dynamic_counter), 1)
Example No. 11
    def test_serializing_model_with_metric_with_custom_object_scope(
            self, value):
        def get_instance(x):
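            # Metrics may arrive as strings, Metric subclasses, or instances;
            # instantiate the classes so compile() receives usable objects.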
            if isinstance(x, str):
                return x
            if isinstance(x, type) and issubclass(x, metrics.Metric):
                return x()
            return x

        metric_input = tf.nest.map_structure(get_instance, value)
        weighted_metric_input = tf.nest.map_structure(get_instance, value)

        with generic_utils.custom_object_scope({
                "MyMeanAbsoluteError": MyMeanAbsoluteError,
                "_my_mae": _my_mae,
                "Bias": test_utils.Bias,
        }):
            model = _get_multi_io_model()
            model.compile(
                optimizer_v2.gradient_descent.SGD(0.1),
                "mae",
                metrics=metric_input,
                weighted_metrics=weighted_metric_input,
                run_eagerly=test_utils.should_run_eagerly(),
            )
            history = model.fit(
                [self.x, self.x],
                [self.y, self.y],
                batch_size=3,
                epochs=3,
                sample_weight=[self.w, self.w],
            )

            # Assert training.
            self.assertAllClose(history.history["loss"], [2.0, 1.6, 1.2], 1e-3)
            eval_results = model.evaluate(
                [self.x, self.x],
                [self.y, self.y],
                sample_weight=[self.w, self.w],
            )

            if h5py is None:
                return
            model.save(self.model_filename)
            loaded_model = keras.models.load_model(self.model_filename)
            loaded_model.predict([self.x, self.x])
            loaded_eval_results = loaded_model.evaluate(
                [self.x, self.x],
                [self.y, self.y],
                sample_weight=[self.w, self.w],
            )

            # Assert all evaluation results are the same.
            self.assertAllClose(eval_results, loaded_eval_results, 1e-9)
Example No. 12
 def _get_compiled_multi_io_model(self):
   model = get_multi_io_model()
   model.compile(
       optimizer='rmsprop',
       loss='mse',
       metrics=[metrics.MeanSquaredError(name='mean_squared_error')],
       weighted_metrics=[
           metrics.MeanSquaredError(name='mean_squared_error_2')
       ],
       run_eagerly=test_utils.should_run_eagerly())
   return model
Example No. 13
 def test_wide_deep_model_with_single_input(self):
     linear_model = linear.LinearModel(units=1)
     dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)])
     wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
     inputs = np.random.uniform(low=-5., high=5., size=(64, 3))
     output = .3 * inputs[:, 0]
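      # With a list of two optimizers, 'sgd' is applied to the linear (wide)
      # branch and 'adam' to the DNN (deep) branch.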
     wide_deep_model.compile(optimizer=['sgd', 'adam'],
                             loss='mse',
                             metrics=[],
                             run_eagerly=test_utils.should_run_eagerly())
     wide_deep_model.fit(inputs, output, epochs=5)
Example No. 14
 def test_zero_regularization(self):
     # Verifies that training with zero regularization works.
     x, y = np.ones((10, 10)), np.ones((10, 3))
      model = test_utils.get_model_from_layers(
          [keras.layers.Dense(3, kernel_regularizer=keras.regularizers.l2(0))],
          input_shape=(10,))
     model.compile('sgd',
                   'mse',
                   run_eagerly=test_utils.should_run_eagerly())
     model.fit(x, y, batch_size=5, epochs=1)
Example No. 15
    def test_setter_update(self):
        """Test the prototyped setter method."""
        input_data = keras.Input(shape=(1,))
        layer = AddingPreprocessingLayer()
        output = layer(input_data)
        model = keras.Model(input_data, output)
        model._run_eagerly = test_utils.should_run_eagerly()

        layer.set_total(15)
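        # With the total set to 15, the layer adds 15 to every input value.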

        self.assertAllEqual([[16], [17], [18]], model.predict([1.0, 2.0, 3.0]))
Example No. 16
 def test_with_masking_layer_gru(self):
     layer_class = keras.layers.GRU
     inputs = np.random.random((2, 3, 4))
     targets = np.abs(np.random.random((2, 3, 5)))
     targets /= targets.sum(axis=-1, keepdims=True)
     model = keras.models.Sequential()
     model.add(keras.layers.Masking(input_shape=(3, 4)))
     model.add(layer_class(units=5, return_sequences=True, unroll=False))
     model.compile(loss='categorical_crossentropy',
                   optimizer='rmsprop',
                   run_eagerly=test_utils.should_run_eagerly())
     model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
Example No. 17
    def test_build_before_fit(self):
        # Fix for b/112433577
        model = test_utils.get_small_sequential_mlp(4, 5)
        model.compile(loss='mse',
                      optimizer='rmsprop',
                      run_eagerly=test_utils.should_run_eagerly())

        model.build((None, 6))

        x = np.random.random((2, 6))
        y = np.random.random((2, 5))
        model.fit(x, y, epochs=1)
Example No. 18
    def test_build_behavior(self):
        # Test graph network creation after __call__
        model = get_model()
        model(np.random.random((2, 6)))
        self.assertLen(model.weights, 4)
        self.assertTrue(model._is_graph_network)
        self.assertLen(model.inputs, 1)
        self.assertLen(model.outputs, 1)
        self.assertEqual(model.inputs[0].shape.as_list(), [2, 6])
        self.assertEqual(model.outputs[0].shape.as_list(), [2, 2])

        # Test effect of new __call__ with a different shape
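        # The batch dimension, captured as 2 by the first call, is relaxed to
        # None once a different batch size is seen.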
        model(np.random.random((3, 6)))
        self.assertLen(model.inputs, 1)
        self.assertLen(model.outputs, 1)
        self.assertEqual(model.inputs[0].shape.as_list(), [None, 6])
        self.assertEqual(model.outputs[0].shape.as_list(), [None, 2])
        model(np.random.random((4, 6)))
        self.assertLen(model.inputs, 1)
        self.assertLen(model.outputs, 1)
        self.assertEqual(model.inputs[0].shape.as_list(), [None, 6])
        self.assertEqual(model.outputs[0].shape.as_list(), [None, 2])

        # Test graph network creation after build
        model = get_model()
        model.build((None, 6))
        self.assertLen(model.weights, 4)
        self.assertTrue(model._is_graph_network)
        self.assertLen(model.inputs, 1)
        self.assertLen(model.outputs, 1)
        self.assertEqual(model.inputs[0].shape.as_list(), [None, 6])
        self.assertEqual(model.outputs[0].shape.as_list(), [None, 2])

        # Test graph network creation after compile/fit
        model = get_model()
        model.compile(
            loss="mse",
            optimizer="rmsprop",
            metrics=[keras.metrics.CategoricalAccuracy()],
            run_eagerly=test_utils.should_run_eagerly(),
        )
        model.fit(np.zeros((2, 6)), np.zeros((2, 2)))
        self.assertLen(model.weights, 4)
        self.assertTrue(model._is_graph_network)
        self.assertLen(model.inputs, 1)
        self.assertLen(model.outputs, 1)
        # Inconsistency here: with eager `fit`, the model is built with shape
        # (2, 6), but with graph function `fit`, it is built with shape `(None,
        # 6)`.  This is likely due to our assumption "the batch size should be
        # dynamic" at the level of `Model`. TODO(fchollet): investigate and
        # resolve.
        self.assertEqual(model.inputs[0].shape.as_list()[-1], 6)
        self.assertEqual(model.outputs[0].shape.as_list()[-1], 2)
Example No. 19
    def test_clone_and_build_sequential_without_inputs_defined(self):
        model = models.Sequential(_get_layers(input_shape=None))
        model.compile(test_utils.get_v2_optimizer('rmsprop'),
                      'mse',
                      metrics=['acc', metrics.categorical_accuracy],
                      run_eagerly=test_utils.should_run_eagerly())
        self._clone_and_build_test_helper(model, 'sequential')

        inp = np.random.random((10, 4))
        out = np.random.random((10, 4))
        model.train_on_batch(inp, out)
        self._clone_and_build_test_helper(model, 'sequential')
Example No. 20
 def _get_compiled_multi_io_model(self):
     model = get_multi_io_model()
     model.compile(
         optimizer="rmsprop",
         loss="mse",
         metrics=[metrics.MeanSquaredError(name="mean_squared_error")],
         weighted_metrics=[
             metrics.MeanSquaredError(name="mean_squared_error_2")
         ],
         run_eagerly=test_utils.should_run_eagerly(),
     )
     return model
Example No. 21
    def test_dataset_fit_correctness(self):
        class SumLayer(keras.layers.Layer):
            def build(self, _):
                self.w = self.add_weight('w', ())

            def call(self, inputs):
                return keras.backend.sum(inputs, axis=1,
                                         keepdims=True) + self.w * 0

        model = keras.Sequential([SumLayer(input_shape=(2, ))])
        model.compile('rmsprop',
                      loss='mae',
                      run_eagerly=test_utils.should_run_eagerly())

        inputs = np.zeros((40, 2), dtype=np.float32)
        inputs[10:20, :] = 2
        inputs[20:30, :] = 1
        inputs[30:, :] = 4
        targets = np.zeros((40, 1), dtype=np.float32)
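        # SumLayer predicts each row's sum (the `w * 0` term merely keeps the
        # weight in the graph), and targets are zero, so the per-sample MAE is
        # the row sum; the epoch losses below are means of those row sums.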

        # Test correctness with `steps_per_epoch`.
        train_dataset = tf.data.Dataset.from_tensor_slices(
            (inputs, targets)).batch(10)
        val_dataset = tf.data.Dataset.from_tensor_slices(
            (inputs, targets)).batch(10)
        history = model.fit(train_dataset,
                            epochs=2,
                            steps_per_epoch=2,
                            verbose=1,
                            validation_data=val_dataset,
                            validation_steps=2)
        self.assertAllClose(history.history['loss'],
                            [inputs[:20].sum() / 20, inputs[20:].sum() / 20])
        # The validation dataset will be reset at the end of each validation run.
        self.assertAllClose(history.history['val_loss'],
                            [inputs[:20].sum() / 20, inputs[:20].sum() / 20])

        # Test correctness with dataset reset.
        train_dataset = tf.data.Dataset.from_tensor_slices(
            (inputs, targets)).batch(10)
        val_dataset = tf.data.Dataset.from_tensor_slices(
            (inputs, targets)).batch(10)
        history = model.fit(train_dataset,
                            epochs=2,
                            verbose=1,
                            validation_data=val_dataset)
        self.assertAllClose(
            history.history['loss'],
            [inputs.sum() / 40, inputs.sum() / 40])
        self.assertAllClose(
            history.history['val_loss'],
            [inputs.sum() / 40, inputs.sum() / 40])
Example No. 22
  def test_generator_input_to_fit_eval_predict(self):
    val_data = np.ones([10, 10], np.float32), np.ones([10, 1], np.float32)

    def ones_generator():
      while True:
        yield np.ones([10, 10], np.float32), np.ones([10, 1], np.float32)

    model = test_utils.get_small_mlp(
        num_hidden=10, num_classes=1, input_dim=10)

    model.compile(
        rmsprop.RMSprop(0.001),
        'binary_crossentropy',
        run_eagerly=test_utils.should_run_eagerly())
    model.fit(
        ones_generator(),
        steps_per_epoch=2,
        validation_data=val_data,
        epochs=2)
    model.evaluate(ones_generator(), steps=2)
    model.predict(ones_generator(), steps=2)

    # Test with a changing batch size
    model = test_utils.get_small_mlp(
        num_hidden=3, num_classes=4, input_dim=2)
    model.compile(
        loss='mse',
        optimizer=rmsprop.RMSprop(1e-3),
        metrics=['mae', metrics_module.CategoricalAccuracy()])
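    # fit_generator is the legacy generator API; the fit/evaluate/predict calls
    # further below exercise the same generators through the unified API.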
    model.fit_generator(custom_generator_changing_batch_size(),
                        steps_per_epoch=5,
                        epochs=1,
                        verbose=1,
                        max_queue_size=10,
                        use_multiprocessing=False)
    model.fit_generator(custom_generator_changing_batch_size(),
                        steps_per_epoch=5,
                        epochs=1,
                        verbose=1,
                        max_queue_size=10,
                        use_multiprocessing=False,
                        validation_data=custom_generator_changing_batch_size(),
                        validation_steps=10)

    model.fit(
        custom_generator_changing_batch_size(),
        steps_per_epoch=5,
        validation_data=custom_generator_changing_batch_size(),
        validation_steps=10,
        epochs=2)
    model.evaluate(custom_generator_changing_batch_size(), steps=5)
    model.predict(custom_generator_changing_batch_size(), steps=5)
Example No. 23
    def test_activity_regularizer_batch_independent(self):
        inputs = layers.Input(shape=(10, ))
        x = layers.Dense(10, activation='relu',
                         activity_regularizer='l2')(inputs)
        outputs = layers.Dense(1, activation='sigmoid')(x)
        model = Model(inputs, outputs)

        optimizer = RMSPropOptimizer(learning_rate=0.001)
        model.compile(optimizer, run_eagerly=test_utils.should_run_eagerly())
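        # Keras divides activity-regularization losses by the batch size, so
        # the regularization loss below should not depend on the batch size.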

        loss_small_batch = model.test_on_batch(np.ones((10, 10), 'float32'))
        loss_big_batch = model.test_on_batch(np.ones((20, 10), 'float32'))
        self.assertAlmostEqual(loss_small_batch, loss_big_batch, places=4)
Example No. 24
    def test_model_backend_float64_use_cases(self):
        # Test case for GitHub issue 19318
        floatx = keras.backend.floatx()
        keras.backend.set_floatx('float64')

        x = keras.Input((5, ))
        y = keras.layers.Dense(1)(x)
        model = keras.models.Model(x, y)
        model.compile(test_utils.get_v2_optimizer('rmsprop'),
                      'mse',
                      run_eagerly=test_utils.should_run_eagerly())

        keras.backend.set_floatx(floatx)
Example No. 25
    def test_ragged_tensor_rebatched_outputs(self):
        # Create a model that accepts an input, converts it to Ragged, and
        # converts the ragged tensor back to a dense tensor.
        layers = [ToRagged(padding=0)]
        model = test_utils.get_model_from_layers(layers, input_shape=(None, ))
        model._run_eagerly = test_utils.should_run_eagerly()

        # Define some input data with additional padding.
        input_data = np.array([[1, 0, 0], [2, 3, 0], [4, 0, 0], [5, 6, 0]])
        output = model.predict(input_data, batch_size=2)

        expected_values = [[1], [2, 3], [4], [5, 6]]
        self.assertAllEqual(expected_values, output)
Example No. 26
 def test_regularization_shared_layer(self, regularizer):
     dense_layer = keras.layers.Dense(
         NUM_CLASSES,
         kernel_regularizer=regularizer,
         activity_regularizer=regularizer,
     )
     model = self.create_multi_input_model_from(dense_layer, dense_layer)
     model.compile(
         loss="categorical_crossentropy",
         optimizer="sgd",
         run_eagerly=test_utils.should_run_eagerly(),
     )
     self.assertLen(model.losses, 5)
Example No. 27
  def test_clone_and_build_non_compiled_model(self):
    inp = np.random.random((10, 4))
    out = np.random.random((10, 4))

    model = _get_model()

    with self.assertRaisesRegex(ValueError, 'has not been compiled'):
      models.clone_and_build_model(model, compile_clone=True)

    is_subclassed = (test_utils.get_model_type() == 'subclass')
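    # Subclassed models cannot be cloned from a layer graph, so they are reset
    # in place instead.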
    # With placeholder creation
    new_model = models.clone_and_build_model(
        model, compile_clone=False, in_place_reset=is_subclassed)
    with self.assertRaisesRegex(RuntimeError, 'must compile'):
      new_model.evaluate(inp, out)
    with self.assertRaisesRegex(RuntimeError, 'must compile'):
      new_model.train_on_batch(inp, out)
    new_model.compile(
        test_utils.get_v2_optimizer('rmsprop'),
        'mse',
        run_eagerly=test_utils.should_run_eagerly())
    new_model.train_on_batch(inp, out)

    # Create new tensors for inputs.
    input_a = keras.Input(shape=(4,))
    new_model = models.clone_and_build_model(
        model,
        input_tensors=input_a,
        compile_clone=False,
        in_place_reset=is_subclassed)
    with self.assertRaisesRegex(RuntimeError, 'must compile'):
      new_model.evaluate(inp, out)
    with self.assertRaisesRegex(RuntimeError, 'must compile'):
      new_model.train_on_batch(inp, out)
    new_model.compile(
        test_utils.get_v2_optimizer('rmsprop'),
        'mse',
        run_eagerly=test_utils.should_run_eagerly())
    new_model.train_on_batch(inp, out)
Example No. 28
    def test_pre_build_adapt_update_numpy(self):
        """Test that preproc layers can adapt() before build() is called."""
        input_dataset = np.array([1, 2, 3, 4, 5])

        layer = AddingPreprocessingLayer()
        layer.adapt(input_dataset)
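        # adapt() presumably accumulates the dataset sum (1+2+3+4+5 = 15),
        # matching the +15 offset asserted on the predictions below.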

        input_data = keras.Input(shape=(1,))
        output = layer(input_data)
        model = keras.Model(input_data, output)
        model._run_eagerly = test_utils.should_run_eagerly()

        self.assertAllEqual([[16], [17], [18]], model.predict([1.0, 2.0, 3.0]))
Example No. 29
    def test_activity_regularizer_loss_value(self):
        layer = layers.Dense(1,
                             kernel_initializer='zeros',
                             bias_initializer='ones',
                             activity_regularizer='l2')

        model = test_utils.get_model_from_layers([layer], input_shape=(10, ))

        x = np.ones((10, 10), 'float32')
        optimizer = RMSPropOptimizer(learning_rate=0.001)
        model.compile(optimizer, run_eagerly=test_utils.should_run_eagerly())
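        # With a zero kernel and a ones bias, every output is 1, so the default
        # l2 penalty is 0.01 * sum of 10 squared outputs = 0.1; divided by the
        # batch size of 10, the reported loss is 0.01.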
        loss = model.test_on_batch(x)
        self.assertAlmostEqual(0.01, loss, places=4)
Example No. 30
    def test_loss_on_model_fit(self):
        inputs = Input(shape=(1, ))
        targets = Input(shape=(1, ))
        outputs = test_utils.Bias()(inputs)
        model = Model([inputs, targets], outputs)
        model.add_loss(MAE()(targets, outputs))
        model.add_loss(tf.reduce_mean(mae(targets, outputs)))
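        # Both added terms compute the same mean absolute error, so the total
        # loss is twice the MAE between targets and the Bias layer's outputs.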
        model.compile(optimizer_v2.gradient_descent.SGD(0.05),
                      run_eagerly=test_utils.should_run_eagerly())

        history = model.fit([self.x, self.y], batch_size=3, epochs=5)
        self.assertAllClose(history.history['loss'], [2., 1.8, 1.6, 1.4, 1.2],
                            1e-3)