Example #1
    def test_revive(self):
        input_shape = None
        if testing_utils.get_model_type() == 'functional':
            input_shape = (2, 3)

        layer_with_config = CustomLayerWithConfig(1., 2)
        layer_without_config = CustomLayerNoConfig(3., 4)
        subclassed_with_config = SubclassedModelWithConfig(4., 6.)
        subclassed_without_config = SubclassedModelNoConfig(7., 8.)

        inputs = keras.Input((2, 3))
        x = CustomLayerWithConfig(1., 2)(inputs)
        x = CustomLayerNoConfig(3., 4)(x)
        x = SubclassedModelWithConfig(4., 6.)(x)
        x = SubclassedModelNoConfig(7., 8.)(x)
        inner_model_functional = keras.Model(inputs, x)

        inner_model_sequential = keras.Sequential([
            CustomLayerWithConfig(1., 2),
            CustomLayerNoConfig(3., 4),
            SubclassedModelWithConfig(4., 6.),
            SubclassedModelNoConfig(7., 8.)
        ])

        class SubclassedModel(keras.Model):
            def __init__(self):
                super(SubclassedModel, self).__init__()
                self.all_layers = [
                    CustomLayerWithConfig(1., 2),
                    CustomLayerNoConfig(3., 4),
                    SubclassedModelWithConfig(4., 6.),
                    SubclassedModelNoConfig(7., 8.)
                ]

            def call(self, inputs):
                x = inputs
                for layer in self.all_layers:
                    x = layer(x)
                return x

        inner_model_subclassed = SubclassedModel()

        layers = [
            layer_with_config, layer_without_config, subclassed_with_config,
            subclassed_without_config, inner_model_functional,
            inner_model_sequential, inner_model_subclassed
        ]
        model = testing_utils.get_model_from_layers(layers,
                                                    input_shape=input_shape)
        # Run data through the Model to create save spec and weights.
        model.predict(np.ones((10, 2, 3)), batch_size=10)

        # Test that the correct checkpointed values are loaded, whether the layer is
        # created from the config or SavedModel.
        layer_with_config.c.assign(2 * layer_with_config.c)
        layer_without_config.c.assign(3 * layer_without_config.c)

        model.save(self.path, save_format='tf')
        revived = keras_load.load(self.path)
        self._assert_revived_correctness(model, revived)
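Every example on this page funnels its layer stack through testing_utils.get_model_from_layers, which assembles the same layers into a functional, sequential, or subclassed model depending on the test's model type. Below is a simplified sketch of that behavior, assuming a _SubclassModel wrapper akin to the SubclassedModel above; the real helper accepts more options (dtypes, sparse/ragged inputs, names).

def get_model_from_layers(layers, input_shape=None, model_type=None):
    # Simplified sketch, not the real implementation.
    model_type = model_type or testing_utils.get_model_type()
    if model_type == 'subclass':
        # _SubclassModel (assumed) is a keras.Model that calls the layers in
        # order, like the SubclassedModel defined in Example #1.
        return _SubclassModel(layers)
    if model_type == 'sequential':
        model = keras.models.Sequential()
        if input_shape:
            model.add(keras.layers.InputLayer(input_shape=input_shape))
        for layer in layers:
            model.add(layer)
        return model
    if model_type == 'functional':
        if not input_shape:
            raise ValueError('Functional models require an input_shape.')
        inputs = keras.Input(shape=input_shape)
        outputs = inputs
        for layer in layers:
            outputs = layer(outputs)
        return keras.Model(inputs, outputs)
    raise ValueError('Unknown model type: ' + str(model_type))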
Example #2
    def test_lambda_skip_state_variable_from_initializer(self):
        # Force the initializers to use the tf.random.Generator, which will contain
        # the state variable.
        kernel_initializer = initializers.RandomNormalV2()
        kernel_initializer._random_generator._force_generator = True
        dense = keras.layers.Dense(1,
                                   use_bias=False,
                                   kernel_initializer=kernel_initializer)

        def lambda_fn(x):
            return dense(x + 1)  # Dense layer is built on first call

        # While it is generally not advised to mix Variables with Lambda layers, if
        # the variables are explicitly set as attributes then they are still
        # tracked. This is consistent with the base Layer behavior.
        layer = keras.layers.Lambda(lambda_fn)
        layer.dense = dense

        model = testing_utils.get_model_from_layers([layer],
                                                    input_shape=(10, ))
        model.compile(keras.optimizer_v2.gradient_descent.SGD(0.1),
                      'mae',
                      run_eagerly=testing_utils.should_run_eagerly())
        x, y = np.ones((10, 10), 'float32'), 2 * np.ones((10, 10), 'float32')
        model.fit(x, y, batch_size=2, epochs=2, validation_data=(x, y))
        self.assertLen(model.trainable_weights, 1)
Example #3
    def test_metrics_correctness_with_dataset(self):
        layers = [
            keras.layers.Dense(8,
                               activation='relu',
                               input_dim=4,
                               kernel_initializer='ones'),
            keras.layers.Dense(1,
                               activation='sigmoid',
                               kernel_initializer='ones')
        ]

        model = testing_utils.get_model_from_layers(layers, (4, ))

        model.compile(loss='binary_crossentropy',
                      metrics=['accuracy',
                               metrics_module.BinaryAccuracy()],
                      optimizer='rmsprop',
                      run_eagerly=testing_utils.should_run_eagerly())

        np.random.seed(123)
        x = np.random.randint(10, size=(100, 4)).astype(np.float32)
        y = np.random.randint(2, size=(100, 1)).astype(np.float32)
        dataset = tf.data.Dataset.from_tensor_slices((x, y))
        dataset = dataset.batch(10)
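        # With all-ones kernels the sigmoid outputs saturate near 1, so both
        # accuracy metrics should match the share of positive labels: roughly
        # 0.5 for the random labels here, and exactly 0 once every label is
        # zeroed out below.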
        outs = model.evaluate(dataset, steps=10)
        self.assertEqual(np.around(outs[1], decimals=1), 0.5)
        self.assertEqual(np.around(outs[2], decimals=1), 0.5)

        y = np.zeros((100, 1), dtype=np.float32)
        dataset = tf.data.Dataset.from_tensor_slices((x, y))
        dataset = dataset.repeat(100)
        dataset = dataset.batch(10)
        outs = model.evaluate(dataset, steps=10)
        self.assertEqual(outs[1], 0.)
        self.assertEqual(outs[2], 0.)
Example #4
  def test_metadata_input_spec(self):
    class LayerWithNestedSpec(keras.layers.Layer):

      def __init__(self):
        super(LayerWithNestedSpec, self).__init__()
        self.input_spec = {
            'a': keras.layers.InputSpec(max_ndim=3, axes={-1: 2}),
            'b': keras.layers.InputSpec(shape=(None, 2, 3), dtype='int32')}

      @property
      def _use_input_spec_as_call_signature(self):
        return True

    layer = LayerWithNestedSpec()
    saved_model_dir = self._save_model_dir()
    model = testing_utils.get_model_from_layers(
        [layer], model_type='subclass')
    model({'a': tf.constant([[2, 4]]),
           'b': tf.ones([1, 2, 3], dtype=tf.int32)})
    model.save(saved_model_dir, save_format='tf')
    loaded_model = keras_load.load(saved_model_dir)
    loaded = loaded_model.layers[-1]
    self.assertEqual(3, loaded.input_spec['a'].max_ndim)
    self.assertEqual({-1: 2}, loaded.input_spec['a'].axes)
    self.assertAllEqual([None, 2, 3], loaded.input_spec['b'].shape)
    self.assertEqual('int32', loaded.input_spec['b'].dtype)
Example #5
 def test_must_restore_from_config_registration(self):
   layer = GlobalLayerThatShouldFailIfNotAdded()
   saved_model_dir = self._save_model_dir()
   model = testing_utils.get_model_from_layers(
       [layer], input_shape=[3], model_type='functional')
   model.save(saved_model_dir, save_format='tf')
   _ = keras_load.load(saved_model_dir)
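GlobalLayerThatShouldFailIfNotAdded is defined elsewhere in the test module. For this load to succeed without a CustomObjectScope (contrast Examples #14 and #16), the class is presumably registered with the global Keras serialization registry, roughly like this sketch (the package name is an assumption):

@generic_utils.register_keras_serializable(package='Testing')  # assumed
class GlobalLayerThatShouldFailIfNotAdded(keras.layers.Layer):
    _must_restore_from_config = True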
Example #6
    def test_vector_classification(self):
        np.random.seed(1337)
        (x_train, y_train), _ = testing_utils.get_test_data(train_samples=100,
                                                            test_samples=0,
                                                            input_shape=(10, ),
                                                            num_classes=2)
        y_train = np_utils.to_categorical(y_train)

        model = testing_utils.get_model_from_layers(
            [
                keras.layers.Dense(16, activation='relu'),
                keras.layers.Dropout(0.1),
                keras.layers.Dense(y_train.shape[-1], activation='softmax')
            ],
            input_shape=x_train.shape[1:])
        model.compile(loss='categorical_crossentropy',
                      optimizer=keras.optimizer_v2.adam.Adam(0.005),
                      metrics=['acc'],
                      run_eagerly=testing_utils.should_run_eagerly())
        history = model.fit(x_train,
                            y_train,
                            epochs=10,
                            batch_size=10,
                            validation_data=(x_train, y_train),
                            verbose=2)
        self.assertGreater(history.history['val_acc'][-1], 0.7)
        _, val_acc = model.evaluate(x_train, y_train)
        self.assertAlmostEqual(history.history['val_acc'][-1], val_acc)
        predictions = model.predict(x_train)
        self.assertEqual(predictions.shape, (x_train.shape[0], 2))
Example #7
 def _get_simple_bias_model(self):
     model = testing_utils.get_model_from_layers([testing_utils.Bias()],
                                                 input_shape=(1, ))
     model.compile(keras.optimizer_v2.gradient_descent.SGD(0.1),
                   'mae',
                   run_eagerly=testing_utils.should_run_eagerly())
     return model
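testing_utils.Bias (also used in Example #18) is a one-weight helper layer; a minimal sketch consistent with how it is used here:

class Bias(keras.layers.Layer):
    # Hypothetical sketch: adds a single trainable scalar bias to its inputs.
    def build(self, input_shape):
        self.bias = self.add_weight('bias', shape=(1,), initializer='zeros')

    def call(self, inputs):
        return inputs + self.bias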
Example #8
def add_metric_step(defun):
    optimizer = keras.optimizer_v2.rmsprop.RMSprop()
    model = testing_utils.get_model_from_layers([
        LayerWithMetrics(),
        keras.layers.Dense(1, kernel_initializer='zeros', activation='softmax')
    ],
                                                input_shape=(10, ))

    def train_step(x, y):
        with tf.GradientTape() as tape:
            y_pred_1 = model(x)
            y_pred_2 = model(2 * x)
            y_pred = y_pred_1 + y_pred_2
            loss = keras.losses.mean_squared_error(y, y_pred)
        gradients = tape.gradient(loss, model.trainable_weights)
        optimizer.apply_gradients(zip(gradients, model.trainable_weights))
        assert len(model.metrics) == 2
        return [m.result() for m in model.metrics]

    if defun:
        train_step = tf.function(train_step)

    x, y = tf.ones((10, 10)), tf.zeros((10, 1))
    metrics = train_step(x, y)
    assert np.allclose(metrics[0], 1.5)
    assert np.allclose(metrics[1], 1.5)
    return metrics
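LayerWithMetrics is defined elsewhere in the test file. Its exact definition is not shown, but the asserted 1.5 follows from any layer tracking running means of its inputs: the two calls see an all-ones and an all-twos batch, so each mean converges to 1.5. A plausible sketch:

class LayerWithMetrics(keras.layers.Layer):
    # Hypothetical sketch: tracks the mean of its inputs two ways, via a Mean
    # metric object and via add_metric on a plain tensor (legacy aggregation).
    def build(self, input_shape):
        self.mean_obj = keras.metrics.Mean(name='metric_obj')

    def call(self, inputs):
        self.add_metric(self.mean_obj(tf.reduce_mean(inputs)))
        self.add_metric(tf.reduce_mean(inputs), name='metric_tensor',
                        aggregation='mean')
        return inputs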
Example #9
  def test_activity_regularizer(self):
    loss = {}
    for reg in [None, 'l2']:
      model_layers = [
          layers.Dense(
              10,
              activation='relu',
              activity_regularizer=reg,
              kernel_initializer='ones',
              use_bias=False),
          layers.Dense(
              1,
              activation='sigmoid',
              kernel_initializer='ones',
              use_bias=False),
      ]

      model = testing_utils.get_model_from_layers(
          model_layers, input_shape=(10,))

      x = np.ones((10, 10), 'float32')
      y = np.zeros((10, 1), 'float32')

      optimizer = RMSPropOptimizer(learning_rate=0.001)
      model.compile(
          optimizer,
          'binary_crossentropy',
          run_eagerly=testing_utils.should_run_eagerly())
      model.fit(x, y, batch_size=2, epochs=5)
      loss[reg] = model.evaluate(x, y)
    self.assertLess(loss[None], loss['l2'])
Example #10
 def test_layer_as_activation(self):
     layer = keras.layers.Dense(1, activation=keras.layers.ReLU())
     model = testing_utils.get_model_from_layers([layer],
                                                 input_shape=(10, ))
     model.compile('sgd',
                   'mse',
                   run_eagerly=testing_utils.should_run_eagerly())
     model.fit(np.ones((10, 10)), np.ones((10, 1)), batch_size=2)
Example #11
 def test_zero_regularization(self):
     # Verifies that training with zero regularization works.
     x, y = np.ones((10, 10)), np.ones((10, 3))
     model = testing_utils.get_model_from_layers([
         keras.layers.Dense(3, kernel_regularizer=keras.regularizers.l2(0))
     ],
                                                 input_shape=(10, ))
     model.compile('sgd',
                   'mse',
                   run_eagerly=testing_utils.should_run_eagerly())
     model.fit(x, y, batch_size=5, epochs=1)
Example #12
  def test_internal_sparse_tensors(self):
    # Create a model that accepts an input, converts it to Sparse, and
    # converts the sparse tensor back to a dense tensor.
    layers = [ToSparse(), ToDense(default_value=-1)]
    model = testing_utils.get_model_from_layers(layers, input_shape=(None,))

    # Define some input data with additional padding.
    input_data = np.array([[1, 0, 0], [2, 3, 0]])
    expected_output = np.array([[1, -1, -1], [2, 3, -1]])
    output = model.predict(input_data)
    self.assertAllEqual(expected_output, output)
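ToSparse and ToDense come from the test's shared helpers; minimal sketches that match the behavior exercised above (names and details are assumptions):

class ToSparse(keras.layers.Layer):
    # Hypothetical sketch: converts a padded dense tensor to a
    # tf.SparseTensor, treating 0 as padding.
    def call(self, inputs):
        indices = tf.where(tf.not_equal(inputs, 0))
        values = tf.gather_nd(inputs, indices)
        shape = tf.shape(inputs, out_type=tf.int64)
        return tf.SparseTensor(indices, values, dense_shape=shape)


class ToDense(keras.layers.Layer):
    # Hypothetical sketch: converts a sparse or ragged tensor back to dense,
    # filling gaps with default_value (hence the -1 entries expected above).
    def __init__(self, default_value, **kwargs):
        super(ToDense, self).__init__(**kwargs)
        self._default_value = default_value

    def call(self, inputs):
        if isinstance(inputs, tf.RaggedTensor):
            return inputs.to_tensor(default_value=self._default_value)
        return tf.sparse.to_dense(inputs, default_value=self._default_value)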
Example #13
    def test_save_and_load(self):
        saved_model_dir = self._save_model_dir()
        save_format = testing_utils.get_save_format()
        save_kwargs = testing_utils.get_save_kwargs()

        if ((save_format == 'h5' or not save_kwargs.get('save_traces', True))
                and testing_utils.get_model_type() == 'subclass'):
            # HDF5 format currently does not allow saving subclassed models.
            # When saving with `save_traces=False`, the subclassed model must have a
            # get_config/from_config, which the autogenerated model does not have.
            return

        with self.cached_session():
            model = testing_utils.get_model_from_layers([
                keras.layers.Dense(2),
                keras.layers.RepeatVector(3),
                keras.layers.TimeDistributed(keras.layers.Dense(3))
            ],
                                                        input_shape=(3, ))
            model.compile(
                loss=keras.losses.MSE,
                optimizer=keras.optimizer_v2.rmsprop.RMSprop(lr=0.0001),
                metrics=[
                    keras.metrics.categorical_accuracy,
                    keras.metrics.CategoricalCrossentropy(
                        name='cce', label_smoothing=tf.constant(0.2)),
                ],
                weighted_metrics=[
                    keras.metrics.categorical_crossentropy,
                    keras.metrics.CategoricalCrossentropy(
                        name='cce', label_smoothing=tf.constant(0.2)),
                ],
                sample_weight_mode='temporal')

            x = np.random.random((1, 3))
            y = np.random.random((1, 3, 3))
            model.train_on_batch(x, y)

            out = model.predict(x)
            keras.models.save_model(model,
                                    saved_model_dir,
                                    save_format=save_format,
                                    **save_kwargs)

            loaded_model = keras.models.load_model(saved_model_dir)
            self._assert_same_weights_and_metrics(model, loaded_model)

            out2 = loaded_model.predict(x)
            self.assertAllClose(out, out2, atol=1e-05)

            eval_out = model.evaluate(x, y)
            eval_out2 = loaded_model.evaluate(x, y)
            self.assertArrayNear(eval_out, eval_out2, 0.001)
Example #14
  def test_must_restore_from_config_fails_if_layer_is_not_in_scope(self):

    class LayerThatShouldFailIfNotAdded(keras.layers.Layer):
      _must_restore_from_config = True

    layer = LayerThatShouldFailIfNotAdded()
    saved_model_dir = self._save_model_dir()
    model = testing_utils.get_model_from_layers(
        [layer], input_shape=[3], model_type='functional')
    model.save(saved_model_dir, save_format='tf')
    with self.assertRaisesRegex(RuntimeError, 'Unable to restore a layer of'):
      _ = keras_load.load(saved_model_dir)
Example #15
  def test_ragged_tensor_rebatched_outputs(self):
    # Create a model that accepts an input and converts it to a ragged
    # tensor. predict() should rebatch the ragged rows across batches.
    layers = [ToRagged(padding=0)]
    model = testing_utils.get_model_from_layers(layers, input_shape=(None,))
    model._run_eagerly = testing_utils.should_run_eagerly()

    # Define some input data with additional padding.
    input_data = np.array([[1, 0, 0], [2, 3, 0], [4, 0, 0], [5, 6, 0]])
    output = model.predict(input_data, batch_size=2)

    expected_values = [[1], [2, 3], [4], [5, 6]]
    self.assertAllEqual(expected_values, output)
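ToRagged is another shared helper; a minimal sketch consistent with the expected values (padding zeros are stripped from each row):

class ToRagged(keras.layers.Layer):
    # Hypothetical sketch: strips a padding value from a dense tensor,
    # yielding a tf.RaggedTensor.
    def __init__(self, padding, **kwargs):
        super(ToRagged, self).__init__(**kwargs)
        self._padding = padding

    def call(self, inputs):
        return tf.RaggedTensor.from_tensor(inputs, padding=self._padding)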
Example #16
  def test_must_restore_from_config_custom_object_scope(self):

    class LayerThatShouldFailIfNotAdded(keras.layers.Layer):
      _must_restore_from_config = True

    layer = LayerThatShouldFailIfNotAdded()
    model = testing_utils.get_model_from_layers(
        [layer], input_shape=[3], model_type='functional')
    saved_model_dir = self._save_model_dir()
    model.save(saved_model_dir, save_format='tf')
    with generic_utils.CustomObjectScope(
        {'LayerThatShouldFailIfNotAdded': LayerThatShouldFailIfNotAdded}):
      _ = keras_load.load(saved_model_dir)
Example #17
 def _get_model(self):
   x = layers.Dense(3, kernel_initializer='ones', trainable=False)
   out = layers.Dense(
       1, kernel_initializer='ones', name='output', trainable=False)
   model = testing_utils.get_model_from_layers([x, out], input_shape=(1,))
   model.compile(
       optimizer='rmsprop',
       loss='mse',
       metrics=[metrics.MeanSquaredError(name='mean_squared_error')],
       weighted_metrics=[
           metrics.MeanSquaredError(name='mean_squared_error_2')
       ],
       run_eagerly=testing_utils.should_run_eagerly())
   return model
Example #18
  def test_loss_callable_on_model_fit(self):
    model = testing_utils.get_model_from_layers([testing_utils.Bias()],
                                                input_shape=(1,))

    def callable_loss():
      return tf.reduce_sum(model.weights)

    model.add_loss(callable_loss)
    model.compile(
        optimizer_v2.gradient_descent.SGD(0.1),
        run_eagerly=testing_utils.should_run_eagerly())

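    # The model's only weight is the Bias variable, initialized to zero, so
    # the callable loss is just that scalar; each SGD(0.1) update lowers it
    # by a fixed step, which is why the recorded losses decrease linearly
    # from 0.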
    history = model.fit(self.x, batch_size=3, epochs=5)
    self.assertAllClose(history.history['loss'], [0., -.1, -.2, -.3, -.4], 1e-3)
Example #19
    def test_generator_dynamic_shapes(self):

        x = [
            'I think juice is great',
            'unknown is the best language since slicedbread',
            'a a a a a a a',
            'matmul',
            'Yaks are also quite nice',
        ]
        y = [1, 0, 0, 1, 1]

        vocab = {
            word: i + 1
            for i, word in enumerate(
                sorted(set(itertools.chain(*[i.split() for i in x]))))
        }

        def data_gen(batch_size=2):
            np.random.seed(0)
            data = list(zip(x, y)) * 10
            np.random.shuffle(data)

            def pack_and_pad(queue):
                x = [[vocab[j] for j in i[0].split()] for i in queue]
                pad_len = max(len(i) for i in x)
                x = np.array([i + [0] * (pad_len - len(i)) for i in x])
                y = np.array([i[1] for i in queue])
                del queue[:]
                return x, y[:, np.newaxis]

            queue = []
            for i, element in enumerate(data):
                queue.append(element)
                if not (i + 1) % batch_size:
                    yield pack_and_pad(queue)

            if queue:
                # Last partial batch
                yield pack_and_pad(queue)

        model = testing_utils.get_model_from_layers([
            layers_module.Embedding(input_dim=len(vocab) + 1, output_dim=4),
            layers_module.SimpleRNN(units=1),
            layers_module.Activation('sigmoid')
        ],
                                                    input_shape=(None, ))

        model.compile(loss=losses.binary_crossentropy, optimizer='sgd')
        model.fit(data_gen(), epochs=1, steps_per_epoch=5)
Example #20
  def test_transitive_variable_creation(self):
    dense = keras.layers.Dense(1, use_bias=False, kernel_initializer='ones')
    def bad_lambda_fn(x):
      return dense(x + 1)  # Dense layer is built on first call

    expected_error = textwrap.dedent(r'''
    (    )?The following Variables were created within a Lambda layer \(bias_dense\)
    (    )?but are not tracked by said layer:
    (    )?  <tf.Variable \'.*bias_dense/dense/kernel:0\'.+
    (    )?The layer cannot safely ensure proper Variable reuse.+''')

    with self.assertRaisesRegex(ValueError, expected_error):
      layer = keras.layers.Lambda(bad_lambda_fn, name='bias_dense')
      model = testing_utils.get_model_from_layers([layer], input_shape=(1,))
      model(tf.ones((4, 1)))
Example #21
    def test_activity_regularizer_loss_value(self):
        layer = layers.Dense(1,
                             kernel_initializer='zeros',
                             bias_initializer='ones',
                             activity_regularizer='l2')

        model = testing_utils.get_model_from_layers([layer],
                                                    input_shape=(10, ))

        x = np.ones((10, 10), 'float32')
        optimizer = RMSPropOptimizer(learning_rate=0.001)
        model.compile(optimizer,
                      run_eagerly=testing_utils.should_run_eagerly())
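        # With zero kernels and unit biases every output is 1, so the default
        # l2(0.01) activity regularizer yields 0.01 * 10 = 0.1, which Keras
        # then divides by the batch size of 10, giving the expected 0.01.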
        loss = model.test_on_batch(x)
        self.assertAlmostEqual(0.01, loss, places=4)
Example #22
    def test_training_arg_propagation(self, defun):

        model = testing_utils.get_model_from_layers([LayerWithTrainingArg()],
                                                    input_shape=(1, ))

        def train_step(x):
            return model(x), model(x, training=False), model(x, training=True)

        if defun:
            train_step = tf.function(train_step)

        x = tf.ones((1, 1))
        results = train_step(x)
        self.assertAllClose(results[0], tf.zeros((1, 1)))
        self.assertAllClose(results[1], tf.zeros((1, 1)))
        self.assertAllClose(results[2], tf.ones((1, 1)))
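LayerWithTrainingArg is defined elsewhere; any definition consistent with the assertions returns its input when training is true and zeros otherwise, for example:

class LayerWithTrainingArg(keras.layers.Layer):
    # Hypothetical sketch: passes inputs through in training mode and zeros
    # them otherwise, so the three train_step results above are
    # (zeros, zeros, ones).
    def call(self, inputs, training=None):
        if training:
            return inputs
        return 0. * inputs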
Example #23
  def test_creation_inside_lambda(self):
    def lambda_fn(x):
      scale = tf.Variable(1., trainable=True, name='scale')
      shift = tf.Variable(1., trainable=True, name='shift')
      return x * scale + shift

    expected_error = textwrap.dedent(r'''
    (    )?The following Variables were created within a Lambda layer \(shift_and_scale\)
    (    )?but are not tracked by said layer:
    (    )?  <tf.Variable \'.*shift_and_scale/scale:0\'.+
    (    )?  <tf.Variable \'.*shift_and_scale/shift:0\'.+
    (    )?The layer cannot safely ensure proper Variable reuse.+''')

    with self.assertRaisesRegex(ValueError, expected_error):
      layer = keras.layers.Lambda(lambda_fn, name='shift_and_scale')
      model = testing_utils.get_model_from_layers([layer], input_shape=(1,))
      model(tf.ones((4, 1)))
Example #24
  def test_validation_dataset_with_no_step_arg(self):
    # Create a model that learns y=Mx.
    layers = [core.Dense(1)]
    model = testing_utils.get_model_from_layers(layers, input_shape=(1,))
    model.compile(loss="mse", optimizer="adam", metrics=["mean_absolute_error"])

    train_dataset = self.create_dataset(num_samples=200, batch_size=10)
    eval_dataset = self.create_dataset(num_samples=50, batch_size=25)

    history = model.fit(x=train_dataset, validation_data=eval_dataset, epochs=2)
    evaluation = model.evaluate(x=eval_dataset)

    # If the fit call used the entire dataset, then the final validation MAE
    # from the fit history should equal the final element in the output of
    # evaluating the model on the same eval dataset.
    self.assertAlmostEqual(history.history["val_mean_absolute_error"][-1],
                           evaluation[-1], places=5)
Example #25
  def test_training_internal_ragged_tensors(self):
    # Create a model that implements y=Mx. This is easy to learn and will
    # demonstrate appropriate gradient passing. (We have to use RaggedTensors
    # for this test, as ToSparse() doesn't support gradient propagation through
    # the layer.) TODO(b/124796939): Investigate this.
    layers = [core.Dense(2), ToRagged(padding=0), ToDense(default_value=-1)]
    model = testing_utils.get_model_from_layers(layers, input_shape=(1,))

    input_data = np.random.rand(1024, 1)
    expected_data = np.concatenate((input_data * 3, input_data * .5), axis=-1)

    model.compile(loss="mse", optimizer="adam", **get_test_mode_kwargs())
    history = model.fit(input_data, expected_data, epochs=10, verbose=0)

    # If the model trained, the loss stored at history[0] should be different
    # than the one stored at history[-1].
    self.assertNotEqual(history.history["loss"][-1], history.history["loss"][0])
Example #26
  def test_sparse_tensor_outputs(self):
    # Create a model that accepts an input and converts it to a sparse
    # tensor, then verify the components of the SparseTensor output.
    layers = [ToSparse()]
    model = testing_utils.get_model_from_layers(layers, input_shape=(None,))
    model._run_eagerly = testing_utils.should_run_eagerly()

    # Define some input data with additional padding.
    input_data = np.array([[1, 0, 0], [2, 3, 0]])
    output = model.predict(input_data)

    expected_indices = np.array([[0, 0], [1, 0], [1, 1]])
    expected_values = np.array([1, 2, 3])
    expected_dense_shape = np.array([2, 3])

    self.assertAllEqual(output.indices, expected_indices)
    self.assertAllEqual(output.values, expected_values)
    self.assertAllEqual(output.dense_shape, expected_dense_shape)
Example #27
    def test_simple_build_with_constant(self):
        class BuildConstantLayer(keras.layers.Layer):
            def build(self, input_shape):
                self.b = tf.convert_to_tensor(2.0)

            def call(self, inputs):
                return self.b * inputs

        layer = BuildConstantLayer()
        model = testing_utils.get_model_from_layers(
            [layer, keras.layers.Dense(1)], input_shape=(1, ))

        x = tf.convert_to_tensor([[3.0]])
        self.assertEqual(tf_utils.is_symbolic_tensor(model(x)),
                         not tf.executing_eagerly())
        self.assertEqual(tf_utils.is_symbolic_tensor(layer(x)),
                         not tf.executing_eagerly())
        self.assertAllClose(keras.backend.get_value(layer(x)), [[6.0]])
Example #28
  def test_warns_on_variable_capture(self):
    v = tf.Variable(1., trainable=True)
    def lambda_fn(x):
      return x * v

    expected_warning = textwrap.dedent(r'''
    (    )?The following Variables were used a Lambda layer\'s call \(lambda\), but
    (    )?are not present in its tracked objects:
    (    )?  <tf.Variable \'.*Variable:0\'.+
    (    )?It is possible that this is intended behavior.+''')

    layer = keras.layers.Lambda(lambda_fn)
    def patched_warn(msg):
      raise ValueError(msg)
    layer._warn = patched_warn

    with self.assertRaisesRegex(ValueError, expected_warning):
      model = testing_utils.get_model_from_layers([layer], input_shape=(1,))
      model(tf.ones((4, 1)))
Example #29
def add_loss_step(defun):
    optimizer = keras.optimizer_v2.adam.Adam()
    model = testing_utils.get_model_from_layers([LayerWithLosses()],
                                                input_shape=(10, ))

    def train_step(x):
        with tf.GradientTape() as tape:
            model(x)
            assert len(model.losses) == 2
            loss = tf.reduce_sum(model.losses)
        gradients = tape.gradient(loss, model.trainable_weights)
        optimizer.apply_gradients(zip(gradients, model.trainable_weights))
        return loss

    if defun:
        train_step = tf.function(train_step)

    x = tf.ones((10, 10))
    return train_step(x)
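LayerWithLosses is a shared helper; the assertion that model.losses has two entries suggests one weight-regularization loss plus one loss added in call. A plausible sketch:

class LayerWithLosses(keras.layers.Layer):
    # Hypothetical sketch: contributes two losses, an l1 penalty on its
    # weight (registered via the regularizer) and an input-dependent loss
    # added in call.
    def build(self, input_shape):
        self.v = self.add_weight(
            name='v', shape=(),
            initializer='ones',
            regularizer=keras.regularizers.l1(0.01))

    def call(self, inputs):
        self.add_loss(tf.reduce_mean(inputs))
        return self.v * inputs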
Example #30
    def test_vector_classification_shared_model(self):
        # Test that Sequential models that feature internal updates
        # and internal losses can be shared.
        np.random.seed(1337)
        (x_train, y_train), _ = testing_utils.get_test_data(train_samples=100,
                                                            test_samples=0,
                                                            input_shape=(10, ),
                                                            num_classes=2)
        y_train = np_utils.to_categorical(y_train)

        base_model = testing_utils.get_model_from_layers(
            [
                keras.layers.Dense(
                    16,
                    activation='relu',
                    kernel_regularizer=keras.regularizers.l2(1e-5),
                    bias_regularizer=keras.regularizers.l2(1e-5)),
                keras.layers.BatchNormalization()
            ],
            input_shape=x_train.shape[1:])
        x = keras.layers.Input(x_train.shape[1:])
        y = base_model(x)
        y = keras.layers.Dense(y_train.shape[-1], activation='softmax')(y)
        model = keras.models.Model(x, y)
        model.compile(loss='categorical_crossentropy',
                      optimizer=keras.optimizer_v2.adam.Adam(0.005),
                      metrics=['acc'],
                      run_eagerly=testing_utils.should_run_eagerly())
        self.assertLen(model.losses, 2)
        if not tf.executing_eagerly():
            self.assertLen(model.get_updates_for(x), 2)
        history = model.fit(x_train,
                            y_train,
                            epochs=10,
                            batch_size=10,
                            validation_data=(x_train, y_train),
                            verbose=2)
        self.assertGreater(history.history['val_acc'][-1], 0.7)
        _, val_acc = model.evaluate(x_train, y_train)
        self.assertAlmostEqual(history.history['val_acc'][-1], val_acc)
        predictions = model.predict(x_train)
        self.assertEqual(predictions.shape, (x_train.shape[0], 2))