Example #1
    def _get_simple_bias_model(self):
        model = test_utils.get_model_from_layers([test_utils.Bias()],
                                                 input_shape=(1, ))
        model.compile(keras.optimizers.optimizer_v2.gradient_descent.SGD(0.1),
                      'mae',
                      run_eagerly=test_utils.should_run_eagerly())
        return model
Example #2
    def test_image_classification(self):
        np.random.seed(1337)
        (x_train, y_train), _ = test_utils.get_test_data(train_samples=100,
                                                         test_samples=0,
                                                         input_shape=(10, 10,
                                                                      3),
                                                         num_classes=2)
        y_train = utils.to_categorical(y_train)

        layers = [
            keras.layers.Conv2D(4, 3, padding='same', activation='relu'),
            keras.layers.Conv2D(8, 3, padding='same'),
            keras.layers.BatchNormalization(),
            keras.layers.Conv2D(8, 3, padding='same'),
            keras.layers.Flatten(),
            keras.layers.Dense(y_train.shape[-1], activation='softmax')
        ]
        model = test_utils.get_model_from_layers(layers,
                                                 input_shape=x_train.shape[1:])
        model.compile(loss='categorical_crossentropy',
                      optimizer=keras.optimizers.optimizer_v2.adam.Adam(0.005),
                      metrics=['acc'],
                      run_eagerly=test_utils.should_run_eagerly())
        history = model.fit(x_train,
                            y_train,
                            epochs=10,
                            batch_size=10,
                            validation_data=(x_train, y_train),
                            verbose=2)
        self.assertGreater(history.history['val_acc'][-1], 0.7)
        _, val_acc = model.evaluate(x_train, y_train)
        self.assertAlmostEqual(history.history['val_acc'][-1], val_acc)
        predictions = model.predict(x_train)
        self.assertEqual(predictions.shape, (x_train.shape[0], 2))
Example #3
    def test_activity_regularizer(self):
        loss = {}
        for reg in [None, 'l2']:
            model_layers = [
                layers.Dense(10,
                             activation='relu',
                             activity_regularizer=reg,
                             kernel_initializer='ones',
                             use_bias=False),
                layers.Dense(1,
                             activation='sigmoid',
                             kernel_initializer='ones',
                             use_bias=False),
            ]

            model = test_utils.get_model_from_layers(model_layers,
                                                     input_shape=(10, ))

            x = np.ones((10, 10), 'float32')
            y = np.zeros((10, 1), 'float32')

            optimizer = RMSPropOptimizer(learning_rate=0.001)
            model.compile(optimizer,
                          'binary_crossentropy',
                          run_eagerly=test_utils.should_run_eagerly())
            model.fit(x, y, batch_size=2, epochs=5)
            loss[reg] = model.evaluate(x, y)
        self.assertLess(loss[None], loss['l2'])
Example #4
    def test_lambda_with_variable_in_model(self):
        v = tf.Variable(1.0, trainable=True)

        def lambda_fn(x, v):
            return x * v

        # While it is generally not advised to mix Variables with Lambda layers,
        # if the variables are explicitly set as attributes then they are still
        # tracked. This is consistent with the base Layer behavior.
        layer = keras.layers.Lambda(lambda_fn, arguments={"v": v})
        self.assertLen(layer.trainable_weights, 0)
        layer.v = v
        self.assertLen(layer.trainable_weights, 1)

        model = test_utils.get_model_from_layers([layer], input_shape=(10, ))
        model.compile(
            keras.optimizers.optimizer_v2.gradient_descent.SGD(0.1),
            "mae",
            run_eagerly=test_utils.should_run_eagerly(),
        )
        x, y = np.ones((10, 10), "float32"), 2 * np.ones((10, 10), "float32")
        model.fit(x, y, batch_size=2, epochs=2, validation_data=(x, y))
        self.assertLen(model.trainable_weights, 1)
        self.assertAllClose(
            keras.backend.get_value(model.trainable_weights[0]), 2.0)
Example #5
    def test_lambda_skip_state_variable_from_initializer(self):
        # Force the initializers to use the tf.random.Generator, which will
        # contain the state variable.
        kernel_initializer = initializers.RandomNormalV2()
        kernel_initializer._random_generator._rng_type = (
            kernel_initializer._random_generator.RNG_STATEFUL)
        dense = keras.layers.Dense(1,
                                   use_bias=False,
                                   kernel_initializer=kernel_initializer)

        def lambda_fn(x):
            return dense(x + 1)  # Dense layer is built on first call

        # While it is generally not advised to mix layers and variables with
        # Lambda layers, if they are explicitly set as attributes they are
        # still tracked. This is consistent with the base Layer behavior.
        layer = keras.layers.Lambda(lambda_fn)
        layer.dense = dense

        model = test_utils.get_model_from_layers([layer], input_shape=(10, ))
        model.compile(
            keras.optimizers.optimizer_v2.gradient_descent.SGD(0.1),
            "mae",
            run_eagerly=test_utils.should_run_eagerly(),
        )
        x, y = np.ones((10, 10), "float32"), 2 * np.ones((10, 10), "float32")
        model.fit(x, y, batch_size=2, epochs=2, validation_data=(x, y))
        self.assertLen(model.trainable_weights, 1)
Example #6
    def test_vector_classification(self):
        np.random.seed(1337)
        (x_train, y_train), _ = test_utils.get_test_data(train_samples=100,
                                                         test_samples=0,
                                                         input_shape=(10, ),
                                                         num_classes=2)
        y_train = utils.to_categorical(y_train)

        model = test_utils.get_model_from_layers(
            [
                keras.layers.Dense(16, activation="relu"),
                keras.layers.Dropout(0.1),
                keras.layers.Dense(y_train.shape[-1], activation="softmax"),
            ],
            input_shape=x_train.shape[1:],
        )
        model.compile(
            loss="categorical_crossentropy",
            optimizer=keras.optimizers.optimizer_v2.adam.Adam(0.005),
            metrics=["acc"],
            run_eagerly=test_utils.should_run_eagerly(),
        )
        history = model.fit(
            x_train,
            y_train,
            epochs=10,
            batch_size=10,
            validation_data=(x_train, y_train),
            verbose=2,
        )
        self.assertGreater(history.history["val_acc"][-1], 0.7)
        _, val_acc = model.evaluate(x_train, y_train)
        self.assertAlmostEqual(history.history["val_acc"][-1], val_acc)
        predictions = model.predict(x_train)
        self.assertEqual(predictions.shape, (x_train.shape[0], 2))
Example #7
    def test_timeseries_classification(self):
        np.random.seed(1337)
        (x_train, y_train), _ = test_utils.get_test_data(
            train_samples=100,
            test_samples=0,
            input_shape=(4, 10),
            num_classes=2,
        )
        y_train = utils.to_categorical(y_train)

        layers = [
            keras.layers.LSTM(5, return_sequences=True),
            keras.layers.GRU(y_train.shape[-1], activation="softmax"),
        ]
        model = test_utils.get_model_from_layers(layers,
                                                 input_shape=x_train.shape[1:])
        model.compile(
            loss="categorical_crossentropy",
            optimizer=keras.optimizers.optimizer_v2.adam.Adam(0.005),
            metrics=["acc"],
            run_eagerly=test_utils.should_run_eagerly(),
        )
        history = model.fit(
            x_train,
            y_train,
            epochs=15,
            batch_size=10,
            validation_data=(x_train, y_train),
            verbose=2,
        )
        self.assertGreater(history.history["val_acc"][-1], 0.7)
        _, val_acc = model.evaluate(x_train, y_train)
        self.assertAlmostEqual(history.history["val_acc"][-1], val_acc)
        predictions = model.predict(x_train)
        self.assertEqual(predictions.shape, (x_train.shape[0], 2))
Example #8
    def test_relu_layer_as_activation(self):
        layer = keras.layers.Dense(1, activation=keras.layers.ReLU())
        model = test_utils.get_model_from_layers([layer], input_shape=(10, ))
        model.compile("sgd",
                      "mse",
                      run_eagerly=test_utils.should_run_eagerly())
        model.fit(np.ones((10, 10)), np.ones((10, 1)), batch_size=2)
Example #9
    def test_metrics_correctness_with_dataset(self):
        layers = [
            keras.layers.Dense(8,
                               activation='relu',
                               input_dim=4,
                               kernel_initializer='ones'),
            keras.layers.Dense(1,
                               activation='sigmoid',
                               kernel_initializer='ones')
        ]

        model = test_utils.get_model_from_layers(layers, (4, ))

        model.compile(loss='binary_crossentropy',
                      metrics=['accuracy',
                               metrics_module.BinaryAccuracy()],
                      optimizer='rmsprop',
                      run_eagerly=test_utils.should_run_eagerly())

        np.random.seed(123)
        x = np.random.randint(10, size=(100, 4)).astype(np.float32)
        y = np.random.randint(2, size=(100, 1)).astype(np.float32)
        dataset = tf.data.Dataset.from_tensor_slices((x, y))
        dataset = dataset.batch(10)
        outs = model.evaluate(dataset, steps=10)
        self.assertEqual(np.around(outs[1], decimals=1), 0.5)
        self.assertEqual(np.around(outs[2], decimals=1), 0.5)

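        # With all-zero labels, the all-ones model saturates near 1 for every
        # example, so both accuracy metrics are expected to drop to 0.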
        y = np.zeros((100, 1), dtype=np.float32)
        dataset = tf.data.Dataset.from_tensor_slices((x, y))
        dataset = dataset.repeat(100)
        dataset = dataset.batch(10)
        outs = model.evaluate(dataset, steps=10)
        self.assertEqual(outs[1], 0.)
        self.assertEqual(outs[2], 0.)
Example #10
    def test_revive(self):
        input_shape = None
        if test_utils.get_model_type() == 'functional':
            input_shape = (2, 3)

        layer_with_config = CustomLayerWithConfig(1., 2)
        layer_without_config = CustomLayerNoConfig(3., 4)
        subclassed_with_config = SubclassedModelWithConfig(4., 6.)
        subclassed_without_config = SubclassedModelNoConfig(7., 8.)

        inputs = keras.Input((2, 3))
        x = CustomLayerWithConfig(1., 2)(inputs)
        x = CustomLayerNoConfig(3., 4)(x)
        x = SubclassedModelWithConfig(4., 6.)(x)
        x = SubclassedModelNoConfig(7., 8.)(x)
        inner_model_functional = keras.Model(inputs, x)

        inner_model_sequential = keras.Sequential([
            CustomLayerWithConfig(1., 2),
            CustomLayerNoConfig(3., 4),
            SubclassedModelWithConfig(4., 6.),
            SubclassedModelNoConfig(7., 8.)
        ])

        class SubclassedModel(keras.Model):
            def __init__(self):
                super().__init__()
                self.all_layers = [
                    CustomLayerWithConfig(1., 2),
                    CustomLayerNoConfig(3., 4),
                    SubclassedModelWithConfig(4., 6.),
                    SubclassedModelNoConfig(7., 8.)
                ]

            def call(self, inputs):
                x = inputs
                for layer in self.all_layers:
                    x = layer(x)
                return x

        inner_model_subclassed = SubclassedModel()

        layers = [
            layer_with_config, layer_without_config, subclassed_with_config,
            subclassed_without_config, inner_model_functional,
            inner_model_sequential, inner_model_subclassed
        ]
        model = test_utils.get_model_from_layers(layers,
                                                 input_shape=input_shape)
        # Run data through the Model to create save spec and weights.
        model.predict(np.ones((10, 2, 3)), batch_size=10)

        # Test that the correct checkpointed values are loaded, whether the layer is
        # created from the config or SavedModel.
        layer_with_config.c.assign(2 * layer_with_config.c)
        layer_without_config.c.assign(3 * layer_without_config.c)

        model.save(self.path, save_format='tf')
        revived = keras_load.load(self.path)
        self._assert_revived_correctness(model, revived)
Example #11
def add_metric_step(defun):
    optimizer = keras.optimizers.optimizer_v2.rmsprop.RMSprop()
    model = test_utils.get_model_from_layers(
        [
            LayerWithMetrics(),
            keras.layers.Dense(
                1, kernel_initializer="zeros", activation="softmax"),
        ],
        input_shape=(10, ),
    )

    def train_step(x, y):
        with tf.GradientTape() as tape:
            y_pred_1 = model(x)
            y_pred_2 = model(2 * x)
            y_pred = y_pred_1 + y_pred_2
            loss = keras.losses.mean_squared_error(y, y_pred)
        gradients = tape.gradient(loss, model.trainable_weights)
        optimizer.apply_gradients(zip(gradients, model.trainable_weights))
        assert len(model.metrics) == 2
        return [m.result() for m in model.metrics]

    if defun:
        train_step = tf.function(train_step)

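    # LayerWithMetrics (defined elsewhere in the test file) is assumed to
    # record a mean of its inputs; averaging the x (ones) and 2 * x passes
    # gives the value of 1.5 asserted below.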
    x, y = tf.ones((10, 10)), tf.zeros((10, 1))
    metrics = train_step(x, y)
    assert np.allclose(metrics[0], 1.5)
    assert np.allclose(metrics[1], 1.5)
    return metrics
Example #12
    def test_internal_sparse_tensors(self):
        # Create a model that accepts an input, converts it to Sparse, and
        # converts the sparse tensor back to a dense tensor.
        layers = [ToSparse(), ToDense(default_value=-1)]
        model = test_utils.get_model_from_layers(layers, input_shape=(None, ))

        # Define some input data with additional padding.
        input_data = np.array([[1, 0, 0], [2, 3, 0]])
        expected_output = np.array([[1, -1, -1], [2, 3, -1]])
        output = model.predict(input_data)
        self.assertAllEqual(expected_output, output)
Example #13
    def test_zero_regularization(self):
        # Verifies that training with zero regularization works.
        x, y = np.ones((10, 10)), np.ones((10, 3))
        model = test_utils.get_model_from_layers(
            [keras.layers.Dense(3, kernel_regularizer=keras.regularizers.l2(0))],
            input_shape=(10, ))
        model.compile('sgd',
                      'mse',
                      run_eagerly=test_utils.should_run_eagerly())
        model.fit(x, y, batch_size=5, epochs=1)
Example #14
    def test_generator_dynamic_shapes(self):

        x = [
            "I think juice is great",
            "unknown is the best language since slicedbread",
            "a a a a a a a",
            "matmul",
            "Yaks are also quite nice",
        ]
        y = [1, 0, 0, 1, 1]

        vocab = {
            word: i + 1
            for i, word in enumerate(
                sorted(set(itertools.chain(*[i.split() for i in x]))))
        }

        def data_gen(batch_size=2):
            np.random.seed(0)
            data = list(zip(x, y)) * 10
            np.random.shuffle(data)

            def pack_and_pad(queue):
                x = [[vocab[j] for j in i[0].split()] for i in queue]
                pad_len = max(len(i) for i in x)
                x = np.array([i + [0] * (pad_len - len(i)) for i in x])
                y = np.array([i[1] for i in queue])
                del queue[:]
                return x, y[:, np.newaxis]

            queue = []
            for i, element in enumerate(data):
                queue.append(element)
                if not (i + 1) % batch_size:
                    yield pack_and_pad(queue)

            if queue:
                # Last partial batch
                yield pack_and_pad(queue)

        model = test_utils.get_model_from_layers(
            [
                layers_module.Embedding(input_dim=len(vocab) + 1,
                                        output_dim=4),
                layers_module.SimpleRNN(units=1),
                layers_module.Activation("sigmoid"),
            ],
            input_shape=(None, ),
        )

        model.compile(loss=losses.binary_crossentropy, optimizer="sgd")
        model.fit(data_gen(), epochs=1, steps_per_epoch=5)
Example #15
    def test_ragged_tensor_rebatched_outputs(self):
        # Create a model that accepts an input and converts it to a ragged
        # tensor, which is returned (still ragged) as the model output.
        layers = [ToRagged(padding=0)]
        model = test_utils.get_model_from_layers(layers, input_shape=(None, ))
        model._run_eagerly = test_utils.should_run_eagerly()

        # Define some input data with additional padding.
        input_data = np.array([[1, 0, 0], [2, 3, 0], [4, 0, 0], [5, 6, 0]])
        output = model.predict(input_data, batch_size=2)

        expected_values = [[1], [2, 3], [4], [5, 6]]
        self.assertAllEqual(expected_values, output)
Example #16
    def test_activity_regularizer_loss_value(self):
        layer = layers.Dense(1,
                             kernel_initializer='zeros',
                             bias_initializer='ones',
                             activity_regularizer='l2')

        model = test_utils.get_model_from_layers([layer], input_shape=(10, ))

        x = np.ones((10, 10), 'float32')
        optimizer = RMSPropOptimizer(learning_rate=0.001)
        model.compile(optimizer, run_eagerly=test_utils.should_run_eagerly())
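        # The zero kernel and ones bias make every activation exactly 1.0; the
        # default L2 factor of 0.01 (averaged over the batch) then yields a
        # total loss of ~0.01, checked below.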
        loss = model.test_on_batch(x)
        self.assertAlmostEqual(0.01, loss, places=4)
Example #17
  def test_save_and_load(self):
    saved_model_dir = self._save_model_dir()
    save_format = test_utils.get_save_format()
    save_kwargs = test_utils.get_save_kwargs()

    if ((save_format == 'h5' or not save_kwargs.get('save_traces', True)) and
        test_utils.get_model_type() == 'subclass'):
      # HDF5 format currently does not allow saving subclassed models.
      # When saving with `save_traces=False`, the subclassed model must have a
      # get_config/from_config, which the autogenerated model does not have.
      return

    with self.cached_session():
      model = test_utils.get_model_from_layers(
          [keras.layers.Dense(2),
           keras.layers.RepeatVector(3),
           keras.layers.TimeDistributed(keras.layers.Dense(3))],
          input_shape=(3,))
      model.compile(
          loss=keras.losses.MSE,
          optimizer=keras.optimizers.optimizer_v2.rmsprop.RMSprop(
              learning_rate=0.0001),
          metrics=[
              keras.metrics.categorical_accuracy,
              keras.metrics.CategoricalCrossentropy(
                  name='cce', label_smoothing=tf.constant(0.2)),
          ],
          weighted_metrics=[
              keras.metrics.categorical_crossentropy,
              keras.metrics.CategoricalCrossentropy(
                  name='cce', label_smoothing=tf.constant(0.2)),
          ],
          sample_weight_mode='temporal')

      x = np.random.random((1, 3))
      y = np.random.random((1, 3, 3))
      model.train_on_batch(x, y)

      out = model.predict(x)
      keras.models.save_model(
          model, saved_model_dir, save_format=save_format,
          **save_kwargs)

      loaded_model = keras.models.load_model(saved_model_dir)
      self._assert_same_weights_and_metrics(model, loaded_model)

      out2 = loaded_model.predict(x)
      self.assertAllClose(out, out2, atol=1e-05)

      eval_out = model.evaluate(x, y)
      eval_out2 = loaded_model.evaluate(x, y)
      self.assertArrayNear(eval_out, eval_out2, 0.001)
Example #18
    def _get_model(self):
        x = layers.Dense(3, kernel_initializer='ones', trainable=False)
        out = layers.Dense(
            1, kernel_initializer='ones', name='output', trainable=False)
        model = test_utils.get_model_from_layers([x, out], input_shape=(1,))
        model.compile(
            optimizer='rmsprop',
            loss='mse',
            metrics=[metrics.MeanSquaredError(name='mean_squared_error')],
            weighted_metrics=[
                metrics.MeanSquaredError(name='mean_squared_error_2')
            ],
            run_eagerly=test_utils.should_run_eagerly())
        return model
Example #19
    def test_loss_callable_on_model_fit(self):
        model = test_utils.get_model_from_layers([test_utils.Bias()],
                                                 input_shape=(1, ))

        def callable_loss():
            return tf.reduce_sum(model.weights)

        model.add_loss(callable_loss)
        model.compile(optimizer_v2.gradient_descent.SGD(0.1),
                      run_eagerly=test_utils.should_run_eagerly())

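        # The summed-weight loss has a unit gradient, so SGD(0.1) lowers the
        # bias by 0.1 per step; with one batch per epoch the epoch losses are
        # expected to fall from 0.0 in steps of 0.1, as asserted below.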
        history = model.fit(self.x, batch_size=3, epochs=5)
        self.assertAllClose(history.history['loss'], [0., -.1, -.2, -.3, -.4],
                            1e-3)
Example #20
    def test_vector_classification_shared_model(self):
        # Test that Sequential models that feature internal updates
        # and internal losses can be shared.
        np.random.seed(1337)
        (x_train, y_train), _ = test_utils.get_test_data(train_samples=100,
                                                         test_samples=0,
                                                         input_shape=(10, ),
                                                         num_classes=2)
        y_train = utils.to_categorical(y_train)

        base_model = test_utils.get_model_from_layers(
            [
                keras.layers.Dense(
                    16,
                    activation="relu",
                    kernel_regularizer=keras.regularizers.l2(1e-5),
                    bias_regularizer=keras.regularizers.l2(1e-5),
                ),
                keras.layers.BatchNormalization(),
            ],
            input_shape=x_train.shape[1:],
        )
        x = keras.layers.Input(x_train.shape[1:])
        y = base_model(x)
        y = keras.layers.Dense(y_train.shape[-1], activation="softmax")(y)
        model = keras.models.Model(x, y)
        model.compile(
            loss="categorical_crossentropy",
            optimizer=keras.optimizers.optimizer_v2.adam.Adam(0.005),
            metrics=["acc"],
            run_eagerly=test_utils.should_run_eagerly(),
        )
        self.assertLen(model.losses, 2)
        if not tf.executing_eagerly():
            self.assertLen(model.get_updates_for(x), 2)
        history = model.fit(
            x_train,
            y_train,
            epochs=10,
            batch_size=10,
            validation_data=(x_train, y_train),
            verbose=2,
        )
        self.assertGreater(history.history["val_acc"][-1], 0.7)
        _, val_acc = model.evaluate(x_train, y_train)
        self.assertAlmostEqual(history.history["val_acc"][-1], val_acc)
        predictions = model.predict(x_train)
        self.assertEqual(predictions.shape, (x_train.shape[0], 2))
Example #21
  def test_transitive_variable_creation(self):
    dense = keras.layers.Dense(1, use_bias=False, kernel_initializer='ones')

    def bad_lambda_fn(x):
      return dense(x + 1)  # Dense layer is built on first call

    expected_error = textwrap.dedent(r"""
    (    )?The following Variables were created within a Lambda layer \(bias_dense\)
    (    )?but are not tracked by said layer:
    (    )?  <tf.Variable \'.*bias_dense/dense/kernel:0\'.+
    (    )?The layer cannot safely ensure proper Variable reuse.+""")

    with self.assertRaisesRegex(ValueError, expected_error):
      layer = keras.layers.Lambda(bad_lambda_fn, name='bias_dense')
      model = test_utils.get_model_from_layers([layer], input_shape=(1,))
      model(tf.ones((4, 1)))
Example #22
    def test_training_arg_propagation(self, defun):

        model = test_utils.get_model_from_layers([LayerWithTrainingArg()],
                                                 input_shape=(1, ))

        def train_step(x):
            return model(x), model(x, training=False), model(x, training=True)

        if defun:
            train_step = tf.function(train_step)

        x = tf.ones((1, 1))
        results = train_step(x)
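        # LayerWithTrainingArg (defined elsewhere in the test file) is expected
        # to output zeros for training=False (the default here) and ones for
        # training=True, which the assertions below verify.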
        self.assertAllClose(results[0], tf.zeros((1, 1)))
        self.assertAllClose(results[1], tf.zeros((1, 1)))
        self.assertAllClose(results[2], tf.ones((1, 1)))
Example #23
    def _get_model(self):
        x = layers.Dense(3, kernel_initializer="ones", trainable=False)
        out = layers.Dense(1,
                           kernel_initializer="ones",
                           name="output",
                           trainable=False)
        model = test_utils.get_model_from_layers([x, out], input_shape=(1, ))
        model.compile(
            optimizer="rmsprop",
            loss="mse",
            metrics=[metrics.MeanSquaredError(name="mean_squared_error")],
            weighted_metrics=[
                metrics.MeanSquaredError(name="mean_squared_error_2")
            ],
            run_eagerly=test_utils.should_run_eagerly(),
        )
        return model
Example #24
  def test_training_internal_ragged_tensors(self):
    # Create a model that implements y=Mx. This is easy to learn and will
    # demonstrate appropriate gradient passing. (We have to use RaggedTensors
    # for this test, as ToSparse() doesn't support gradient propagation through
    # the layer.) TODO(b/124796939): Investigate this.
    layers = [core.Dense(2), ToRagged(padding=0), ToDense(default_value=-1)]
    model = test_utils.get_model_from_layers(layers, input_shape=(1,))

    input_data = np.random.rand(1024, 1)
    expected_data = np.concatenate((input_data * 3, input_data * .5), axis=-1)

    model.compile(loss="mse", optimizer="adam", **get_test_mode_kwargs())
    history = model.fit(input_data, expected_data, epochs=10, verbose=0)

    # If the model trained, the loss stored at history[0] should differ from
    # the one stored at history[-1].
    self.assertNotEqual(history.history["loss"][-1],
                        history.history["loss"][0])
Example #25
    def test_sparse_tensor_outputs(self):
        # Create a model that accepts an input and converts it to a sparse
        # tensor, which is returned (still sparse) as the model output.
        layers = [ToSparse()]
        model = test_utils.get_model_from_layers(layers, input_shape=(None, ))
        model._run_eagerly = test_utils.should_run_eagerly()

        # Define some input data with additional padding.
        input_data = np.array([[1, 0, 0], [2, 3, 0]])
        output = model.predict(input_data)

        expected_indices = np.array([[0, 0], [1, 0], [1, 1]])
        expected_values = np.array([1, 2, 3])
        expected_dense_shape = np.array([2, 3])

        self.assertAllEqual(output.indices, expected_indices)
        self.assertAllEqual(output.values, expected_values)
        self.assertAllEqual(output.dense_shape, expected_dense_shape)
Example #26
    def test_creation_inside_lambda(self):
        def lambda_fn(x):
            scale = tf.Variable(1., trainable=True, name='scale')
            shift = tf.Variable(1., trainable=True, name='shift')
            return x * scale + shift

        expected_error = textwrap.dedent(r"""
    (    )?The following Variables were created within a Lambda layer \(shift_and_scale\)
    (    )?but are not tracked by said layer:
    (    )?  <tf.Variable \'.*shift_and_scale/scale:0\'.+
    (    )?  <tf.Variable \'.*shift_and_scale/shift:0\'.+
    (    )?The layer cannot safely ensure proper Variable reuse.+""")

        with self.assertRaisesRegex(ValueError, expected_error):
            layer = keras.layers.Lambda(lambda_fn, name='shift_and_scale')
            model = test_utils.get_model_from_layers([layer],
                                                     input_shape=(1, ))
            model(tf.ones((4, 1)))
Example #27
    def test_simple_build_with_constant(self):
        class BuildConstantLayer(keras.layers.Layer):
            def build(self, input_shape):
                self.b = tf.convert_to_tensor(2.0)

            def call(self, inputs):
                return self.b * inputs

        layer = BuildConstantLayer()
        model = test_utils.get_model_from_layers(
            [layer, keras.layers.Dense(1)], input_shape=(1, ))

        x = tf.convert_to_tensor([[3.0]])
        self.assertEqual(tf_utils.is_symbolic_tensor(model(x)),
                         not tf.executing_eagerly())
        self.assertEqual(tf_utils.is_symbolic_tensor(layer(x)),
                         not tf.executing_eagerly())
        self.assertAllClose(keras.backend.get_value(layer(x)), [[6.0]])
Example #28
    def test_ignore_validation_split_when_validation_dataset_is_present(
            self, validation_split):
        # Create a model that learns y=Mx.
        layers = [core.Dense(1)]
        model = test_utils.get_model_from_layers(layers, input_shape=(1, ))
        model.compile(loss="mse",
                      optimizer="adam",
                      metrics=["mean_absolute_error"])

        train_dataset = _create_dataset(num_samples=200, batch_size=10)
        eval_dataset = _create_dataset(num_samples=50, batch_size=25)

        # Make sure model.fit doesn't raise an error because of the mocking
        # alone.
        mock_train_validation_split_return = (
            (train_dataset, None, None),
            eval_dataset,
        )

        with mock.patch.object(
                data_adapter,
                "train_validation_split",
                return_value=mock_train_validation_split_return,
        ) as mock_train_validation_split:
            model.fit(
                x=train_dataset,
                validation_split=validation_split,
                validation_data=eval_dataset,
                epochs=2,
            )
            mock_train_validation_split.assert_not_called()

            history = model.fit(x=train_dataset,
                                validation_data=eval_dataset,
                                epochs=2)
            evaluation = model.evaluate(x=eval_dataset)

            # See test_validation_dataset_with_no_step_arg for details.
            self.assertAlmostEqual(
                history.history["val_mean_absolute_error"][-1],
                evaluation[-1],
                places=5,
            )
Example #29
    def test_token_classification(self):
        def densify(x, y):
            return x.to_tensor(), y.to_tensor()

        utils.set_random_seed(1337)
        data = tf.ragged.stack(
            [
                np.random.randint(low=0, high=16, size=random.randint(4, 16))
                for _ in range(100)
            ]
        )
        labels = tf.ragged.stack(
            [np.random.randint(low=0, high=3, size=len(arr)) for arr in data]
        )
        features_dataset = tf.data.Dataset.from_tensor_slices(data)
        labels_dataset = tf.data.Dataset.from_tensor_slices(labels)
        dataset = tf.data.Dataset.zip((features_dataset, labels_dataset))
        dataset = dataset.batch(batch_size=10)
        dataset = dataset.map(densify)  # Pads with 0 values by default

        layers = [
            keras.layers.Embedding(16, 4),
            keras.layers.Conv1D(4, 5, padding="same", activation="relu"),
            keras.layers.Conv1D(8, 5, padding="same"),
            keras.layers.BatchNormalization(),
            keras.layers.Conv1D(3, 5, padding="same", activation="softmax"),
        ]
        model = test_utils.get_model_from_layers(layers, input_shape=(None,))
        model.compile(
            loss="sparse_categorical_crossentropy",
            optimizer="adam",
            metrics=["acc"],
        )
        history = model.fit(
            dataset, epochs=10, validation_data=dataset, verbose=2
        )
        self.assertGreater(history.history["val_acc"][-1], 0.5)
        _, val_acc = model.evaluate(dataset)
        self.assertAlmostEqual(history.history["val_acc"][-1], val_acc)
        predictions = model.predict(dataset)
        self.assertIsInstance(predictions, tf.RaggedTensor)
        self.assertEqual(predictions.shape[0], len(dataset) * 10)
        self.assertEqual(predictions.shape[-1], 3)
Example #30
def add_loss_step(defun):
    optimizer = keras.optimizers.optimizer_v2.adam.Adam()
    model = test_utils.get_model_from_layers([LayerWithLosses()],
                                             input_shape=(10, ))

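    # LayerWithLosses (defined elsewhere in the test file) is expected to
    # register two loss tensors via add_loss on each call, matching the
    # assertion inside train_step.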
    def train_step(x):
        with tf.GradientTape() as tape:
            model(x)
            assert len(model.losses) == 2
            loss = tf.reduce_sum(model.losses)
        gradients = tape.gradient(loss, model.trainable_weights)
        optimizer.apply_gradients(zip(gradients, model.trainable_weights))
        return loss

    if defun:
        train_step = tf.function(train_step)

    x = tf.ones((10, 10))
    return train_step(x)