def test_batchnorm_convnet(self):
    if test.is_gpu_available(cuda_only=True):
      with self.session(use_gpu=True):
        model = keras.models.Sequential()
        norm = keras.layers.BatchNormalization(
            axis=1, input_shape=(3, 4, 4), momentum=0.8)
        model.add(norm)
        model.compile(
            loss='mse',
            optimizer=gradient_descent.GradientDescentOptimizer(0.01),
            run_eagerly=testing_utils.should_run_eagerly(),
            experimental_run_tf_function=testing_utils.should_run_tf_function())

        # centered on 5.0, standard deviation 10.0 (np.random.normal's scale is the std dev)
        x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 3, 4, 4))
        model.fit(x, x, epochs=4, verbose=0)
        out = model.predict(x)
        out -= np.reshape(keras.backend.eval(norm.beta), (1, 3, 1, 1))
        out /= np.reshape(keras.backend.eval(norm.gamma), (1, 3, 1, 1))

        np.testing.assert_allclose(np.mean(out, axis=(0, 2, 3)), 0.0, atol=1e-1)
        np.testing.assert_allclose(np.std(out, axis=(0, 2, 3)), 1.0, atol=1e-1)
Example #2
  def test_finite_dataset_unknown_cardinality_out_of_data(self):
    model = testing_utils.get_small_mlp(1, 4, input_dim=3)
    model.compile(
        'rmsprop',
        'mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())

    inputs = np.zeros((100, 3), dtype=np.float32)
    targets = np.random.randint(0, 4, size=100, dtype=np.int32)
    dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
    dataset = dataset.filter(lambda x, y: True).batch(10)
    self.assertEqual(
        keras.backend.get_value(cardinality.cardinality(dataset)),
        cardinality.UNKNOWN)

    batch_counter = BatchCounterCallback()
    with test.mock.patch.object(logging, 'warning') as mock_log:
      # steps_per_epoch (200) is far greater than the number of batches the
      # dataset can produce (10). As this is unexpected, training runs out of
      # data and stops before reaching the second epoch.
      history = model.fit(
          dataset,
          epochs=2,
          verbose=1,
          callbacks=[batch_counter],
          steps_per_epoch=200)
      self.assertIn(
          'ran out of data; interrupting training.', str(mock_log.call_args))
      self.assertIn(
          'can generate at least '
          '`steps_per_epoch * epochs` batches (in this case, 400 batches). '
          'You may need to use the repeat() function when '
          'building your dataset.', str(mock_log.call_args))

    self.assertLen(history.history['loss'], 1)
    self.assertEqual(batch_counter.batch_end_count, 10)
    model.evaluate(dataset)
    out = model.predict(dataset)
    self.assertEqual(out.shape[0], 100)
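
Note: `BatchCounterCallback` is a small helper defined alongside these tests. A minimal sketch consistent with how it is used here (only `batch_end_count` is read; attribute names are assumed) would be:

class BatchCounterCallback(keras.callbacks.Callback):

  def __init__(self):
    # Assumed attribute names; the tests above only read `batch_end_count`.
    self.batch_begin_count = 0
    self.batch_end_count = 0

  def on_batch_begin(self, batch, logs=None):
    self.batch_begin_count += 1

  def on_batch_end(self, batch, logs=None):
    self.batch_end_count += 1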
Example #3
  def test_defun_on_call(self):
    # Check that one can subclass Sequential and place the `call` in a `defun`.

    class MySequential(keras.Sequential):

      def __init__(self, name=None):
        super(MySequential, self).__init__(name=name)
        self.call = function.defun(self.call)

    model = MySequential()
    model.add(keras.layers.Dense(4, activation='relu'))
    model.add(keras.layers.Dense(5, activation='softmax'))

    model.compile(
        loss='mse',
        optimizer='rmsprop',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())

    x = np.random.random((2, 6))
    y = np.random.random((2, 5))
    model.fit(x, y, epochs=1)
Example #4
  def test_single_io_workflow_with_datasets(self):
    num_classes = 2
    num_samples = 10
    input_dim = 50

    with self.cached_session():
      model = testing_utils.SmallSubclassMLP(
          num_hidden=32, num_classes=num_classes, use_dp=True, use_bn=True)
      model.compile(
          loss='mse',
          optimizer='rmsprop',
          run_eagerly=testing_utils.should_run_eagerly(),
          experimental_run_tf_function=testing_utils.should_run_tf_function())

      x = np.ones((num_samples, input_dim), dtype=np.float32)
      y = np.zeros((num_samples, num_classes), dtype=np.float32)
      dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
      dataset = dataset.repeat(100)
      dataset = dataset.batch(10)

      model.fit(dataset, epochs=2, steps_per_epoch=10, verbose=0)
      _ = model.evaluate(dataset, steps=10, verbose=0)
Example #5
    def test_sparse_tensor_rebatched_outputs(self):
        # Create a model that accepts an input and converts it to a sparse
        # tensor, keeping only the nonzero (non-padding) entries.
        layers = [ToSparse()]
        model = testing_utils.get_model_from_layers(layers,
                                                    input_shape=(None, ))
        model._experimental_run_tf_function = testing_utils.should_run_tf_function(
        )
        model._run_eagerly = testing_utils.should_run_eagerly()

        # Define some input data with additional padding.
        input_data = np.array([[1, 0, 0], [2, 3, 0], [4, 0, 0], [5, 6, 0]])
        output = model.predict(input_data, batch_size=2)

        expected_indices = np.array([[0, 0], [1, 0], [1, 1], [2, 0], [3, 0],
                                     [3, 1]])
        expected_values = np.array([1, 2, 3, 4, 5, 6])
        expected_dense_shape = np.array([4, 3])

        self.assertAllEqual(output.indices, expected_indices)
        self.assertAllEqual(output.values, expected_values)
        self.assertAllEqual(output.dense_shape, expected_dense_shape)
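
Note: `ToSparse` is a helper layer assumed by this test. A sketch that matches the asserted behavior (zero entries are treated as padding and dropped), assuming the usual internal TF imports (`array_ops`, `math_ops`, `dtypes`, `sparse_tensor`):

class ToSparse(keras.layers.Layer):
  """Converts a dense tensor to a SparseTensor, dropping zero entries."""

  def call(self, inputs):
    # Keep only the coordinates and values of nonzero (non-padding) entries.
    indices = array_ops.where(math_ops.not_equal(inputs, 0))
    values = array_ops.gather_nd(inputs, indices)
    dense_shape = array_ops.shape(inputs, out_type=dtypes.int64)
    return sparse_tensor.SparseTensor(indices, values, dense_shape)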
Example #6
    def test_layernorm_convnet_channel_last(self):
        model = keras.models.Sequential()
        norm = keras.layers.LayerNormalization(input_shape=(4, 4, 3))
        model.add(norm)
        model.compile(
            loss='mse',
            optimizer=gradient_descent.GradientDescentOptimizer(0.01),
            run_eagerly=testing_utils.should_run_eagerly(),
            experimental_run_tf_function=testing_utils.should_run_tf_function(
            ))

        # centered on 5.0, standard deviation 10.0
        x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 4, 4, 3))
        model.fit(x, x, epochs=4, verbose=0)
        out = model.predict(x)
        out -= np.reshape(keras.backend.eval(norm.beta), (1, 1, 1, 3))
        out /= np.reshape(keras.backend.eval(norm.gamma), (1, 1, 1, 3))

        np.testing.assert_allclose(np.mean(out, axis=(0, 1, 2)),
                                   0.0,
                                   atol=1e-1)
        np.testing.assert_allclose(np.std(out, axis=(0, 1, 2)), 1.0, atol=1e-1)
Example #7
    def test_return_state(self, layer_class):
        input_size = 10
        timesteps = 6
        units = 2
        num_samples = 32
        num_states = 2 if layer_class is keras.layers.CuDNNLSTM else 1

        inputs = keras.Input(batch_shape=(num_samples, timesteps, input_size))
        layer = layer_class(units, return_state=True, stateful=True)
        outputs = layer(inputs)
        _, state = outputs[0], outputs[1:]
        self.assertEqual(len(state), num_states)
        model = keras.models.Model(inputs, state[0])
        model.run_eagerly = testing_utils.should_run_eagerly()
        model._experimental_run_tf_function = testing_utils.should_run_tf_function(
        )

        inputs = np.random.random((num_samples, timesteps, input_size))
        state = model.predict(inputs)
        np.testing.assert_allclose(keras.backend.eval(layer.states[0]),
                                   state,
                                   atol=1e-4)
Example #8
  def test_stateful_LSTM_training(self):
    # See b/123587692 for more context.
    vocab_size = 20
    embedding_dim = 10
    batch_size = 8
    timestep = 12
    units = 5
    x = np.random.randint(0, vocab_size, size=(batch_size, timestep))
    y = np.random.randint(0, vocab_size, size=(batch_size, timestep))

    model = keras.Sequential([
        keras.layers.Embedding(vocab_size, embedding_dim,
                               batch_input_shape=[batch_size, timestep]),
        rnn.LSTM(units, return_sequences=True, stateful=True),
        keras.layers.Dense(vocab_size)
    ])
    model.compile(
        optimizer='adam',
        loss='sparse_categorical_crossentropy',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    model.fit(x, y, epochs=1, shuffle=False)
Example #9
    def test_layer_computation(self, adapt_data, axis, test_data, use_dataset,
                               expected):
        input_shape = tuple([None for _ in range(test_data.ndim - 1)])
        if use_dataset:
            # Keras APIs expect batched datasets
            adapt_data = dataset_ops.Dataset.from_tensor_slices(
                adapt_data).batch(test_data.shape[0] // 2)
            test_data = dataset_ops.Dataset.from_tensor_slices(
                test_data).batch(test_data.shape[0] // 2)

        cls = get_layer_class()
        layer = cls(axis=axis)
        layer.adapt(adapt_data)

        input_data = keras.Input(shape=input_shape)
        output = layer(input_data)
        model = keras.Model(input_data, output)
        model._run_eagerly = testing_utils.should_run_eagerly()
        model._experimental_run_tf_function = testing_utils.should_run_tf_function(
        )
        output_data = model.predict(test_data)
        self.assertAllClose(expected, output_data)
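
`get_layer_class()` presumably returns the `Normalization` preprocessing layer for the current execution mode. Assuming the layer standardizes each feature as (x - mean) / sqrt(var) using statistics gathered during `adapt`, the `expected` values for a parameterized case can be derived by hand:

# Hypothetical parameterization, shown only to illustrate the expected math.
adapt_data = np.array([[1.], [2.], [3.], [4.], [5.]], dtype=np.float32)
test_data = np.array([[1.], [2.], [3.]], dtype=np.float32)
mean = adapt_data.mean(axis=0)  # per-feature mean, here [3.]
var = adapt_data.var(axis=0)    # per-feature variance, here [2.]
expected = (test_data - mean) / np.sqrt(var)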
Example #10
  def test_raw_variable_assignment(self):

    class RawVariableLayer(keras.layers.Layer):

      def __init__(self, **kwargs):
        super(RawVariableLayer, self).__init__(**kwargs)
        # Test variables in nested structure.
        self.var_list = [variables.Variable(1.), {'a': variables.Variable(2.)}]

      def call(self, inputs):
        return inputs * self.var_list[0] * self.var_list[1]['a']

    model = testing_utils.get_model_from_layers([RawVariableLayer()],
                                                input_shape=(10,))
    model.compile(
        'sgd',
        'mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    x, y = np.ones((10, 10)), np.ones((10, 10))
    # Checks that variables get initialized.
    model.fit(x, y, batch_size=2, epochs=2)
Example #11
  def test_no_loss_in_compile(self):

    class InternalLossModel(keras.Model):

      def __init__(self):
        super(InternalLossModel, self).__init__()
        self.dense = keras.layers.Dense(1)

      def call(self, inputs):
        out = self.dense(inputs)
        self.add_loss(math_ops.reduce_sum(out))
        return out

    model = InternalLossModel()
    x = np.ones((10, 10))
    model.predict(x)
    model.compile(
        optimizer='rmsprop',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    model.fit(x)
    model.evaluate(x)
Example #12
    def test_merge_subtract(self):
        i1 = keras.layers.Input(shape=(4, 5))
        i2 = keras.layers.Input(shape=(4, 5))
        i3 = keras.layers.Input(shape=(4, 5))

        subtract_layer = keras.layers.Subtract()
        o = subtract_layer([i1, i2])
        self.assertListEqual(o.shape.as_list(), [None, 4, 5])
        model = keras.models.Model([i1, i2], o)
        model.run_eagerly = testing_utils.should_run_eagerly()
        model._experimental_run_tf_function = testing_utils.should_run_tf_function(
        )

        x1 = np.random.random((2, 4, 5))
        x2 = np.random.random((2, 4, 5))
        out = model.predict([x1, x2])
        self.assertEqual(out.shape, (2, 4, 5))
        self.assertAllClose(out, x1 - x2, atol=1e-4)

        self.assertEqual(subtract_layer.compute_mask([i1, i2], [None, None]),
                         None)
        self.assertTrue(
            np.all(
                K.eval(
                    subtract_layer.compute_mask(
                        [i1, i2],
                        [K.variable(x1), K.variable(x2)]))))

        with self.assertRaisesRegexp(ValueError, "`mask` should be a list."):
            subtract_layer.compute_mask([i1, i2], x1)
        with self.assertRaisesRegexp(ValueError, "`inputs` should be a list."):
            subtract_layer.compute_mask(i1, [None, None])
        with self.assertRaisesRegexp(
                ValueError, "layer should be called on exactly 2 inputs"):
            subtract_layer([i1, i2, i3])
        with self.assertRaisesRegexp(
                ValueError, "layer should be called on exactly 2 inputs"):
            subtract_layer([i1])
Example #13
  def test_train_eval_with_steps(self):
    # See b/142880049 for more details.
    inp = keras.Input(shape=(4,), name='inp1')
    out = keras.layers.Dense(2)(inp)
    model = keras.Model(inp, out)
    model.compile(
        'rmsprop', loss='mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())

    inputs = np.zeros((100, 4), dtype=np.float32)
    targets = np.random.randint(0, 2, size=100, dtype=np.int32)
    training_ds = dataset_ops.Dataset.from_tensor_slices(
        (inputs, targets)).repeat().batch(10)

    # Create the eval dataset from a generator, so that it carries no overall
    # size (cardinality) metadata. Without validation_steps, validation is
    # expected to run through all the data in this dataset every epoch.
    def gen():
      for _ in range(100):
        yield (np.zeros(4, dtype=np.float32),
               np.random.randint(0, 2, size=1, dtype=np.int32))
    eval_ds = dataset_ops.Dataset.from_generator(
        generator=gen,
        output_types=('float32', 'int32'),
        output_shapes=([4], [1])).batch(100)
    batch_counter = BatchCounterCallback()

    model.fit(
        training_ds,
        steps_per_epoch=10,
        epochs=10,
        validation_data=eval_ds,
        callbacks=[batch_counter]
    )

    # Expect 10 training batches per epoch; over 10 epochs that is 100 batches.
    self.assertEqual(batch_counter.batch_end_count, 100)
Example #14
  def test_finite_dataset_unknown_cardinality_no_steps_arg(self):
    model = testing_utils.get_small_mlp(1, 4, input_dim=3)
    model.compile(
        'rmsprop',
        'mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())

    inputs = np.zeros((100, 3), dtype=np.float32)
    targets = np.random.randint(0, 4, size=100, dtype=np.int32)
    dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
    dataset = dataset.filter(lambda x, y: True).batch(10)
    self.assertEqual(keras.backend.get_value(cardinality.cardinality(dataset)),
                     cardinality.UNKNOWN)

    batch_counter = BatchCounterCallback()
    history = model.fit(dataset, epochs=2, verbose=1, callbacks=[batch_counter])

    self.assertLen(history.history['loss'], 2)
    self.assertEqual(batch_counter.batch_end_count, 20)
    model.evaluate(dataset)
    out = model.predict(dataset)
    self.assertEqual(out.shape[0], 100)
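
`get_small_mlp` builds a small two-layer classifier; the real helper also varies the model type (sequential, functional, subclassed) via `testing_utils.get_model_type()`. A simplified sequential-only sketch, with assumed structure:

def get_small_mlp(num_hidden, num_classes, input_dim=None):
  model = keras.models.Sequential()
  if input_dim:
    model.add(keras.layers.Dense(
        num_hidden, activation='relu', input_dim=input_dim))
  else:
    model.add(keras.layers.Dense(num_hidden, activation='relu'))
  model.add(keras.layers.Dense(num_classes, activation='softmax'))
  return model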
Example #15
  def test_dataset_with_sample_weights(self):
    model = testing_utils.get_small_mlp(1, 4, input_dim=3)
    optimizer = 'rmsprop'
    loss = 'mse'
    metrics = ['mae', metrics_module.CategoricalAccuracy()]
    model.compile(
        optimizer,
        loss,
        metrics=metrics,
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())

    inputs = np.zeros((10, 3), np.float32)
    targets = np.zeros((10, 4), np.float32)
    sample_weights = np.ones((10), np.float32)
    dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets,
                                                      sample_weights))
    dataset = dataset.repeat(100)
    dataset = dataset.batch(10)

    model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
    model.evaluate(dataset, steps=2, verbose=1)
    model.predict(dataset, steps=2)
Example #16
  def test_subclassed_model_with_feature_columns_with_ds_input(self):
    col_a = fc.numeric_column('a')
    col_b = fc.numeric_column('b')

    dnn_model = TestDNNModel([col_a, col_b], 20)

    dnn_model.compile(
        optimizer='rmsprop',
        loss='categorical_crossentropy',
        metrics=['accuracy'],
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())

    y = np.random.randint(20, size=(100, 1))
    y = keras.utils.to_categorical(y, num_classes=20)
    x = {'a': np.random.random((100, 1)), 'b': np.random.random((100, 1))}
    ds1 = dataset_ops.Dataset.from_tensor_slices(x)
    ds2 = dataset_ops.Dataset.from_tensor_slices(y)
    ds = dataset_ops.Dataset.zip((ds1, ds2)).batch(5)
    dnn_model.fit(ds, steps_per_epoch=1)
    dnn_model.fit(ds, steps_per_epoch=1)
    dnn_model.evaluate(ds, steps=1)
    dnn_model.predict(ds, steps=1)
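
`TestDNNModel` is a subclassed model assumed by this test: it maps the dict of feature tensors through the feature columns and applies one Dense head. A sketch under that assumption:

class TestDNNModel(keras.models.Model):

  def __init__(self, feature_columns, units, name=None, **kwargs):
    super(TestDNNModel, self).__init__(name=name, **kwargs)
    # DenseFeatures turns the dict of feature tensors into a single matrix.
    self._input_layer = fc.DenseFeatures(feature_columns)
    self._dense_layer = keras.layers.Dense(units)

  def call(self, features):
    net = self._input_layer(features)
    return self._dense_layer(net)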
Example #17
  def test_calling_model_on_same_dataset(self):
    model = testing_utils.get_small_mlp(1, 4, input_dim=3)
    optimizer = 'rmsprop'
    loss = 'mse'
    metrics = ['mae']
    model.compile(
        optimizer,
        loss,
        metrics=metrics,
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())

    inputs = np.zeros((10, 3), np.float32)
    targets = np.zeros((10, 4), np.float32)
    dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
    dataset = dataset.repeat(100)
    dataset = dataset.batch(10)

    # Call fit with validation data
    model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
              validation_data=dataset, validation_steps=2)
    model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
              validation_data=dataset, validation_steps=2)
Example #18
  def test_wide_deep_model_as_layer(self):
    linear_model = linear.LinearModel(units=1)
    dnn_model = sequential.Sequential([core.Dense(units=1)])
    linear_input = input_layer.Input(shape=(3,), name='linear')
    dnn_input = input_layer.Input(shape=(5,), name='dnn')
    wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
    wide_deep_output = wide_deep_model((linear_input, dnn_input))
    input_b = input_layer.Input(shape=(1,), name='b')
    output_b = core.Dense(units=1)(input_b)
    model = training.Model(
        inputs=[linear_input, dnn_input, input_b],
        outputs=[wide_deep_output + output_b])
    linear_input_np = np.random.uniform(low=-5, high=5, size=(64, 3))
    dnn_input_np = np.random.uniform(low=-5, high=5, size=(64, 5))
    input_b_np = np.random.uniform(low=-5, high=5, size=(64,))
    output_np = linear_input_np[:, 0] + .2 * dnn_input_np[:, 1] + input_b_np
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[],
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    model.fit([linear_input_np, dnn_input_np, input_b_np], output_np, epochs=5)
Example #19
  def test_lambda_with_variable_in_model(self):
    v = variables.Variable(1., trainable=True)
    def lambda_fn(x, v):
      return x * v

    # While it is generally not advised to mix Variables with Lambda layers, if
    # the variables are explicitly set as attributes then they are still
    # tracked. This is consistent with the base Layer behavior.
    layer = keras.layers.Lambda(lambda_fn, arguments={'v': v})
    self.assertLen(layer.trainable_weights, 0)
    layer.v = v
    self.assertLen(layer.trainable_weights, 1)

    model = testing_utils.get_model_from_layers([layer], input_shape=(10,))
    model.compile(
        keras.optimizer_v2.gradient_descent.SGD(0.1),
        'mae',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    x, y = np.ones((10, 10), 'float32'), 2 * np.ones((10, 10), 'float32')
    model.fit(x, y, batch_size=2, epochs=2, validation_data=(x, y))
    self.assertLen(model.trainable_weights, 1)
    self.assertAllClose(keras.backend.get_value(model.trainable_weights[0]), 2.)
Example #20
  def test_clone_functional_with_masking(self, share_weights):
    if share_weights:
      clone_fn = functools.partial(
          keras.models._clone_functional_model, layer_fn=models.share_weights)
    else:
      clone_fn = keras.models.clone_model

    x = np.array([[[1.], [1.]], [[0.], [0.]]])
    inputs = keras.Input((2, 1))
    outputs = keras.layers.Masking(mask_value=0)(inputs)
    outputs = keras.layers.TimeDistributed(
        keras.layers.Dense(1, kernel_initializer='one'))(outputs)
    model = keras.Model(inputs, outputs)

    model = clone_fn(model)
    model.compile(
        loss='mse',
        optimizer=testing_utils.get_v2_optimizer('adam'),
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    y = np.array([[[1], [1]], [[1], [1]]])
    loss = model.train_on_batch(x, y)
    self.assertEqual(float(loss), 0.)
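
`share_weights` is the `layer_fn` used when cloning with shared weights: returning the original layer instance, rather than reinstantiating it from config, is enough to make the clone share weights with the source model. A minimal assumed definition:

def share_weights(layer):
  # Reusing the layer instance makes the cloned model share its weights.
  return layer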
Example #21
  def test_custom_build_with_fit(self):

    class DummyModel(keras.Model):

      def __init__(self):
        super(DummyModel, self).__init__()
        self.layer1 = keras.layers.Dense(10, activation='relu')

      def build(self, input_shape):
        self.layer2 = keras.layers.Dense(1, activation='relu')

      def call(self, inputs):
        return self.layer2(self.layer1(inputs))

    model = DummyModel()
    model.compile(
        'sgd',
        'mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    model.fit(np.ones((10, 10)), np.ones((10, 1)), batch_size=2, epochs=2)
    self.assertLen(model.layers, 2)
    self.assertLen(model.trainable_variables, 4)
Example #22
  def test_trace_model_outputs_after_fitting(self):
    input_dim = 5 if testing_utils.get_model_type() == 'functional' else None
    model = testing_utils.get_small_mlp(10, 3, input_dim)
    model.compile(
        optimizer='sgd',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    model.fit(
        x=np.random.random((8, 5)).astype(np.float32),
        y=np.random.random((8, 3)).astype(np.float32),
        epochs=2)

    inputs = array_ops.ones((8, 5))

    fn = saving_utils.trace_model_call(model)
    signature_outputs = fn(inputs)
    if model.output_names:
      expected_outputs = {model.output_names[0]: model(inputs)}
    else:
      expected_outputs = {'output_1': model(inputs)}

    self._assert_all_close(expected_outputs, signature_outputs)
Example #23
  def test_subclass_nested_in_graph(self):
    num_classes = 2
    num_samples = 100
    input_dim = 50

    model = model_util.get_nested_model_3(
        input_dim=input_dim, num_classes=num_classes)
    model.compile(
        loss='mse',
        optimizer='rmsprop',
        metrics=['acc'],
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())

    x = np.ones((num_samples, input_dim))
    y = np.zeros((num_samples, num_classes))

    model.fit(x, y, epochs=2, batch_size=32, verbose=0)
    _ = model.evaluate(x, y, verbose=0)

    self.assertEqual(len(model.weights), 16)
    self.assertEqual(len(model.non_trainable_weights), 4)
    self.assertEqual(len(model.trainable_weights), 12)
Example #24
    def test_predict_generator_method(self):
        model = testing_utils.get_small_mlp(num_hidden=3,
                                            num_classes=4,
                                            input_dim=2)
        model.run_eagerly = testing_utils.should_run_eagerly()
        model._experimental_run_tf_function = testing_utils.should_run_tf_function(
        )

        self._sleep_at_end = True
        model.predict_generator(custom_generator(),
                                steps=5,
                                max_queue_size=10,
                                workers=2,
                                use_multiprocessing=True)
        model.predict_generator(custom_generator(),
                                steps=5,
                                max_queue_size=10,
                                use_multiprocessing=False)
        model.predict_generator(custom_generator(),
                                steps=5,
                                max_queue_size=10,
                                workers=0)
        # Test generator with just inputs (no targets)
        model.predict_generator(custom_generator(mode=1),
                                steps=5,
                                max_queue_size=10,
                                workers=2,
                                use_multiprocessing=True)
        model.predict_generator(custom_generator(mode=1),
                                steps=5,
                                max_queue_size=10,
                                use_multiprocessing=False)
        model.predict_generator(custom_generator(mode=1),
                                steps=5,
                                max_queue_size=10,
                                workers=0)
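
`custom_generator` is assumed to yield numpy batches matching the `get_small_mlp(num_hidden=3, num_classes=4, input_dim=2)` model above, with `mode=1` producing inputs only. One plausible sketch:

def custom_generator(mode=2):
  batch_size = 10
  num_samples = 50
  x = np.random.random((num_samples, 2))  # matches input_dim=2
  y = np.random.random((num_samples, 4))  # matches num_classes=4
  w = np.random.random((num_samples,))
  i = 0
  while True:
    start = (i * batch_size) % num_samples
    end = start + batch_size
    i += 1
    if mode == 1:
      yield x[start:end]                # inputs only
    elif mode == 2:
      yield x[start:end], y[start:end]  # inputs and targets
    else:
      yield x[start:end], y[start:end], w[start:end]  # plus sample weights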
Example #25
  def test_if_training_pattern_loss(self):

    class MyLayer(keras.layers.Layer):

      def call(self, inputs, training=None):
        if training:
          loss = math_ops.reduce_sum(inputs)
        else:
          loss = 0.
        self.add_loss(loss)
        return inputs

    inputs = keras.Input((3,))
    outputs = MyLayer()(inputs)
    model = keras.Model(inputs, outputs)
    model.compile(
        'sgd',
        'mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    train_loss = model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
    self.assertEqual(train_loss, 2 * 3)
    test_loss = model.test_on_batch(np.ones((2, 3)), np.ones((2, 3)))
    self.assertEqual(test_loss, 0)
Example #26
    def test_merge_concatenate(self):
        i1 = keras.layers.Input(shape=(4, 5))
        i2 = keras.layers.Input(shape=(4, 5))
        concat_layer = keras.layers.Concatenate(axis=1)
        o = concat_layer([i1, i2])
        self.assertListEqual(o.shape.as_list(), [None, 8, 5])
        model = keras.models.Model([i1, i2], o)
        model.run_eagerly = testing_utils.should_run_eagerly()
        model._experimental_run_tf_function = testing_utils.should_run_tf_function(
        )

        x1 = np.random.random((2, 4, 5))
        x2 = np.random.random((2, 4, 5))
        out = model.predict([x1, x2])
        self.assertEqual(out.shape, (2, 8, 5))
        self.assertAllClose(out, np.concatenate([x1, x2], axis=1), atol=1e-4)

        self.assertEqual(concat_layer.compute_mask([i1, i2], [None, None]),
                         None)
        self.assertTrue(
            np.all(
                K.eval(
                    concat_layer.compute_mask(
                        [i1, i2],
                        [K.variable(x1), K.variable(x2)]))))

        with self.assertRaisesRegexp(ValueError, "`mask` should be a list."):
            concat_layer.compute_mask([i1, i2], x1)
        with self.assertRaisesRegexp(ValueError, "`inputs` should be a list."):
            concat_layer.compute_mask(i1, [None, None])
        with self.assertRaisesRegexp(ValueError,
                                     "should have the same length"):
            concat_layer.compute_mask([i1, i2], [None])
        with self.assertRaisesRegexp(
                ValueError, "layer should be called on a list of inputs"):
            concat_layer(i1)
Example #27
    def test_merge_add(self):
        i1 = keras.layers.Input(shape=(4, 5))
        i2 = keras.layers.Input(shape=(4, 5))
        i3 = keras.layers.Input(shape=(4, 5))

        add_layer = keras.layers.Add()
        o = add_layer([i1, i2, i3])
        self.assertListEqual(o.shape.as_list(), [None, 4, 5])
        model = keras.models.Model([i1, i2, i3], o)
        model.run_eagerly = testing_utils.should_run_eagerly()
        model._experimental_run_tf_function = testing_utils.should_run_tf_function(
        )

        x1 = np.random.random((2, 4, 5))
        x2 = np.random.random((2, 4, 5))
        x3 = np.random.random((2, 4, 5))
        out = model.predict([x1, x2, x3])
        self.assertEqual(out.shape, (2, 4, 5))
        self.assertAllClose(out, x1 + x2 + x3, atol=1e-4)

        self.assertEqual(
            add_layer.compute_mask([i1, i2, i3], [None, None, None]), None)
        self.assertTrue(
            np.all(
                K.eval(
                    add_layer.compute_mask(
                        [i1, i2],
                        [K.variable(x1), K.variable(x2)]))))

        with self.assertRaisesRegexp(ValueError, "`mask` should be a list."):
            add_layer.compute_mask([i1, i2, i3], x1)
        with self.assertRaisesRegexp(ValueError, "`inputs` should be a list."):
            add_layer.compute_mask(i1, [None, None, None])
        with self.assertRaisesRegexp(ValueError,
                                     " should have the same length."):
            add_layer.compute_mask([i1, i2, i3], [None, None])
Example #28
  def test_sequential_model_with_ds_input(self):
    columns = [fc.numeric_column('a')]
    model = keras.models.Sequential([
        fc.DenseFeatures(columns),
        keras.layers.Dense(64, activation='relu'),
        keras.layers.Dense(20, activation='softmax')
    ])
    model.compile(
        optimizer='rmsprop',
        loss='categorical_crossentropy',
        metrics=['accuracy'],
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())

    y = np.random.randint(20, size=(100, 1))
    y = keras.utils.to_categorical(y, num_classes=20)
    x = {'a': np.random.random((100, 1))}
    ds1 = dataset_ops.Dataset.from_tensor_slices(x)
    ds2 = dataset_ops.Dataset.from_tensor_slices(y)
    ds = dataset_ops.Dataset.zip((ds1, ds2)).batch(5)
    model.fit(ds, steps_per_epoch=1)
    model.fit(ds, steps_per_epoch=1)
    model.evaluate(ds, steps=1)
    model.predict(ds, steps=1)
Example #29
def _run_batchnorm_correctness_test(layer, dtype='float32', fused=False):
  model = keras.models.Sequential()
  model.add(keras.Input(shape=(2, 2, 2), dtype=dtype))
  norm = layer(momentum=0.8, fused=fused)
  model.add(norm)
  if dtype == 'float16':
    # Keras models require float32 losses.
    model.add(keras.layers.Lambda(lambda x: keras.backend.cast(x, 'float32')))
  model.compile(
      loss='mse',
      optimizer=gradient_descent.GradientDescentOptimizer(0.01),
      run_eagerly=testing_utils.should_run_eagerly(),
      experimental_run_tf_function=testing_utils.should_run_tf_function())

  # centered on 5.0, standard deviation 10.0
  x = (np.random.normal(loc=5.0, scale=10.0, size=(1000, 2, 2, 2))
       .astype(dtype))
  model.fit(x, x, epochs=4, verbose=0)
  out = model.predict(x)
  out -= keras.backend.eval(norm.beta)
  out /= keras.backend.eval(norm.gamma)

  np.testing.assert_allclose(out.mean(), 0.0, atol=2e-1)
  np.testing.assert_allclose(out.std(), 1.0, atol=2e-1)
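
A typical way the surrounding tests would invoke this helper (hypothetical call sites; the actual tests may parameterize differently):

# Exercise both the fused and non-fused BatchNormalization code paths.
_run_batchnorm_correctness_test(keras.layers.BatchNormalization, fused=True)
_run_batchnorm_correctness_test(
    keras.layers.BatchNormalization, dtype='float16', fused=False)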
Example #30
    def test_statefulness_LSTM(self):
        num_samples = 2
        timesteps = 3
        embedding_dim = 4
        units = 2
        layer_class = keras.layers.LSTM
        model = keras.models.Sequential()
        model.add(
            keras.layers.Embedding(4,
                                   embedding_dim,
                                   mask_zero=True,
                                   input_length=timesteps,
                                   batch_input_shape=(num_samples, timesteps)))
        layer = layer_class(units,
                            return_sequences=False,
                            stateful=True,
                            weights=None)
        model.add(layer)
        model.compile(
            optimizer=gradient_descent.GradientDescentOptimizer(0.01),
            loss='mse',
            run_eagerly=testing_utils.should_run_eagerly(),
            experimental_run_tf_function=testing_utils.should_run_tf_function(
            ))
        out1 = model.predict(np.ones((num_samples, timesteps)))
        self.assertEqual(out1.shape, (num_samples, units))

        # train once so that the states change
        model.train_on_batch(np.ones((num_samples, timesteps)),
                             np.ones((num_samples, units)))
        out2 = model.predict(np.ones((num_samples, timesteps)))

        # if the state is not reset, output should be different
        self.assertNotEqual(out1.max(), out2.max())

        # check that output changes after states are reset
        # (even though the model itself didn't change)
        layer.reset_states()
        out3 = model.predict(np.ones((num_samples, timesteps)))
        self.assertNotEqual(out2.max(), out3.max())

        # check that container-level reset_states() works
        model.reset_states()
        out4 = model.predict(np.ones((num_samples, timesteps)))
        self.assertAllClose(out3, out4, atol=1e-5)

        # check that the call to `predict` updated the states
        out5 = model.predict(np.ones((num_samples, timesteps)))
        self.assertNotEqual(out4.max(), out5.max())

        # Check masking
        layer.reset_states()

        left_padded_input = np.ones((num_samples, timesteps))
        left_padded_input[0, :1] = 0
        left_padded_input[1, :2] = 0
        out6 = model.predict(left_padded_input)

        layer.reset_states()

        right_padded_input = np.ones((num_samples, timesteps))
        right_padded_input[0, -1:] = 0
        right_padded_input[1, -2:] = 0
        out7 = model.predict(right_padded_input)

        self.assertAllClose(out7, out6, atol=1e-5)