Example #1
def FocalLoss_model(input_shape, output_sequence_length, source_vocab_size,
                    target_vocab_size):
    """
    Build and train a RNN model using word embedding on x and y
    :param input_shape: Tuple of input shape
    :param output_sequence_length: Length of output sequence
    :param english_vocab_size: Number of unique English words in the dataset
    :param french_vocab_size: Number of unique French words in the dataset
    :return: Keras model built, but not trained
    """
    input_seq = keras.Input(input_shape[1:])
    if output_sequence_length > input_shape[1]:
        # Pad the time axis out to `output_sequence_length`, then drop the
        # channel dim that was added only so ZeroPadding1D could be applied.
        reshaped = keras.layers.Reshape((input_shape[1], 1))(input_seq)
        padded = keras.layers.ZeroPadding1D(
            (0, output_sequence_length - input_shape[1]))(reshaped)
        expanded_seq = keras.backend.squeeze(padded, axis=-1)
    else:
        expanded_seq = input_seq

    embedded = keras.layers.Embedding(
        source_vocab_size,
        embeddings_units,
        input_length=output_sequence_length)(expanded_seq)
    embedded_seq = keras.layers.TimeDistributed(
        keras.layers.BatchNormalization(axis=-1))(embedded)
    gru_out = keras.layers.GRU(gru_units, return_sequences=True)(embedded_seq)
    rnn = keras.layers.TimeDistributed(
        keras.layers.BatchNormalization(axis=-1))(gru_out)
    probabilities = keras.layers.TimeDistributed(
        keras.layers.Dense(target_vocab_size, activation='softmax'))(rnn)

    model = keras.Model(input_seq, probabilities)

    model.compile(loss=focal_loss(alpha=.25, gamma=2),
                  optimizer=keras.optimizers.Adam(learning_rate, clipnorm=3.0),
                  metrics=['accuracy'])
    return model
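
This example calls a module-level focal_loss factory and reads embeddings_units, gru_units, and learning_rate from module scope; none of them appear in the excerpt. A minimal sketch of what such a factory could look like for softmax outputs with one-hot targets (an assumption, not the original definition):

import tensorflow as tf

def focal_loss(alpha=0.25, gamma=2.0):
    """Keras-compatible focal loss for one-hot targets over softmax outputs."""
    def loss_fn(y_true, y_pred):
        # Clip predictions so the log stays finite.
        y_pred = tf.clip_by_value(y_pred, 1e-7, 1.0 - 1e-7)
        # Down-weight easy examples by (1 - p)^gamma and scale by alpha.
        cross_entropy = -y_true * tf.math.log(y_pred)
        return tf.reduce_sum(
            alpha * tf.pow(1.0 - y_pred, gamma) * cross_entropy, axis=-1)
    return loss_fn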
Example #2
def get_nested_model_3(input_dim, num_classes):
    # A functional-API model with a subclassed model inside.
    # NOTE: this requires the inner subclass to implement `compute_output_shape`.

    inputs = keras.Input(shape=(input_dim, ))
    x = keras.layers.Dense(32, activation='relu')(inputs)
    x = keras.layers.BatchNormalization()(x)

    class Inner(keras.Model):
        def __init__(self):
            super(Inner, self).__init__()
            self.dense1 = keras.layers.Dense(32, activation='relu')
            self.dense2 = keras.layers.Dense(5, activation='relu')
            self.bn = keras.layers.BatchNormalization()

        def call(self, inputs):
            x = self.dense1(inputs)
            x = self.dense2(x)
            return self.bn(x)

    test_model = Inner()
    x = test_model(x)
    outputs = keras.layers.Dense(num_classes)(x)
    return keras.Model(inputs, outputs, name='nested_model_3')
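
The NOTE above refers to compute_output_shape, which this excerpt never defines. A minimal sketch of how Inner might implement it, assuming the output width is the 5 units of dense2:

import tensorflow as tf

class InnerWithShape(tf.keras.Model):
    """Variant of `Inner` that adds the compute_output_shape the NOTE
    mentions (a sketch, not the original test code)."""

    def __init__(self):
        super(InnerWithShape, self).__init__()
        self.dense1 = tf.keras.layers.Dense(32, activation='relu')
        self.dense2 = tf.keras.layers.Dense(5, activation='relu')
        self.bn = tf.keras.layers.BatchNormalization()

    def call(self, inputs):
        return self.bn(self.dense2(self.dense1(inputs)))

    def compute_output_shape(self, input_shape):
        # dense1 and bn preserve the leading dims; dense2 fixes the last at 5.
        return tf.TensorShape(input_shape)[:-1].concatenate([5])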
Example #3
  def test_getitem_slice_with_stop_and_ellipsis_only(self):
    if not context.executing_eagerly():
      self.skipTest('Complex slicing like this fails in v1')
    inp = keras.Input(shape=(4, 3, 8))
    slice_stop = keras.Input(shape=(), dtype='int32')

    out = inp[..., :slice_stop[0]]
    model = keras.Model(
        inputs=[inp, slice_stop],
        outputs=out)
    model.compile(
        adam.Adam(0.001),
        'mse',
        run_eagerly=testing_utils.should_run_eagerly())
    batch_size = 7
    stop = 6
    x = array_ops.stack([
        math_ops.range(8) for _ in range(batch_size)])
    args = [x, constant_op.constant(stop, shape=(batch_size,))]
    expected = array_ops.stack([
        math_ops.range(8)[:stop] for _ in range(batch_size)])

    if keras_tensor.keras_tensors_enabled():
      self.assertIn('tf.__operators__.getitem', (
          x.name for x in model.layers))
      self.assertNotIn('tf.strided_slice', (
          x.name for x in model.layers))
    self.assertAllEqual(model(args), expected)
    self.assertAllEqual(model.predict(args, batch_size=batch_size), expected)

    # Make sure it can be successfully saved and loaded
    config = model.get_config()
    model = keras.Model.from_config(config)

    self.assertAllEqual(model(args), expected)
    self.assertAllEqual(model.predict(args, batch_size=batch_size), expected)
Example #4
    def test_sequential_as_downstream_of_masking_layer(self):
        inputs = keras.layers.Input(shape=(3, 4))
        x = keras.layers.Masking(mask_value=0., input_shape=(3, 4))(inputs)

        s = keras.Sequential()
        s.add(keras.layers.Dense(5, input_shape=(4, )))

        x = keras.layers.wrappers.TimeDistributed(s)(x)
        model = keras.Model(inputs=inputs, outputs=x)
        model.compile(optimizer=rmsprop.RMSPropOptimizer(1e-3), loss='mse')

        model_input = np.random.randint(low=1, high=5,
                                        size=(10, 3, 4)).astype('float32')
        for i in range(4):
            model_input[i, i:, :] = 0.
        model.fit(model_input,
                  np.random.random((10, 3, 5)),
                  epochs=1,
                  batch_size=6)

        if not context.executing_eagerly():
            # Note: this doesn't work in eager due to a DeferredTensor/ops
            # compatibility issue.
            mask_outputs = [
                model.layers[1].compute_mask(model.layers[1].input)
            ]
            mask_outputs += [
                model.layers[2].compute_mask(model.layers[2].input,
                                             mask_outputs[-1])
            ]
            func = keras.backend.function([model.input], mask_outputs)
            mask_outputs_val = func([model_input])
            self.assertAllClose(mask_outputs_val[0],
                                np.any(model_input, axis=-1))
            self.assertAllClose(mask_outputs_val[1],
                                np.any(model_input, axis=-1))
Example #5
    def test_sparse_int_input_multi_bucket(self):
        vocab_data = np.array([10, 11, 12, 13], dtype=np.int64)
        input_array = sparse_tensor.SparseTensor(
            indices=[[0, 0], [1, 2]],
            values=np.array([13, 133], dtype=np.int64),
            dense_shape=[3, 4])

        expected_indices = [[0, 0], [1, 2]]
        expected_values = [6, 2]
        expected_dense_shape = [3, 4]

        input_data = keras.Input(shape=(None, ),
                                 dtype=dtypes.int64,
                                 sparse=True)
        layer = get_layer_class()(max_tokens=None,
                                  dtype=dtypes.int64,
                                  num_oov_tokens=2)
        layer.set_vocabulary(vocab_data)
        int_data = layer(input_data)
        model = keras.Model(inputs=input_data, outputs=int_data)
        output_data = model.predict(input_array, steps=1)
        self.assertAllEqual(expected_indices, output_data.indices)
        self.assertAllEqual(expected_values, output_data.values)
        self.assertAllEqual(expected_dense_shape, output_data.dense_shape)
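
get_layer_class() is a test-harness helper defined elsewhere in each test file; it returns the preprocessing layer under test. For this integer-lookup test a plausible definition, assuming the index_lookup modules (later examples bind the same helper to their own layer, e.g. text_vectorization or category_encoding):

from tensorflow.python.eager import context
from tensorflow.python.keras.layers.preprocessing import index_lookup
from tensorflow.python.keras.layers.preprocessing import index_lookup_v1

def get_layer_class():
  # Assumed harness helper: pick the v2 layer under eager execution,
  # otherwise the v1 variant.
  if context.executing_eagerly():
    return index_lookup.IndexLookup
  return index_lookup_v1.IndexLookup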
Example #6
    def test_batchnorm_non_trainable_with_fit(self):
        inputs = keras.Input((3, ))
        bn = normalization_v2.BatchNormalization()
        outputs = bn(inputs)
        model = keras.Model(inputs, outputs)
        model.compile('rmsprop',
                      'mse',
                      run_eagerly=testing_utils.should_run_eagerly(),
                      experimental_run_tf_function=testing_utils.
                      should_run_tf_function())
        model.fit(np.random.random((100, 3)), np.random.random((100, 3)))

        test_data = np.random.random((10, 3))
        test_targets = np.random.random((10, 3))
        test_loss = model.evaluate(test_data, test_targets)

        bn.trainable = False
        model.compile('rmsprop',
                      'mse',
                      run_eagerly=testing_utils.should_run_eagerly(),
                      experimental_run_tf_function=testing_utils.
                      should_run_tf_function())
        train_loss = model.train_on_batch(test_data, test_targets)
        self.assertAlmostEqual(test_loss, train_loss)
Example #7
def get_mnist_model(input_shape):
  """Define a deterministically-initialized CNN model for MNIST testing."""
  inputs = keras.Input(shape=input_shape)
  x = keras.layers.Conv2D(
      32,
      kernel_size=(3, 3),
      activation="relu",
      kernel_initializer=keras.initializers.TruncatedNormal(seed=99))(inputs)
  x = keras.layers.BatchNormalization()(x)
  x = keras.layers.Flatten()(x) + keras.layers.Flatten()(x)
  x = keras.layers.Dense(
      10,
      activation="softmax",
      kernel_initializer=keras.initializers.TruncatedNormal(seed=99))(x)
  model = keras.Model(inputs=inputs, outputs=x)

  # TODO(yuefengz): optimizer with slot variables doesn't work because of
  # optimizer's bug.
  # TODO(yuefengz): we should not allow non-v2 optimizer.
  model.compile(
      loss=keras.losses.sparse_categorical_crossentropy,
      optimizer=gradient_descent.SGD(learning_rate=0.001),
      metrics=["accuracy"])
  return model
Example #8
def get_model_from_layers_with_input(layers,
                                     input_shape=None,
                                     input_dtype=None,
                                     model_input=None):
  """Builds a model from a sequence of layers."""
  if model_input is not None and input_shape is not None:
    raise ValueError("Cannot specify a model_input and an input shape.")

  model_type = testing_utils.get_model_type()
  if model_type == "subclass":
    return _SubclassModel(layers, model_input)

  if model_type == "sequential":
    model = keras.models.Sequential()
    if model_input is not None:
      model.add(model_input)
    elif input_shape is not None:
      model.add(keras.Input(shape=input_shape, dtype=input_dtype))
    for layer in layers:
      model.add(layer)
    return model

  if model_type == "functional":
    if model_input is not None:
      inputs = model_input
    else:
      if not input_shape:
        raise ValueError("Cannot create a functional model from layers with no "
                         "input shape.")
      inputs = keras.Input(shape=input_shape, dtype=input_dtype)
    outputs = inputs
    for layer in layers:
      outputs = layer(outputs)
    return keras.Model(inputs, outputs)

  raise ValueError("Unknown model type {}".format(model_type))
Example #9
  def test_saved_weights_keras(self):
    input_data = [[1], [2], [3]]
    expected_output = [[0], [1], [2]]

    cls = discretization.Discretization
    inputs = keras.Input(shape=(1,), dtype=dtypes.int32)
    layer = cls(num_bins=3)
    layer.adapt(input_data)
    outputs = layer(inputs)
    model = keras.Model(inputs=inputs, outputs=outputs)

    output_data = model.predict(input_data)
    self.assertAllClose(output_data, expected_output)

    # Save the model to disk.
    output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_weights")
    model.save_weights(output_path, save_format="tf")
    new_model = keras.Model.from_config(
        model.get_config(), custom_objects={"Discretization": cls})
    new_model.load_weights(output_path)

    # Validate correctness of the new model.
    new_output_data = new_model.predict(input_data)
    self.assertAllClose(new_output_data, expected_output)
Example #10
  def test_model_saving_to_pre_created_h5py_file(self):
    if h5py is None:
      self.skipTest('h5py required to run this test')

    with self.test_session():
      inputs = keras.Input(shape=(3,))
      x = keras.layers.Dense(2)(inputs)
      outputs = keras.layers.Dense(3)(x)

      model = keras.Model(inputs, outputs)
      model.compile(loss=keras.losses.MSE,
                    optimizer=keras.optimizers.Adam(),
                    metrics=[keras.metrics.categorical_accuracy])
      x = np.random.random((1, 3))
      y = np.random.random((1, 3))
      model.train_on_batch(x, y)

      out = model.predict(x)
      fd, fname = tempfile.mkstemp('.h5')
      with h5py.File(fname, mode='r+') as h5file:
        keras.models.save_model(model, h5file)
        loaded_model = keras.models.load_model(h5file)
        out2 = loaded_model.predict(x)
      self.assertAllClose(out, out2, atol=1e-05)

      # Test non-default options in h5
      with h5py.File('_', driver='core',
                     backing_store=False) as h5file:
        keras.models.save_model(model, h5file)
        loaded_model = keras.models.load_model(h5file)
        out2 = loaded_model.predict(x)
      self.assertAllClose(out, out2, atol=1e-05)

      # Cleanup
      os.close(fd)
      os.remove(fname)
Example #11
  def test_train_eval_with_steps(self):
    # See b/142880049 for more details.
    inp = keras.Input(shape=(4,), name='inp1')
    out = keras.layers.Dense(2)(inp)
    model = keras.Model(inp, out)
    model.compile(
        'rmsprop', loss='mse', run_eagerly=testing_utils.should_run_eagerly())

    inputs = np.zeros((100, 4), dtype=np.float32)
    targets = np.random.randint(0, 2, size=100, dtype=np.int32)
    training_ds = dataset_ops.Dataset.from_tensor_slices(
        (inputs, targets)).repeat().batch(10)

    # Create eval dataset with generator, so that dataset won't contain the
    # overall size metadata. Without eval_steps, we expect to run through all
    # the data in this dataset every epoch.
    def gen():
      for _ in range(100):
        yield (np.zeros(4, dtype=np.float32),
               np.random.randint(0, 2, size=1, dtype=np.int32))

    eval_ds = dataset_ops.Dataset.from_generator(
        generator=gen,
        output_types=('float64', 'int32'),
        output_shapes=([4], [1])).batch(100)
    batch_counter = BatchCounterCallback()

    model.fit(
        training_ds,
        steps_per_epoch=10,
        epochs=10,
        validation_data=eval_ds,
        callbacks=[batch_counter])

    # Expect 10 training batches per epoch, i.e. 100 across the 10 epochs.
    self.assertEqual(batch_counter.batch_end_count, 100)
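
BatchCounterCallback here is a helper defined elsewhere in the test file; a minimal sketch of what it presumably looks like (counting on_batch_end calls):

from tensorflow import keras

class BatchCounterCallback(keras.callbacks.Callback):
  """Assumed helper: counts how many batches fit/evaluate processed."""

  def __init__(self):
    self.batch_end_count = 0

  def on_batch_end(self, batch, logs=None):
    self.batch_end_count += 1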
Example #12
    def create_model(self,
                     model_name,
                     input_size,
                     embedding_size,
                     num_classes,
                     include_top=False):
        base_network = model_sets[model_name](input_shape=(input_size,
                                                           input_size, 3),
                                              include_top=include_top,
                                              weights='imagenet')

        base_network.trainable = True
        inputs = base_network.input
        x = base_network(inputs)
        x = layers.GlobalAveragePooling2D()(x)
        embedding = layers.Dense(embedding_size, name='embedding')(x)
        logits = layers.Dense(num_classes, name='logits')(embedding)
        model = keras.Model(inputs=inputs,
                            outputs={
                                'embedding': embedding,
                                'logits': logits
                            })
        model.summary()
        return model
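
model_sets is not defined in this excerpt; it evidently maps a model name to an ImageNet backbone constructor. A plausible sketch using keras.applications (the key names are illustrative assumptions):

from tensorflow.keras import applications

# Assumed mapping from model name to a keras.applications backbone.
model_sets = {
    'resnet50': applications.ResNet50,
    'mobilenet_v2': applications.MobileNetV2,
}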
Example #13
  def get_model(self,
                max_words=10,
                initial_weights=None,
                distribution=None,
                experimental_run_tf_function=None,
                input_shapes=None):
    del input_shapes

    if tf2.enabled():
      if not context.executing_eagerly():
        self.skipTest("LSTM v2 and legacy graph mode don't work together.")
      lstm = rnn_v2.LSTM
    else:
      lstm = rnn_v1.LSTM

    with keras_correctness_test_base.MaybeDistributionScope(distribution):
      word_ids = keras.layers.Input(
          shape=(max_words,), dtype=np.int32, name='words')
      word_embed = keras.layers.Embedding(input_dim=20, output_dim=10)(word_ids)
      lstm_embed = lstm(units=4, return_sequences=False)(
          word_embed)

      preds = keras.layers.Dense(2, activation='softmax')(lstm_embed)
      model = keras.Model(inputs=[word_ids], outputs=[preds])

      if initial_weights:
        model.set_weights(initial_weights)

      optimizer_fn = gradient_descent_keras.SGD

      model.compile(
          optimizer=optimizer_fn(learning_rate=0.1),
          loss='sparse_categorical_crossentropy',
          metrics=['sparse_categorical_accuracy'],
          experimental_run_tf_function=experimental_run_tf_function)
    return model
Example #14
  def test_one_hot_output_rank_one_input(self):
    input_data = np.array([3, 2, 0, 1])
    expected_output = [
        [0, 0, 0, 1],
        [0, 0, 1, 0],
        [1, 0, 0, 0],
        [0, 1, 0, 0],
    ]
    num_tokens = 4
    expected_output_shape = [None, num_tokens]

    # Test call on layer directly.
    layer = category_encoding.CategoryEncoding(
        num_tokens=num_tokens, output_mode=category_encoding.ONE_HOT)
    output_data = layer(input_data)
    self.assertAllEqual(expected_output, output_data)

    # Test call on model.
    inputs = keras.Input(shape=(1,), dtype=dtypes.int32)
    outputs = layer(inputs)
    model = keras.Model(inputs=inputs, outputs=outputs)
    output_data = model(input_data)
    self.assertAllEqual(expected_output_shape, outputs.shape.as_list())
    self.assertAllEqual(expected_output, output_data)
Example #15
  def test_distribution_strategy_output(self, distribution):
    vocab_data = ["earth", "wind", "and", "fire"]
    input_array = np.array([["earth", "wind", "and", "fire"],
                            ["fire", "and", "earth", "michigan"]])
    input_dataset = dataset_ops.Dataset.from_tensor_slices(input_array).batch(
        2, drop_remainder=True)

    expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]

    config.set_soft_device_placement(True)

    with distribution.scope():
      input_data = keras.Input(shape=(None,), dtype=dtypes.string)
      layer = get_layer_class()(
          max_tokens=None,
          standardize=None,
          split=None,
          output_mode=text_vectorization.INT)
      layer.set_vocabulary(vocab_data)
      int_data = layer(input_data)
      model = keras.Model(inputs=input_data, outputs=int_data)

    output_dataset = model.predict(input_dataset)
    self.assertAllEqual(expected_output, output_dataset)
Example #16
    def test_multiple_ngram_values(self):
        input_array = np.array([["earth", "wind", "and", "fire"],
                                ["fire", "and", "earth", "michigan"]])
        # pyformat: disable
        expected_output = [[
            b"earth wind", b"wind and", b"and fire", b"earth wind and",
            b"wind and fire"
        ],
                           [
                               b"fire and", b"and earth", b"earth michigan",
                               b"fire and earth", b"and earth michigan"
                           ]]
        # pyformat: enable

        input_data = keras.Input(shape=(4, ), dtype=dtypes.string)
        layer = get_layer_class()(max_tokens=None,
                                  standardize=None,
                                  split=None,
                                  ngrams=(2, 3),
                                  output_mode=None)
        int_data = layer(input_data)
        model = keras.Model(inputs=input_data, outputs=int_data)
        output_dataset = model.predict(input_array)
        self.assertAllEqual(expected_output, output_dataset)
Example #17
    def load_keras_model(model_dir, max_seq_len):
        from tensorflow.python import keras
        from bert import BertModelLayer
        from bert.loader import StockBertConfig, load_stock_weights

        bert_config_file = os.path.join(model_dir, "bert_config.json")
        bert_ckpt_file   = os.path.join(model_dir, "bert_model.ckpt")

        l_bert = None
        with tf.io.gfile.GFile(bert_config_file, "r") as reader:
            bc = StockBertConfig.from_json_string(reader.read())
            l_bert = BertModelLayer.from_params(bc.to_bert_model_layer_params(), name="bert")

        l_input_ids      = keras.layers.Input(shape=(max_seq_len,), dtype='int32', name="input_ids")
        l_token_type_ids = keras.layers.Input(shape=(max_seq_len,), dtype='int32', name="token_type_ids")

        output = l_bert([l_input_ids, l_token_type_ids])

        model = keras.Model(inputs=[l_input_ids, l_token_type_ids], outputs=output)
        model.build(input_shape=[(None, max_seq_len),
                                 (None, max_seq_len)])

        load_stock_weights(l_bert, bert_ckpt_file)
        return model
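
A usage sketch for the loader above; the checkpoint directory name is a hypothetical example:

# Hypothetical pretrained-BERT directory holding bert_config.json and
# bert_model.ckpt.* files.
model = load_keras_model("uncased_L-12_H-768_A-12", max_seq_len=128)
model.summary()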
Example #18
    def test_if_training_pattern_update(self):
        class MyLayer(keras.layers.Layer):
            def build(self, input_shape):
                self.counter = self.add_weight(shape=(),
                                               trainable=False,
                                               initializer='zeros')

            def call(self, inputs, training=None):
                if training:
                    increment = 1.
                else:
                    increment = 0.
                self.counter.assign_add(increment)
                return inputs

        inputs = keras.Input((3, ))
        layer = MyLayer()
        outputs = layer(inputs)
        model = keras.Model(inputs, outputs)
        model.compile('sgd',
                      'mse',
                      run_eagerly=testing_utils.should_run_eagerly())
        model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
        self.assertEqual(keras.backend.get_value(layer.counter), 1.)
Example #19
    def get_model(self, initial_weights=None, distribution=None):
        with keras_correctness_test_base.MaybeDistributionScope(distribution):
            image = keras.layers.Input(shape=(28, 28, 3), name='image')
            c1 = keras.layers.Conv2D(
                name='conv1',
                filters=16,
                kernel_size=(3, 3),
                strides=(4, 4),
                kernel_regularizer=keras.regularizers.l2(1e-4))(image)
            if self.with_batch_norm:
                c1 = keras.layers.BatchNormalization(name='bn1')(c1)
            c1 = keras.layers.MaxPooling2D(pool_size=(2, 2))(c1)
            logits = keras.layers.Dense(10, activation='softmax', name='pred')(
                keras.layers.Flatten()(c1))
            model = keras.Model(inputs=[image], outputs=[logits])

            if initial_weights:
                model.set_weights(initial_weights)

            model.compile(optimizer=gradient_descent.SGD(learning_rate=0.1),
                          loss='sparse_categorical_crossentropy',
                          metrics=['sparse_categorical_accuracy'])

        return model
Example #20
    def test_if_training_pattern_metric(self):
        class MyLayer(keras.layers.Layer):
            def call(self, inputs, training=None):
                if training:
                    metric = math_ops.reduce_sum(inputs)
                else:
                    metric = 0.
                self.add_metric(metric, name='my_metric', aggregation='mean')
                return inputs

        inputs = keras.Input((3, ))
        outputs = MyLayer()(inputs)
        model = keras.Model(inputs, outputs)
        model.compile('sgd',
                      'mse',
                      run_eagerly=testing_utils.should_run_eagerly())
        for _ in range(3):
            _, train_metric = model.train_on_batch(np.ones((2, 3)),
                                                   np.ones((2, 3)))

            self.assertEqual(train_metric, 2 * 3)
            _, test_metric = model.test_on_batch(np.ones((2, 3)),
                                                 np.ones((2, 3)))
            self.assertEqual(test_metric, 0)
Example #21
  def test_learning_phase_value(self):
    # TODO(anjalisridhar): Modify this test to use Lambdas since we can compare
    # meaningful values. Currently we don't pass the learning phase if the
    # Lambda layer uses the learning phase.
    with self.cached_session():
      x = keras.layers.Input(shape=(1,), name='input')
      y = keras.layers.Dense(1, kernel_initializer='ones')(x)
      z = keras.layers.Dropout(0.9999)(y)
      model = keras.Model(x, z)
      initial_weights = model.get_weights()

      optimizer = gradient_descent.GradientDescentOptimizer(0.005)
      loss = 'mse'
      metrics = ['acc']
      strategy = mirrored_strategy.MirroredStrategy(
          ['/device:GPU:0', '/device:GPU:1'])

      model.compile(optimizer, loss, metrics=metrics, distribute=strategy)

      inputs = np.ones((10, 1), dtype=np.float32)
      targets = np.ones((10, 1), dtype=np.float32)
      dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
      dataset = dataset.repeat().batch(8)
      hist = model.fit(dataset, epochs=1, steps_per_epoch=20, verbose=1)
      self.assertAlmostEqual(hist.history['acc'][0], 0, 0)

      model.set_weights(initial_weights)
      evaluate_output = model.evaluate(dataset, steps=20)
      self.assertAlmostEqual(evaluate_output[1], 1, 0)

      inputs = np.ones((10, 1), dtype=np.float32)
      predict_dataset = dataset_ops.Dataset.from_tensor_slices(inputs)
      predict_dataset = predict_dataset.repeat().batch(5)
      output = model.predict(predict_dataset, steps=10)
      ref_output = np.ones((50, 1), dtype=np.float32)
      self.assertArrayNear(output[0], ref_output, 1e-1)
Example #22
  def test_sparse_adapt(self):
    vocab_data = sparse_ops.from_dense(
        np.array([[1, 1, 0, 1, 1, 2, 2, 0, 2, 3, 3, 0, 4]], dtype=np.int64))
    vocab_dataset = dataset_ops.Dataset.from_tensors(vocab_data)
    input_array = sparse_ops.from_dense(
        np.array([[1, 2, 3, 0], [0, 3, 1, 0]], dtype=np.int64))

    # pyformat: disable
    expected_output = [[0, 1, 1, 1, 0],
                       [0, 1, 0, 1, 0]]
    # pyformat: enable
    max_tokens = 5
    expected_output_shape = [None, max_tokens]

    input_data = keras.Input(shape=(None,), dtype=dtypes.int64, sparse=True)
    layer = get_layer_class()(
        max_tokens=None, output_mode=category_encoding.BINARY)
    layer.adapt(vocab_dataset)
    int_data = layer(input_data)
    self.assertAllEqual(expected_output_shape, int_data.shape.as_list())

    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array, steps=1)
    self.assertAllEqual(expected_output, output_dataset)
Example #23
    def testSingleInputCase(self):
        class LayerWithOneInput(keras.layers.Layer):
            def build(self, input_shape):
                self.w = array_ops.ones(shape=(3, 4))

            def call(self, inputs):
                return keras.backend.dot(inputs, self.w)

        inputs = input_layer_lib.Input(shape=(3, ))
        layer = LayerWithOneInput()

        if context.executing_eagerly():
            self.assertEqual(
                layer.compute_output_shape((None, 3)).as_list(), [None, 4])
            # As a side-effect, compute_output_shape builds the layer.
            self.assertTrue(layer.built)
            # We can still query the layer's compute_output_shape with compatible
            # input shapes.
            self.assertEqual(
                layer.compute_output_shape((6, 3)).as_list(), [6, 4])

        outputs = layer(inputs)
        model = keras.Model(inputs, outputs)
        self._testShapeInference(model, (2, 3), (2, 4))
Example #24
  def test_tfidf_output_soft_maximum(self):
    tfidf_data = [.05, .5, .25, .2, .125]
    input_array = np.array([[1, 2, 3, 1], [0, 4, 1, 0]])

    # pyformat: disable
    # pylint: disable=bad-whitespace
    expected_output = [[ 0,  1, .25, .2,    0],
                       [.1, .5,   0,  0, .125]]
    # pylint: enable=bad-whitespace
    # pyformat: enable
    max_tokens = 5
    expected_output_shape = [None, max_tokens]

    input_data = keras.Input(shape=(None,), dtype=dtypes.int32)
    layer = get_layer_class()(
        max_tokens=None, output_mode=category_encoding.TFIDF)
    layer.set_num_elements(max_tokens)
    layer.set_tfidf_data(tfidf_data)
    int_data = layer(input_data)
    self.assertAllEqual(expected_output_shape, int_data.shape.as_list())

    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllClose(expected_output, output_dataset)
Example #25
    def test_embedding_with_ragged_input(self):
        layer = keras.layers.Embedding(
            input_dim=3,
            output_dim=2,
            weights=[np.array([[0., 0.], [1., 1.], [2., 2.]])])
        inputs = keras.layers.Input(shape=(None, ),
                                    dtype=dtypes.float32,
                                    ragged=True)
        # pylint: disable=unnecessary-lambda
        outputs = keras.layers.Lambda(
            lambda args: keras.backend.identity(args))(inputs)
        # pylint: enable=unnecessary-lambda
        outputs = layer(outputs)

        model = keras.Model(inputs, outputs)
        model.run_eagerly = testing_utils.should_run_eagerly()
        outputs = model.predict(
            ragged_factory_ops.constant([[1., 2., 2.], [0.], [1., 2.]],
                                        ragged_rank=1))
        self.assertAllClose(
            outputs,
            ragged_factory_ops.constant([[[1., 1.], [2., 2.], [2., 2.]],
                                         [[0., 0.]], [[1., 1.], [2., 2.]]],
                                        ragged_rank=1))
Example #26
    def test_calling_model_on_same_dataset(self):
        with self.test_session():
            x = keras.layers.Input(shape=(3, ), name='input')
            y = keras.layers.Dense(4, name='dense')(x)
            model = keras.Model(x, y)

            optimizer = gradient_descent.GradientDescentOptimizer(0.001)
            loss = 'mse'
            metrics = ['mae']
            strategy = mirrored_strategy.MirroredStrategy(
                ['/device:GPU:1', '/device:GPU:0'])
            model.compile(optimizer,
                          loss,
                          metrics=metrics,
                          distribute=strategy)

            inputs = np.zeros((10, 3), dtype=np.float32)
            targets = np.zeros((10, 4), dtype=np.float32)
            dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
            dataset = dataset.repeat(100)
            dataset = dataset.batch(10)

            # Call fit with validation data
            model.fit(dataset,
                      epochs=1,
                      steps_per_epoch=2,
                      verbose=0,
                      validation_data=dataset,
                      validation_steps=2)
            model.fit(dataset,
                      epochs=1,
                      steps_per_epoch=2,
                      verbose=0,
                      validation_data=dataset,
                      validation_steps=2)
            model.predict(dataset, steps=2)
Example #27
    def test_distribution(self, distribution):
        # TODO(b/159738418): large image input causes OOM in ubuntu multi gpu.
        np_images = np.random.random((32, 32, 32, 3)).astype(np.float32)
        image_dataset = dataset_ops.Dataset.from_tensor_slices(
            np_images).batch(16, drop_remainder=True)

        with distribution.scope():
            input_data = keras.Input(shape=(32, 32, 3), dtype=dtypes.float32)
            image_preprocessor = keras.Sequential([
                image_preprocessing.Resizing(height=256, width=256),
                image_preprocessing.RandomCrop(height=224, width=224),
                image_preprocessing.RandomTranslation(.1, .1),
                image_preprocessing.RandomRotation(.2),
                image_preprocessing.RandomFlip(),
                image_preprocessing.RandomZoom(.2, .2)
            ])
            preprocessed_image = image_preprocessor(input_data)
            flatten_layer = keras.layers.Flatten(data_format="channels_last")
            output = flatten_layer(preprocessed_image)
            cls_layer = keras.layers.Dense(units=1, activation="sigmoid")
            output = cls_layer(output)
            model = keras.Model(inputs=input_data, outputs=output)
        model.compile(loss="binary_crossentropy")
        _ = model.predict(image_dataset)
Example #28
  def test_updates_and_losses_for_nested_models_in_subclassed_model(self):

    # Case 1: deferred-build sequential nested in subclass.
    class TestModel1(keras.Model):

      def __init__(self):
        super(TestModel1, self).__init__()
        self.fc = keras.layers.Dense(10, input_shape=(784,),
                                     activity_regularizer='l1')
        self.bn = keras.Sequential([keras.layers.BatchNormalization(axis=1)])

      def call(self, x):
        return self.bn(self.fc(x))

    with self.cached_session():
      model = TestModel1()

      x = array_ops.ones(shape=[100, 784], dtype='float32')
      model(x)
      self.assertEqual(len(model.get_updates_for(x)), 2)
      self.assertEqual(len(model.get_losses_for(x)), 1)

    # Case 2: placeholder-sequential nested in subclass.
    class TestModel2(keras.Model):

      def __init__(self):
        super(TestModel2, self).__init__()
        self.fc = keras.layers.Dense(10, input_shape=(784,),
                                     activity_regularizer='l1')
        self.bn = keras.Sequential(
            [keras.layers.BatchNormalization(axis=1, input_shape=(10,))])

      def call(self, x):
        return self.bn(self.fc(x))

    with self.cached_session():
      model = TestModel2()

      x = array_ops.ones(shape=[100, 784], dtype='float32')
      model(x)
      self.assertEqual(len(model.get_updates_for(x)), 2)
      self.assertEqual(len(model.get_losses_for(x)), 1)

    # Case 3: functional-API model nested in subclass.
    inputs = keras.Input((10,))
    outputs = keras.layers.BatchNormalization(axis=1)(inputs)
    bn = keras.Model(inputs, outputs)

    class TestModel3(keras.Model):

      def __init__(self):
        super(TestModel3, self).__init__()
        self.fc = keras.layers.Dense(10, input_shape=(784,),
                                     activity_regularizer='l1')
        self.bn = bn

      def call(self, x):
        return self.bn(self.fc(x))

    with self.cached_session():
      model = TestModel3()

      x = array_ops.ones(shape=[100, 784], dtype='float32')
      model(x)
      self.assertEqual(len(model.get_updates_for(x)), 2)
      self.assertEqual(len(model.get_losses_for(x)), 1)
Example #29
  def test_functional_model(self):
    inputs = keras.Input(10)
    outputs = keras.layers.Dense(1)(inputs)
    model = keras.Model(inputs, outputs)
    self._check_model_class(model.__class__.__bases__[0])
    self._check_layer_class(model)
Example #30
def get_model():
    x = keras.layers.Input(shape=(3, ), name='input')
    y = keras.layers.Dense(4, name='dense')(x)
    model = keras.Model(x, y)
    return model