def get_model_from_layers(layers,
                          input_shape=None,
                          input_dtype=None,
                          name=None,
                          input_ragged=None,
                          input_sparse=None):
    """Builds a model from a sequence of layers.

  Args:
    layers: The layers used to build the network.
    input_shape: Shape tuple of the input or 'TensorShape' instance.
    input_dtype: Datatype of the input.
    name: Name for the model.
    input_ragged: Boolean, whether the input data is a ragged tensor.
    input_sparse: Boolean, whether the input data is a sparse tensor.

  Returns:
    A Keras model.
  """

    model_type = get_model_type()
    if model_type == 'subclass':
        inputs = None
        if input_ragged or input_sparse:
            inputs = keras.Input(shape=input_shape,
                                 dtype=input_dtype,
                                 ragged=input_ragged,
                                 sparse=input_sparse)
        return _SubclassModel(layers, name=name, input_tensor=inputs)

    if model_type == 'subclass_custom_build':
        layer_generating_func = lambda: layers
        return _SubclassModelCustomBuild(layer_generating_func, name=name)

    if model_type == 'sequential':
        model = keras.models.Sequential(name=name)
        if input_shape:
            model.add(
                keras.layers.InputLayer(input_shape=input_shape,
                                        dtype=input_dtype,
                                        ragged=input_ragged,
                                        sparse=input_sparse))
        for layer in layers:
            model.add(layer)
        return model

    if model_type == 'functional':
        if not input_shape:
            raise ValueError(
                'Cannot create a functional model from layers with no '
                'input shape.')
        inputs = keras.Input(shape=input_shape,
                             dtype=input_dtype,
                             ragged=input_ragged,
                             sparse=input_sparse)
        outputs = inputs
        for layer in layers:
            outputs = layer(outputs)
        return keras.Model(inputs, outputs, name=name)

    raise ValueError('Unknown model type {}'.format(model_type))
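
A minimal usage sketch (added for illustration, not part of the original listing): it assumes get_model_type() is defined in the same module, as in the Keras testing utilities, and that numpy is imported as np.

    # Hypothetical usage of get_model_from_layers(); the layer stack and
    # shapes below are illustrative only.
    model = get_model_from_layers(
        [keras.layers.Dense(4, activation='relu'), keras.layers.Dense(1)],
        input_shape=(10,))
    model.compile(optimizer='rmsprop', loss='mse')
    model.train_on_batch(np.zeros((2, 10)), np.zeros((2, 1)))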

Example #2
def _single_op_with_attrs():
    inputs = keras.Input(shape=(10, ))
    x = math_ops.reduce_mean(inputs, axis=1, keepdims=True)
    outputs = keras.layers.Dense(10)(x)
    return inputs, outputs

Example #3
    def test_minimal_rnn_cell_layer(self):
        class MinimalRNNCell(keras.layers.Layer):
            def __init__(self, units, **kwargs):
                self.units = units
                self.state_size = units
                super(MinimalRNNCell, self).__init__(**kwargs)

            def build(self, input_shape):
                self.kernel = self.add_weight(shape=(input_shape[-1],
                                                     self.units),
                                              initializer='uniform',
                                              name='kernel')
                self.recurrent_kernel = self.add_weight(
                    shape=(self.units, self.units),
                    initializer='uniform',
                    name='recurrent_kernel')
                self.built = True

            def call(self, inputs, states):
                prev_output = states[0]
                h = keras.backend.dot(inputs, self.kernel)
                output = h + keras.backend.dot(prev_output,
                                               self.recurrent_kernel)
                return output, [output]

            def get_config(self):
                config = {'units': self.units}
                base_config = super(MinimalRNNCell, self).get_config()
                return dict(list(base_config.items()) + list(config.items()))

        with self.cached_session():
            # Test basic case.
            x = keras.Input((None, 5))
            cell = MinimalRNNCell(32)
            layer = keras.layers.RNN(cell)
            y = layer(x)
            model = keras.models.Model(x, y)
            model.compile(optimizer='rmsprop', loss='mse')
            model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))

            # Test basic case serialization.
            x_np = np.random.random((6, 5, 5))
            y_np = model.predict(x_np)
            weights = model.get_weights()
            config = layer.get_config()
            with keras.utils.CustomObjectScope(
                {'MinimalRNNCell': MinimalRNNCell}):
                layer = keras.layers.RNN.from_config(config)
            y = layer(x)
            model = keras.models.Model(x, y)
            model.set_weights(weights)
            y_np_2 = model.predict(x_np)
            self.assertAllClose(y_np, y_np_2, atol=1e-4)

            # Test stacking.
            cells = [MinimalRNNCell(8), MinimalRNNCell(12), MinimalRNNCell(32)]
            layer = keras.layers.RNN(cells)
            y = layer(x)
            model = keras.models.Model(x, y)
            model.compile(optimizer='rmsprop', loss='mse')
            model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))

            # Test stacked RNN serialization.
            x_np = np.random.random((6, 5, 5))
            y_np = model.predict(x_np)
            weights = model.get_weights()
            config = layer.get_config()
            with keras.utils.CustomObjectScope(
                {'MinimalRNNCell': MinimalRNNCell}):
                layer = keras.layers.RNN.from_config(config)
            y = layer(x)
            model = keras.models.Model(x, y)
            model.set_weights(weights)
            y_np_2 = model.predict(x_np)
            self.assertAllClose(y_np, y_np_2, atol=1e-4)

Example #4
def _multiple_ops_at_end():
    inputs = keras.Input(shape=(10, ))
    x = keras.layers.Dense(10)(inputs)
    x = gen_nn_ops.relu(x)
    outputs = gen_nn_ops.relu(x)
    return keras.Model(inputs, outputs)

Example #5
def _single_op_in_middle():
    inputs = keras.Input(shape=(10, ))
    x = keras.layers.Dense(10)(inputs)
    x = gen_nn_ops.relu(x, name='hey')
    outputs = keras.layers.Dense(10)(x)
    return inputs, outputs

Example #6
def _add_n():
    inputs = keras.Input(shape=(10, ))
    outputs = math_ops.add_n([inputs, inputs, inputs])
    return keras.Model(inputs, outputs)

Example #7
def _inner_layer():
    inputs = keras.Input(shape=(10, ))
    outputs = LayerWithLayer()(inputs)
    return keras.Model(inputs, outputs)

    def test_updates_and_losses_for_nested_models_in_subclassed_model(self):

        # Case 1: deferred-build sequential nested in subclass.
        class TestModel1(keras.Model):
            def __init__(self):
                super(TestModel1, self).__init__()
                self.fc = keras.layers.Dense(10,
                                             input_shape=(784, ),
                                             activity_regularizer='l1')
                self.bn = keras.Sequential(
                    [keras.layers.BatchNormalization(axis=1)])

            def call(self, x):
                return self.bn(self.fc(x))

        with ops.get_default_graph().as_default(), self.cached_session():
            model = TestModel1()

            x = array_ops.ones(shape=[100, 784], dtype='float32')
            model(x)
            self.assertEqual(len(model.get_updates_for(x)), 2)
            self.assertEqual(len(model.get_losses_for(x)), 1)

        # Case 2: placeholder-sequential nested in subclass.
        class TestModel2(keras.Model):
            def __init__(self):
                super(TestModel2, self).__init__()
                self.fc = keras.layers.Dense(10,
                                             input_shape=(784, ),
                                             activity_regularizer='l1')
                self.bn = keras.Sequential([
                    keras.layers.BatchNormalization(axis=1, input_shape=(10, ))
                ])

            def call(self, x):
                return self.bn(self.fc(x))

        with ops.get_default_graph().as_default(), self.cached_session():
            model = TestModel2()

            x = array_ops.ones(shape=[100, 784], dtype='float32')
            model(x)
            self.assertEqual(len(model.get_updates_for(x)), 2)
            self.assertEqual(len(model.get_losses_for(x)), 1)

        # Case 3: functional-API model nested in subclass.
        with ops.get_default_graph().as_default():
            inputs = keras.Input((10, ))
            outputs = keras.layers.BatchNormalization(axis=1)(inputs)
            bn = keras.Model(inputs, outputs)

            class TestModel3(keras.Model):
                def __init__(self):
                    super(TestModel3, self).__init__()
                    self.fc = keras.layers.Dense(10,
                                                 input_shape=(784, ),
                                                 activity_regularizer='l1')
                    self.bn = bn

                def call(self, x):
                    return self.bn(self.fc(x))

            with self.cached_session():
                model = TestModel3()

                x = array_ops.ones(shape=[100, 784], dtype='float32')
                model(x)
                self.assertEqual(len(model.get_updates_for(x)), 2)
                self.assertEqual(len(model.get_losses_for(x)), 1)

    def test_eager_switch_case_input(self):
        with context.eager_mode():
            task = keras.Input(shape=(), dtype=dtypes.int32)
            control_flow_ops.switch_case(
                task[0],
                [lambda: constant_op.constant(1.0) for _ in range(10)])

  def test_rnn_with_time_major(self):
    batch = 10
    time_step = 5
    embedding_dim = 4
    units = 3

    # Test basic case.
    x = keras.Input((time_step, embedding_dim))
    time_major_x = keras.layers.Lambda(
        lambda t: array_ops.transpose(t, [1, 0, 2]))(x)
    layer = keras.layers.SimpleRNN(
        units, time_major=True, return_sequences=True)
    self.assertEqual(
        layer.compute_output_shape((time_step, None,
                                    embedding_dim)).as_list(),
        [time_step, None, units])
    y = layer(time_major_x)
    self.assertEqual(layer.output_shape, (time_step, None, units))

    y = keras.layers.Lambda(lambda t: array_ops.transpose(t, [1, 0, 2]))(y)

    model = keras.models.Model(x, y)
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())
    model.train_on_batch(
        np.zeros((batch, time_step, embedding_dim)),
        np.zeros((batch, time_step, units)))

    # Test stacking.
    x = keras.Input((time_step, embedding_dim))
    time_major_x = keras.layers.Lambda(
        lambda t: array_ops.transpose(t, [1, 0, 2]))(x)
    cell_units = [10, 8, 6]
    cells = [keras.layers.SimpleRNNCell(cell_units[i]) for i in range(3)]
    layer = keras.layers.RNN(cells, time_major=True, return_sequences=True)
    y = layer(time_major_x)
    self.assertEqual(layer.output_shape, (time_step, None, cell_units[-1]))

    y = keras.layers.Lambda(lambda t: array_ops.transpose(t, [1, 0, 2]))(y)
    model = keras.models.Model(x, y)
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())
    model.train_on_batch(
        np.zeros((batch, time_step, embedding_dim)),
        np.zeros((batch, time_step, cell_units[-1])))

    # Test masking.
    x = keras.Input((time_step, embedding_dim))
    time_major = keras.layers.Lambda(
        lambda t: array_ops.transpose(t, [1, 0, 2]))(x)
    mask = keras.layers.Masking()(time_major)
    rnn = keras.layers.SimpleRNN(
        units, time_major=True, return_sequences=True)(mask)
    y = keras.layers.Lambda(lambda t: array_ops.transpose(t, [1, 0, 2]))(rnn)
    model = keras.models.Model(x, y)
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())
    model.train_on_batch(
        np.zeros((batch, time_step, embedding_dim)),
        np.zeros((batch, time_step, units)))

    # Test layer output
    x = keras.Input((time_step, embedding_dim))
    rnn_1 = keras.layers.SimpleRNN(units, return_sequences=True)
    y = rnn_1(x)

    model = keras.models.Model(x, y)
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())
    model.train_on_batch(
        np.zeros((batch, time_step, embedding_dim)),
        np.zeros((batch, time_step, units)))

    x_np = np.random.random((batch, time_step, embedding_dim))
    y_np_1 = model.predict(x_np)

    time_major = keras.layers.Lambda(
        lambda t: array_ops.transpose(t, [1, 0, 2]))(x)
    rnn_2 = keras.layers.SimpleRNN(
        units, time_major=True, return_sequences=True)
    y_2 = rnn_2(time_major)
    y_2 = keras.layers.Lambda(
        lambda t: array_ops.transpose(t, [1, 0, 2]))(y_2)

    model_2 = keras.models.Model(x, y_2)
    rnn_2.set_weights(rnn_1.get_weights())

    y_np_2 = model_2.predict(x_np)
    self.assertAllClose(y_np_1, y_np_2, atol=1e-4)

  def test_nest_input_output_with_init_state(self):
    batch = 10
    t = 5
    i1, i2, i3 = 3, 4, 5
    o1, o2, o3 = 2, 3, 4

    cell = NestedCell(o1, o2, o3)
    rnn = keras.layers.RNN(cell, return_sequences=True, return_state=True)

    input_1 = keras.Input((t, i1))
    input_2 = keras.Input((t, i2, i3))
    init_s1 = keras.Input((o1,))
    init_s2 = keras.Input((o2, o3))

    output1, output2, s1, s2 = rnn((input_1, input_2),
                                   initial_state=(init_s1, init_s2))

    self.assertEqual(output1.shape.as_list(), [None, t, o1])
    self.assertEqual(output2.shape.as_list(), [None, t, o2, o3])
    self.assertEqual(s1.shape.as_list(), [None, o1])
    self.assertEqual(s2.shape.as_list(), [None, o2, o3])

    model = keras.models.Model([input_1, input_2, init_s1, init_s2],
                               [output1, output2])
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())
    model.train_on_batch(
        [np.zeros((batch, t, i1)),
         np.zeros((batch, t, i2, i3)),
         np.zeros((batch, o1)),
         np.zeros((batch, o2, o3))],
        [np.zeros((batch, t, o1)),
         np.zeros((batch, t, o2, o3))])
    self.assertEqual(model.output_shape, [(None, t, o1), (None, t, o2, o3)])

    cell = NestedCell(o1, o2, o3, use_tuple=True)

    rnn = keras.layers.RNN(cell, return_sequences=True, return_state=True)

    input_1 = keras.Input((t, i1))
    input_2 = keras.Input((t, i2, i3))
    init_s1 = keras.Input((o1,))
    init_s2 = keras.Input((o2, o3))
    init_state = NestedState(s1=init_s1, s2=init_s2)

    output1, output2, s1, s2 = rnn(NestedInput(t1=input_1, t2=input_2),
                                   initial_state=init_state)

    self.assertEqual(output1.shape.as_list(), [None, t, o1])
    self.assertEqual(output2.shape.as_list(), [None, t, o2, o3])
    self.assertEqual(s1.shape.as_list(), [None, o1])
    self.assertEqual(s2.shape.as_list(), [None, o2, o3])

    model = keras.models.Model([input_1, input_2, init_s1, init_s2],
                               [output1, output2])
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())
    model.train_on_batch(
        [np.zeros((batch, t, i1)),
         np.zeros((batch, t, i2, i3)),
         np.zeros((batch, o1)),
         np.zeros((batch, o2, o3))],
        [np.zeros((batch, t, o1)),
         np.zeros((batch, t, o2, o3))])
    self.assertEqual(model.output_shape, [(None, t, o1), (None, t, o2, o3)])

    # Assumption: the opening `if` was truncated in the source; a cache
    # check on LOGDIR is a plausible reconstruction.
    if os.path.exists(LOGDIR):
        with open(LOGDIR, 'rb') as f:
            lda = pickle.load(f)
    else:
        lda = fast_lda_topics(train,
                              n_components=sc.n_labels,
                              max_iter=1000,
                              total_samples=total_samples)
        with open(LOGDIR, 'wb') as f:
            pickle.dump(lda, f)
    topics_text = get_topics_string(lda,
                                    vocabulary=sc.vocabulary,
                                    n_topics=sc.n_labels,
                                    n_words=20)
### LDA with amortized inference
else:
    layers = [keras.Input(shape=shape)]
    if lognorm:
        layers.append(keras.layers.Lambda(lambda x: tf.math.log1p(x)))
    layers += [
        keras.layers.Dense(300, activation='relu'),
        keras.layers.Dense(300, activation='relu'),
        keras.layers.Dense(300, activation='relu'),
    ]
    encoder = keras.Sequential(
        layers,
        name="Encoder",
    )
    lda = AmortizedLDA(n_words=shape[-1],
                       n_topics=sc.n_labels,
                       lda_posterior=posterior,
                       # call closed here; any further arguments were
                       # truncated in the source
                       word_distribution=distribution)

Example #13
    def test_merge_add_dynamic_shape(self):
        i1 = keras.Input(batch_shape=(4, None), dtype='float32')
        i2 = keras.Input(batch_shape=(4, 5), dtype='float32')
        layer = keras.layers.Add()
        o = layer([i1, i2])
        self.assertListEqual(o.shape.as_list(), [4, 5])

Example #14
    def test_clone_functional_model(self, share_weights):
        if share_weights:
            clone_fn = functools.partial(keras.models._clone_functional_model,
                                         layer_fn=models.share_weights)
        else:
            clone_fn = keras.models.clone_model

        val_a = np.random.random((10, 4))
        val_b = np.random.random((10, 4))
        val_out = np.random.random((10, 4))

        input_a = keras.Input(shape=(4, ))
        input_b = keras.Input(shape=(4, ))
        dense_1 = keras.layers.Dense(4)
        dense_2 = keras.layers.Dense(4)

        x_a = dense_1(input_a)
        x_a = keras.layers.Dropout(0.5)(x_a)
        x_a = keras.layers.BatchNormalization()(x_a)
        x_b = dense_1(input_b)
        x_a = dense_2(x_a)
        outputs = keras.layers.add([x_a, x_b])
        model = keras.models.Model([input_a, input_b], outputs)

        # With placeholder creation
        new_model = clone_fn(model)
        self.assertEqual(len(new_model.get_updates_for(new_model.inputs)), 2)
        new_model.compile(testing_utils.get_v2_optimizer('rmsprop'),
                          'mse',
                          run_eagerly=testing_utils.should_run_eagerly(),
                          experimental_run_tf_function=testing_utils.
                          should_run_tf_function())
        new_model.train_on_batch([val_a, val_b], val_out)

        # On top of new tensors
        input_a = keras.Input(shape=(4, ), name='a')
        input_b = keras.Input(shape=(4, ), name='b')
        new_model = keras.models.clone_model(model,
                                             input_tensors=[input_a, input_b])
        self.assertEqual(len(new_model.get_updates_for(new_model.inputs)), 2)
        new_model.compile(testing_utils.get_v2_optimizer('rmsprop'),
                          'mse',
                          run_eagerly=testing_utils.should_run_eagerly(),
                          experimental_run_tf_function=testing_utils.
                          should_run_tf_function())
        new_model.train_on_batch([val_a, val_b], val_out)

        # On top of new, non-Keras tensors
        if not context.executing_eagerly():
            # TODO(b/121277734): Skip eager contexts, as Input() layers raise
            # an error saying they should not be used with EagerTensors.
            input_a = keras.backend.variable(val_a)
            input_b = keras.backend.variable(val_b)
            new_model = clone_fn(model, input_tensors=[input_a, input_b])
            self.assertEqual(len(new_model.get_updates_for(new_model.inputs)),
                             2)
            new_model.compile(testing_utils.get_v2_optimizer('rmsprop'),
                              'mse',
                              run_eagerly=testing_utils.should_run_eagerly(),
                              experimental_run_tf_function=testing_utils.
                              should_run_tf_function())
            new_model.train_on_batch(None, val_out)

    def test_TensorBoard_multi_input_output(self):
        np.random.seed(1337)
        tmpdir = self.get_temp_dir()
        self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)

        with ops.Graph().as_default(), self.cached_session():
            filepath = os.path.join(tmpdir, 'logs')

            (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
                train_samples=TRAIN_SAMPLES,
                test_samples=TEST_SAMPLES,
                input_shape=(INPUT_DIM, ),
                num_classes=NUM_CLASSES)
            y_test = np_utils.to_categorical(y_test)
            y_train = np_utils.to_categorical(y_train)

            def data_generator(train):
                if train:
                    max_batch_index = len(x_train) // BATCH_SIZE
                else:
                    max_batch_index = len(x_test) // BATCH_SIZE
                i = 0
                while 1:
                    if train:
                        # simulate multi-input/output models
                        yield ([x_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]] *
                               2,
                               [y_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]] *
                               2)
                    else:
                        yield ([x_test[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]] *
                               2,
                               [y_test[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]] *
                               2)
                    i += 1
                    i %= max_batch_index

            inp1 = keras.Input((INPUT_DIM, ))
            inp2 = keras.Input((INPUT_DIM, ))
            inp = keras.layers.add([inp1, inp2])
            hidden = keras.layers.Dense(2, activation='relu')(inp)
            hidden = keras.layers.Dropout(0.1)(hidden)
            output1 = keras.layers.Dense(NUM_CLASSES,
                                         activation='softmax')(hidden)
            output2 = keras.layers.Dense(NUM_CLASSES,
                                         activation='softmax')(hidden)
            model = keras.models.Model([inp1, inp2], [output1, output2])
            model.compile(loss='categorical_crossentropy',
                          optimizer='sgd',
                          metrics=['accuracy'])

            # we must generate new callbacks for each test, as they aren't stateless
            def callbacks_factory(histogram_freq):
                return [
                    callbacks_v1.TensorBoard(log_dir=filepath,
                                             histogram_freq=histogram_freq,
                                             write_images=True,
                                             write_grads=True,
                                             batch_size=5)
                ]

            # fit without validation data
            model.fit([x_train] * 2, [y_train] * 2,
                      batch_size=BATCH_SIZE,
                      callbacks=callbacks_factory(histogram_freq=0),
                      epochs=3)

            # fit with validation data and accuracy
            model.fit([x_train] * 2, [y_train] * 2,
                      batch_size=BATCH_SIZE,
                      validation_data=([x_test] * 2, [y_test] * 2),
                      callbacks=callbacks_factory(histogram_freq=1),
                      epochs=2)

            # fit generator without validation data
            model.fit_generator(data_generator(True),
                                len(x_train),
                                epochs=2,
                                callbacks=callbacks_factory(histogram_freq=0))

            # fit generator with validation data and accuracy
            model.fit_generator(data_generator(True),
                                len(x_train),
                                epochs=2,
                                validation_data=([x_test] * 2, [y_test] * 2),
                                callbacks=callbacks_factory(histogram_freq=1))
            assert os.path.isdir(filepath)

Example #16
def get_small_functional_mlp(num_hidden, num_classes, input_dim):
    inputs = keras.Input(shape=(input_dim, ))
    outputs = keras.layers.Dense(num_hidden, activation='relu')(inputs)
    activation = 'sigmoid' if num_classes == 1 else 'softmax'
    outputs = keras.layers.Dense(num_classes, activation=activation)(outputs)
    return keras.Model(inputs, outputs)

Example #17
def _op_with_tensor_list():
    inputs = keras.Input(shape=(10, ))
    x = array_ops.concat([inputs, inputs], axis=1)
    outputs = keras.layers.Dense(10)(x)
    return keras.Model(inputs, outputs)

    def test_cudnnrnn_bidirectional(self):
        if test.is_gpu_available(cuda_only=True):
            with self.session(use_gpu=True):
                rnn = keras.layers.CuDNNGRU
                samples = 2
                dim = 2
                timesteps = 2
                output_dim = 2
                mode = 'concat'

                x = np.random.random((samples, timesteps, dim))
                target_dim = 2 * output_dim if mode == 'concat' else output_dim
                y = np.random.random((samples, target_dim))

                # test with Sequential model
                model = keras.Sequential()
                model.add(
                    keras.layers.Bidirectional(rnn(output_dim),
                                               merge_mode=mode,
                                               input_shape=(None, dim)))
                model.compile(loss='mse',
                              optimizer=RMSPropOptimizer(learning_rate=0.001))
                model.fit(x, y, epochs=1, batch_size=1)

                # test config
                model.get_config()
                model = keras.models.model_from_json(model.to_json())
                model.summary()

                # test stacked bidirectional layers
                model = keras.Sequential()
                model.add(
                    keras.layers.Bidirectional(rnn(output_dim,
                                                   return_sequences=True),
                                               merge_mode=mode,
                                               input_shape=(None, dim)))
                model.add(
                    keras.layers.Bidirectional(rnn(output_dim),
                                               merge_mode=mode))
                model.compile(loss='mse',
                              optimizer=RMSPropOptimizer(learning_rate=0.001))
                model.fit(x, y, epochs=1, batch_size=1)

                # test with functional API
                inputs = keras.Input((timesteps, dim))
                outputs = keras.layers.Bidirectional(rnn(output_dim),
                                                     merge_mode=mode)(inputs)
                model = keras.Model(inputs, outputs)
                model.compile(loss='mse',
                              optimizer=RMSPropOptimizer(learning_rate=0.001))
                model.fit(x, y, epochs=1, batch_size=1)

                # Bidirectional and stateful
                inputs = keras.Input(batch_shape=(1, timesteps, dim))
                outputs = keras.layers.Bidirectional(rnn(output_dim,
                                                         stateful=True),
                                                     merge_mode=mode)(inputs)
                model = keras.Model(inputs, outputs)
                model.compile(loss='mse',
                              optimizer=RMSPropOptimizer(learning_rate=0.001))
                model.fit(x, y, epochs=1, batch_size=1)

Example #19
def _layer_with_tensor_arg():
    inputs = keras.Input(shape=(10, ))
    x = inputs * 2
    outputs = MyAdd()(inputs, x)
    return keras.Model(inputs, outputs)

    def test_output_shape(self):
        input_data = keras.Input(shape=(4,), dtype=dtypes.string)
        layer = get_layer_class()()
        int_data = layer(input_data)
        self.assertAllEqual(int_data.shape[1:], input_data.shape[1:])

Example #21
def _single_identity_op_at_end():
    inputs = keras.Input(shape=(10, ))
    x = keras.layers.Dense(10)(inputs)
    outputs = array_ops.identity(x)
    return keras.Model(inputs, outputs)

Example #22
    def test_weight_preprocessing(self):
        input_dim = 3
        output_dim = 3
        size = 2
        cases = [
            [
                (keras.layers.Bidirectional(keras.layers.SimpleRNN(2))),
                [np.random.random((2, 1)),
                 np.random.random((2, 1))],
                (None, 3, 2),
            ],
            [
                (keras.layers.TimeDistributed(keras.layers.Dense(1))),
                [np.random.random((2, 1)),
                 np.random.random((1, ))],
                (None, 3, 2),
            ],
            [
                (keras.layers.Conv1D(output_dim, size, use_bias=False)),
                [np.random.random((output_dim, input_dim, size, 1))],
                (None, 4, input_dim),
            ],
            [
                (keras.layers.Conv2D(output_dim,
                                     size,
                                     use_bias=False,
                                     data_format='channels_first')),
                [np.random.random((output_dim, input_dim, size, size))],
                (None, input_dim, 4, 4),
            ],
            [
                (keras.layers.Conv2DTranspose(output_dim,
                                              size,
                                              use_bias=False,
                                              data_format='channels_first')),
                [np.random.random((output_dim, input_dim, size, size))],
                (None, input_dim, 4, 4),
            ],
            [
                (keras.layers.Conv2DTranspose(output_dim,
                                              size,
                                              use_bias=False,
                                              data_format='channels_last')),
                [np.random.random((size, size, input_dim, output_dim))],
                (None, 4, 4, input_dim),
            ],
            [
                (keras.layers.Conv3D(output_dim,
                                     size,
                                     use_bias=False,
                                     data_format='channels_first')),
                [np.random.random((output_dim, input_dim, size, size, size))],
                (None, input_dim, 4, 4, 4),
            ],
            [
                (keras.layers.GRU(output_dim)),
                [
                    np.random.random((input_dim, output_dim)),
                    np.random.random((output_dim, output_dim)),
                    np.random.random((output_dim, )),
                    np.random.random((input_dim, output_dim)),
                    np.random.random((output_dim, output_dim)),
                    np.random.random((output_dim, )),
                    np.random.random((input_dim, output_dim)),
                    np.random.random((output_dim, output_dim)),
                    np.random.random((output_dim, ))
                ],
                (None, 4, input_dim),
            ],
            [
                (keras.layers.LSTM(output_dim)),
                [
                    np.random.random((input_dim, output_dim)),
                    np.random.random((output_dim, output_dim)),
                    np.random.random((output_dim, )),
                    np.random.random((input_dim, output_dim)),
                    np.random.random((output_dim, output_dim)),
                    np.random.random((output_dim, )),
                    np.random.random((input_dim, output_dim)),
                    np.random.random((output_dim, output_dim)),
                    np.random.random((output_dim, )),
                    np.random.random((input_dim, output_dim)),
                    np.random.random((output_dim, output_dim)),
                    np.random.random((output_dim, ))
                ],
                (None, 4, input_dim),
            ],
        ]
        for layer, weights, input_shape in cases:
            layer.build(input_shape)
            _ = hdf5_format.preprocess_weights_for_loading(
                layer, weights, original_keras_version='1')

        model = keras.models.Sequential([keras.layers.Dense(2, input_dim=2)])
        _ = hdf5_format.preprocess_weights_for_loading(
            model, model.weights, original_keras_version='1')

        x = keras.Input((2, ))
        y = keras.layers.Dense(2)(x)
        model = keras.models.Model(x, y)
        _ = hdf5_format.preprocess_weights_for_loading(
            model, model.weights, original_keras_version='1')

Example #23
def _single_op_at_end():
    inputs = keras.Input(shape=(10, ))
    x = keras.layers.Dense(10)(inputs)
    outputs = gen_nn_ops.relu(x, name='hey')
    return inputs, outputs

    def test_output_shape(self):
        input_data = keras.Input(shape=(4,), dtype=dtypes.int64)
        layer = integer_lookup.IntegerLookup(max_tokens=2, num_oov_indices=1)
        int_data = layer(input_data)
        self.assertAllEqual(int_data.shape[1:], input_data.shape[1:])

Example #25
def _single_standalone_branch():
    inputs = keras.Input(shape=(10, ))
    x = keras.layers.Dense(10)(inputs)
    outputs = x * 2
    return inputs, outputs

def get_functional_model():
    inputs = keras.Input(shape=(4, ))
    x = keras.layers.Dense(4, activation='relu')(inputs)
    x = keras.layers.BatchNormalization()(x)
    outputs = keras.layers.Dense(2)(x)
    return keras.Model(inputs, outputs)

    def test_invalid_forward_pass(self):
        inputs = keras.Input((3,))
        with self.assertRaisesRegexp(ValueError, 'You did something wrong!'):
            _ = InvalidLayer()(inputs)

    def test_Bidirectional_merged_value(self):
        rnn = keras.layers.LSTM
        samples = 2
        dim = 5
        timesteps = 3
        units = 3
        x = [np.random.rand(samples, timesteps, dim)]

        with self.cached_session():
            for merge_mode in ['sum', 'mul', 'ave', 'concat', None]:
                if merge_mode == 'sum':
                    merge_func = lambda y, y_rev: y + y_rev
                elif merge_mode == 'mul':
                    merge_func = lambda y, y_rev: y * y_rev
                elif merge_mode == 'ave':
                    merge_func = lambda y, y_rev: (y + y_rev) / 2
                elif merge_mode == 'concat':
                    merge_func = lambda y, y_rev: np.concatenate(
                        (y, y_rev), axis=-1)
                else:
                    merge_func = lambda y, y_rev: [y, y_rev]

                # basic case
                inputs = keras.Input((timesteps, dim))
                layer = keras.layers.Bidirectional(rnn(units,
                                                       return_sequences=True),
                                                   merge_mode=merge_mode)
                f_merged = keras.backend.function([inputs],
                                                  _to_list(layer(inputs)))
                f_forward = keras.backend.function(
                    [inputs], [layer.forward_layer(inputs)])
                f_backward = keras.backend.function(
                    [inputs],
                    [keras.backend.reverse(layer.backward_layer(inputs), 1)])

                y_merged = f_merged(x)
                y_expected = _to_list(
                    merge_func(f_forward(x)[0],
                               f_backward(x)[0]))
                assert len(y_merged) == len(y_expected)
                for x1, x2 in zip(y_merged, y_expected):
                    self.assertAllClose(x1, x2, atol=1e-5)

                # test return_state
                inputs = keras.Input((timesteps, dim))
                layer = keras.layers.Bidirectional(rnn(units,
                                                       return_state=True),
                                                   merge_mode=merge_mode)
                f_merged = keras.backend.function([inputs], layer(inputs))
                f_forward = keras.backend.function([inputs],
                                                   layer.forward_layer(inputs))
                f_backward = keras.backend.function(
                    [inputs], layer.backward_layer(inputs))
                n_states = len(layer.layer.states)

                y_merged = f_merged(x)
                y_forward = f_forward(x)
                y_backward = f_backward(x)
                y_expected = _to_list(merge_func(y_forward[0], y_backward[0]))
                assert len(y_merged) == len(y_expected) + n_states * 2
                for x1, x2 in zip(y_merged, y_expected):
                    self.assertAllClose(x1, x2, atol=1e-5)

                y_merged = y_merged[-n_states * 2:]
                y_forward = y_forward[-n_states:]
                y_backward = y_backward[-n_states:]
                for state_birnn, state_inner in zip(y_merged,
                                                    y_forward + y_backward):
                    self.assertAllClose(state_birnn, state_inner, atol=1e-5)

Example #29
    def test_rnn_cell_with_constants_layer(self):
        class RNNCellWithConstants(keras.layers.Layer):
            def __init__(self, units, **kwargs):
                self.units = units
                self.state_size = units
                super(RNNCellWithConstants, self).__init__(**kwargs)

            def build(self, input_shape):
                if not isinstance(input_shape, list):
                    raise TypeError('expects constants shape')
                [input_shape, constant_shape] = input_shape
                # will (and should) raise if more than one constant passed

                self.input_kernel = self.add_weight(shape=(input_shape[-1],
                                                           self.units),
                                                    initializer='uniform',
                                                    name='kernel')
                self.recurrent_kernel = self.add_weight(
                    shape=(self.units, self.units),
                    initializer='uniform',
                    name='recurrent_kernel')
                self.constant_kernel = self.add_weight(
                    shape=(constant_shape[-1], self.units),
                    initializer='uniform',
                    name='constant_kernel')
                self.built = True

            def call(self, inputs, states, constants):
                [prev_output] = states
                [constant] = constants
                h_input = keras.backend.dot(inputs, self.input_kernel)
                h_state = keras.backend.dot(prev_output, self.recurrent_kernel)
                h_const = keras.backend.dot(constant, self.constant_kernel)
                output = h_input + h_state + h_const
                return output, [output]

            def get_config(self):
                config = {'units': self.units}
                base_config = super(RNNCellWithConstants, self).get_config()
                return dict(list(base_config.items()) + list(config.items()))

        with self.cached_session():
            # Test basic case.
            x = keras.Input((None, 5))
            c = keras.Input((3, ))
            cell = RNNCellWithConstants(32)
            layer = keras.layers.RNN(cell)
            y = layer(x, constants=c)

            model = keras.models.Model([x, c], y)
            model.compile(optimizer='rmsprop', loss='mse')
            model.train_on_batch([np.zeros(
                (6, 5, 5)), np.zeros((6, 3))], np.zeros((6, 32)))

        with self.cached_session():
            # Test basic case serialization.
            x_np = np.random.random((6, 5, 5))
            c_np = np.random.random((6, 3))
            y_np = model.predict([x_np, c_np])
            weights = model.get_weights()
            config = layer.get_config()
            custom_objects = {'RNNCellWithConstants': RNNCellWithConstants}
            with keras.utils.CustomObjectScope(custom_objects):
                layer = keras.layers.RNN.from_config(config.copy())
            y = layer(x, constants=c)
            model = keras.models.Model([x, c], y)
            model.set_weights(weights)
            y_np_2 = model.predict([x_np, c_np])
            self.assertAllClose(y_np, y_np_2, atol=1e-4)

        with self.cached_session():
            # test flat list inputs.
            with keras.utils.CustomObjectScope(custom_objects):
                layer = keras.layers.RNN.from_config(config.copy())
            y = layer([x, c])
            model = keras.models.Model([x, c], y)
            model.set_weights(weights)
            y_np_3 = model.predict([x_np, c_np])
            self.assertAllClose(y_np, y_np_3, atol=1e-4)

        with self.cached_session():
            # Test stacking.
            cells = [
                keras.layers.recurrent.GRUCell(8),
                RNNCellWithConstants(12),
                RNNCellWithConstants(32)
            ]
            layer = keras.layers.recurrent.RNN(cells)
            y = layer(x, constants=c)
            model = keras.models.Model([x, c], y)
            model.compile(optimizer='rmsprop', loss='mse')
            model.train_on_batch([np.zeros(
                (6, 5, 5)), np.zeros((6, 3))], np.zeros((6, 32)))

        with self.cached_session():
            # Test GRUCell reset_after property.
            x = keras.Input((None, 5))
            c = keras.Input((3, ))
            cells = [keras.layers.recurrent.GRUCell(32, reset_after=True)]
            layer = keras.layers.recurrent.RNN(cells)
            y = layer(x, constants=c)
            model = keras.models.Model([x, c], y)
            model.compile(optimizer='rmsprop', loss='mse')
            model.train_on_batch([np.zeros(
                (6, 5, 5)), np.zeros((6, 3))], np.zeros((6, 32)))

        with self.cached_session():
            # Test stacked RNN serialization
            x_np = np.random.random((6, 5, 5))
            c_np = np.random.random((6, 3))
            y_np = model.predict([x_np, c_np])
            weights = model.get_weights()
            config = layer.get_config()
            with keras.utils.CustomObjectScope(custom_objects):
                layer = keras.layers.recurrent.RNN.from_config(config.copy())
            y = layer(x, constants=c)
            model = keras.models.Model([x, c], y)
            model.set_weights(weights)
            y_np_2 = model.predict([x_np, c_np])
            self.assertAllClose(y_np, y_np_2, atol=1e-4)

    def test_training_pandas(self):
        try:
            import pandas as pd  # pylint: disable=g-import-not-at-top
        except ImportError:
            self.skipTest('Skipping test because pandas is not installed.')
        input_a = keras.Input(shape=(3, ), name='input_a')
        input_b = keras.Input(shape=(3, ), name='input_b')
        input_c = keras.Input(shape=(1, ), name='input_c')

        x = keras.layers.Dense(4, name='dense_1')(input_a)
        y = keras.layers.Dense(3, name='dense_2')(input_b)
        z = keras.layers.Dense(1, name='dense_3')(input_c)

        model_1 = keras.Model(inputs=input_a, outputs=x)
        model_2 = keras.Model(inputs=[input_a, input_b], outputs=[x, y])
        model_3 = keras.Model(inputs=input_c, outputs=z)

        model_1.compile(optimizer='rmsprop', loss='mse')
        model_2.compile(optimizer='rmsprop', loss='mse')

        input_a_np = np.random.random((10, 3))
        input_b_np = np.random.random((10, 3))
        input_a_df = pd.DataFrame(input_a_np)
        input_b_df = pd.DataFrame(input_b_np)

        output_a_df = pd.DataFrame(np.random.random((10, 4)))
        output_b_df = pd.DataFrame(np.random.random((10, 3)))

        model_1.fit(input_a_df, output_a_df)
        model_2.fit([input_a_df, input_b_df], [output_a_df, output_b_df])
        model_1.fit([input_a_df], [output_a_df])
        model_1.fit({'input_a': input_a_df}, output_a_df)
        model_2.fit({
            'input_a': input_a_df,
            'input_b': input_b_df
        }, [output_a_df, output_b_df])

        model_1.evaluate(input_a_df, output_a_df)
        model_2.evaluate([input_a_df, input_b_df], [output_a_df, output_b_df])
        model_1.evaluate([input_a_df], [output_a_df])
        model_1.evaluate({'input_a': input_a_df}, output_a_df)
        model_2.evaluate({
            'input_a': input_a_df,
            'input_b': input_b_df
        }, [output_a_df, output_b_df])

        # Verify predicting on pandas vs numpy returns the same result
        predict_1_pandas = model_1.predict(input_a_df)
        predict_2_pandas = model_2.predict([input_a_df, input_b_df])
        predict_3_pandas = model_3.predict(input_a_df[0])

        predict_1_numpy = model_1.predict(input_a_np)
        predict_2_numpy = model_2.predict([input_a_np, input_b_np])
        predict_3_numpy = model_3.predict(np.asarray(input_a_df[0]))

        self.assertAllClose(predict_1_numpy, predict_1_pandas)
        self.assertAllClose(predict_2_numpy, predict_2_pandas)
        self.assertAllClose(predict_3_numpy, predict_3_pandas)

        # Extra ways to pass in dataframes
        model_1.predict([input_a_df])
        model_1.predict({'input_a': input_a_df})
        model_2.predict({'input_a': input_a_df, 'input_b': input_b_df})