Example no. 1
0
    def get_model(self, max_words=10, initial_weights=None, distribution=None):
        """Build and compile a stateful-LSTM classifier over word-id sequences.

        Args:
            max_words: Length of the integer word-id input sequence.
            initial_weights: Optional list of weights to load into the model.
            distribution: Optional distribution strategy; when given, model
                construction and compilation run under its scope.

        Returns:
            A compiled `keras.Model` with a 2-way softmax output.
        """
        # A stateful LSTM requires a fixed batch size on the input layer.
        fixed_batch = keras_correctness_test_base._GLOBAL_BATCH_SIZE

        with keras_correctness_test_base.MaybeDistributionScope(distribution):
            tokens = keras.layers.Input(
                shape=(max_words, ),
                batch_size=fixed_batch,
                dtype=np.int32,
                name='words')
            embedded = keras.layers.Embedding(
                input_dim=20, output_dim=10)(tokens)
            encoded = keras.layers.LSTM(
                units=4, return_sequences=False, stateful=True)(embedded)
            probs = keras.layers.Dense(2, activation='softmax')(encoded)
            model = keras.Model(inputs=[tokens], outputs=[probs])

            if initial_weights:
                model.set_weights(initial_weights)

            model.compile(
                optimizer=gradient_descent.GradientDescentOptimizer(
                    learning_rate=0.1),
                loss='sparse_categorical_crossentropy',
                metrics=['sparse_categorical_accuracy'])
        return model
  def get_model(self, initial_weights=None, distribution=None):
    """Build and compile a small CNN classifier over 28x28x3 images.

    Args:
      initial_weights: Optional list of weights to load into the model.
      distribution: Optional distribution strategy; when given, model
        construction and compilation run under its scope.

    Returns:
      A compiled `keras.Model` with a 10-way softmax output.
    """
    with keras_correctness_test_base.MaybeDistributionScope(distribution):
      image = keras.layers.Input(shape=(28, 28, 3), name='image')
      x = keras.layers.Conv2D(
          name='conv1',
          filters=16,
          kernel_size=(3, 3),
          strides=(4, 4),
          kernel_regularizer=keras.regularizers.l2(1e-4))(image)
      # Optionally insert batch normalization directly after the conv layer.
      if self.with_batch_norm:
        x = keras.layers.BatchNormalization(name='bn1')(x)
      x = keras.layers.MaxPooling2D(pool_size=(2, 2))(x)
      x = keras.layers.Flatten()(x)
      logits = keras.layers.Dense(10, activation='softmax', name='pred')(x)
      model = keras.Model(inputs=[image], outputs=[logits])

      if initial_weights:
        model.set_weights(initial_weights)

      model.compile(
          optimizer=gradient_descent.SGD(learning_rate=0.1),
          loss='sparse_categorical_crossentropy',
          metrics=['sparse_categorical_accuracy'])

    return model
Example no. 3
0
    def get_model(self, max_words=10, initial_weights=None, distribution=None):
        """Build a siamese similarity model over two word-id inputs.

        Both inputs are embedded with a single shared Embedding layer,
        average-pooled over time, and compared with a cosine (normalized dot)
        product.

        Args:
            max_words: Length of each integer word-id input sequence.
            initial_weights: Optional list of weights to load into the model.
            distribution: Optional distribution strategy; when given, model
                construction and compilation run under its scope.

        Returns:
            A compiled `keras.Model` producing a scalar similarity.
        """
        with keras_correctness_test_base.MaybeDistributionScope(distribution):
            ids_a = keras.layers.Input(shape=(max_words, ),
                                       dtype=np.int32,
                                       name='words_a')
            ids_b = keras.layers.Input(shape=(max_words, ),
                                       dtype=np.int32,
                                       name='words_b')

            # One Embedding instance shared by both towers so they use the
            # same weights.
            shared_embedding = keras.layers.Embedding(
                input_dim=20,
                output_dim=10,
                input_length=max_words,
                embeddings_initializer=keras.initializers.RandomUniform(0, 1))

            def encode(word_ids):
                # Tower: embed, then average-pool over the time dimension;
                # wrap in a Model and expose its single output tensor.
                pooled = keras.layers.GlobalAveragePooling1D()(
                    shared_embedding(word_ids))
                return keras.Model(inputs=[word_ids],
                                   outputs=[pooled]).outputs[0]

            similarity = keras.layers.Dot(axes=1, normalize=True)(
                [encode(ids_a), encode(ids_b)])

            model = keras.Model(inputs=[ids_a, ids_b], outputs=[similarity])

            if initial_weights:
                model.set_weights(initial_weights)

            model.compile(
                optimizer=gradient_descent.GradientDescentOptimizer(
                    learning_rate=0.1),
                loss='mse',
                metrics=['mse'])
        return model
    def get_model(self, initial_weights=None, distribution=None):
        """Build a minimal user/item rating model (concat + linear head).

        Args:
            initial_weights: Optional list of weights to load into the model.
            distribution: Optional distribution strategy; when given, model
                construction and compilation run under its scope.

        Returns:
            A compiled `keras.Model` that maps (users, items) to a rating.
        """
        with keras_correctness_test_base.MaybeDistributionScope(distribution):

            # NOTE(review): when a strategy is supplied the per-input batch is
            # pinned to 1, otherwise it equals the replica count of the
            # distribution under test — confirm this inversion is intended.
            real_batch_size = (1 if distribution else
                               self._distribution_to_test.num_replicas_in_sync)

            users = keras.layers.Input(shape=(self._batch_size, ),
                                       batch_size=real_batch_size,
                                       name="users",
                                       dtype=dtypes.int32)
            items = keras.layers.Input(shape=(self._batch_size, ),
                                       batch_size=real_batch_size,
                                       name="items",
                                       dtype=dtypes.int32)

            joined = keras.layers.concatenate([users, items], axis=-1)
            # Dense expects floats; cast the concatenated int ids first.
            rating = keras.layers.Dense(1, name="rating")(
                math_ops.cast(joined, dtypes.float32))

            keras_model = keras.Model(inputs=[users, items], outputs=rating)

            if initial_weights:
                keras_model.set_weights(initial_weights)

            keras_model.compile(loss="mse",
                                optimizer=gradient_descent_keras.SGD(0.5))
            return keras_model
Example no. 5
0
    def get_model(self, initial_weights=None, distribution=None):
        """Build a small nonlinear MLP regressor on scalar inputs.

        Args:
            initial_weights: Optional list of weights to load into the model.
            distribution: Optional distribution strategy; when given, model
                construction and compilation run under its scope.

        Returns:
            A compiled `keras.Sequential` model with a single linear output.
        """
        with keras_correctness_test_base.MaybeDistributionScope(distribution):
            # A few non-linear layers keep the problem non-trivial.
            stack = [
                keras.layers.Dense(10, activation='relu', input_shape=(1, )),
                keras.layers.Dense(10, activation='relu'),
                keras.layers.Dense(10, activation='relu'),
                keras.layers.Dense(1),
            ]
            model = keras.Sequential()
            for layer in stack:
                model.add(layer)

            if initial_weights:
                model.set_weights(initial_weights)

            model.compile(loss=keras.losses.mean_squared_error,
                          optimizer=gradient_descent_keras.SGD(0.5),
                          metrics=['mse'])
            return model
Example no. 6
0
    def get_model(self, max_words=10, initial_weights=None, distribution=None):
        """Build a pooled-embedding classifier over word-id sequences.

        Args:
            max_words: Length of the integer word-id input sequence.
            initial_weights: Optional list of weights to load into the model.
            distribution: Optional distribution strategy; when given, model
                construction and compilation run under its scope.

        Returns:
            A compiled `keras.Model` with a 2-way softmax output.
        """
        with keras_correctness_test_base.MaybeDistributionScope(distribution):
            tokens = keras.layers.Input(shape=(max_words, ),
                                        dtype=np.int32,
                                        name='words')
            features = keras.layers.Embedding(input_dim=20,
                                              output_dim=10)(tokens)
            # Optionally apply a per-timestep Dense before pooling.
            if self.use_distributed_dense:
                features = keras.layers.TimeDistributed(
                    keras.layers.Dense(4))(features)
            pooled = keras.layers.GlobalAveragePooling1D()(features)
            probs = keras.layers.Dense(2, activation='softmax')(pooled)
            model = keras.Model(inputs=[tokens], outputs=[probs])

            if initial_weights:
                model.set_weights(initial_weights)

            model.compile(
                optimizer=gradient_descent.GradientDescentOptimizer(
                    learning_rate=0.1),
                loss='sparse_categorical_crossentropy',
                metrics=['sparse_categorical_accuracy'])
        return model