Example #1
    def get_model(
        self,
        max_words=10,
        initial_weights=None,
        distribution=None,
        input_shapes=None,
    ):
        del input_shapes
        with keras_correctness_test_base.MaybeDistributionScope(distribution):
            word_ids = keras.layers.Input(shape=(max_words, ),
                                          dtype=np.int32,
                                          name="words")
            word_embed = keras.layers.Embedding(input_dim=20,
                                                output_dim=10)(word_ids)
            if self.use_distributed_dense:
                word_embed = keras.layers.TimeDistributed(
                    keras.layers.Dense(4))(word_embed)
            avg = keras.layers.GlobalAveragePooling1D()(word_embed)
            preds = keras.layers.Dense(2, activation="softmax")(avg)
            model = keras.Model(inputs=[word_ids], outputs=[preds])

            if initial_weights:
                model.set_weights(initial_weights)

            model.compile(
                optimizer=gradient_descent_keras.SGD(learning_rate=0.1),
                loss="sparse_categorical_crossentropy",
                metrics=["sparse_categorical_accuracy"],
            )
        return model
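
These get_model snippets are methods on distribution-strategy correctness test classes, so they are not runnable on their own. A minimal standalone sketch of how this first model might be built and exercised under tf.distribute (the MirroredStrategy choice, the public keras.optimizers.SGD spelling, and the synthetic data below are assumptions for illustration, not taken from the test):

import numpy as np
import tensorflow as tf
from tensorflow import keras

strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
    # Same topology as Example #1, without the TimeDistributed branch.
    word_ids = keras.layers.Input(shape=(10,), dtype=np.int32, name="words")
    word_embed = keras.layers.Embedding(input_dim=20, output_dim=10)(word_ids)
    avg = keras.layers.GlobalAveragePooling1D()(word_embed)
    preds = keras.layers.Dense(2, activation="softmax")(avg)
    model = keras.Model(inputs=[word_ids], outputs=[preds])
    model.compile(
        optimizer=keras.optimizers.SGD(learning_rate=0.1),
        loss="sparse_categorical_crossentropy",
        metrics=["sparse_categorical_accuracy"],
    )

# Synthetic integer sequences within the embedding's vocabulary range.
x = np.random.randint(0, 20, size=(64, 10)).astype("int32")
y = np.random.randint(0, 2, size=(64, 1))
model.fit(x, y, epochs=1, batch_size=16)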
Example #2
  def get_model(self,
                max_words=10,
                initial_weights=None,
                distribution=None,
                input_shapes=None):
    del input_shapes
    rnn_cls = self._get_layer_class()

    with keras_correctness_test_base.MaybeDistributionScope(distribution):
      word_ids = keras.layers.Input(
          shape=(max_words,), dtype=np.int32, name='words')
      word_embed = keras.layers.Embedding(input_dim=20, output_dim=10)(word_ids)
      rnn_embed = rnn_cls(units=4, return_sequences=False)(word_embed)

      dense_output = keras.layers.Dense(2)(rnn_embed)
      preds = keras.layers.Softmax(dtype='float32')(dense_output)
      model = keras.Model(inputs=[word_ids], outputs=[preds])

      if initial_weights:
        model.set_weights(initial_weights)

      optimizer_fn = gradient_descent_keras.SGD

      model.compile(
          optimizer=optimizer_fn(learning_rate=0.1),
          loss='sparse_categorical_crossentropy',
          metrics=['sparse_categorical_accuracy'])
    return model
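
Note the split between Dense(2) and a separate Softmax(dtype='float32'): keeping the final softmax in float32 preserves numerical precision when the rest of the model runs under a mixed-precision policy. A minimal sketch of the effect (the mixed_float16 policy name is the standard Keras one; the shapes are illustrative):

from tensorflow import keras

keras.mixed_precision.set_global_policy("mixed_float16")

inputs = keras.layers.Input(shape=(4,))
dense_output = keras.layers.Dense(2)(inputs)  # computed in float16 under the policy
preds = keras.layers.Softmax(dtype="float32")(dense_output)  # cast back to float32
model = keras.Model(inputs, preds)
print(model.output.dtype)  # float32

keras.mixed_precision.set_global_policy("float32")  # restore the default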
Example #3
    def get_model(self,
                  initial_weights=None,
                  distribution=None,
                  input_shapes=None):
        del input_shapes
        with keras_correctness_test_base.MaybeDistributionScope(distribution):
            image = keras.layers.Input(shape=(28, 28, 3), name='image')
            c1 = keras.layers.Conv2D(
                name='conv1',
                filters=16,
                kernel_size=(3, 3),
                strides=(4, 4),
                kernel_regularizer=keras.regularizers.l2(1e-4))(image)
            if self.with_batch_norm == 'regular':
                c1 = keras.layers.BatchNormalization(name='bn1')(c1)
            elif self.with_batch_norm == 'sync':
                # Test with parallel batch norms to verify all-reduce works OK.
                bn1 = keras.layers.SyncBatchNormalization(name='bn1')(c1)
                bn2 = keras.layers.SyncBatchNormalization(name='bn2')(c1)
                c1 = keras.layers.Add()([bn1, bn2])
            c1 = keras.layers.MaxPooling2D(pool_size=(2, 2))(c1)
            logits = keras.layers.Dense(10, activation='softmax', name='pred')(
                keras.layers.Flatten()(c1))
            model = keras.Model(inputs=[image], outputs=[logits])

            if initial_weights:
                model.set_weights(initial_weights)

            model.compile(optimizer=gradient_descent.SGD(learning_rate=0.1),
                          loss='sparse_categorical_crossentropy',
                          metrics=['sparse_categorical_accuracy'])

        return model
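
SyncBatchNormalization differs from the regular BatchNormalization branch above in that it all-reduces the batch mean and variance across replicas before normalizing, so summing two independent sync-BN branches exercises two concurrent all-reduces, which is what the in-code comment says it tests. A hedged standalone sketch of the same pattern, using the synchronized=True spelling available in recent Keras releases (the strategy and shapes are assumptions):

import tensorflow as tf
from tensorflow import keras

strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
    image = keras.layers.Input(shape=(28, 28, 3), name="image")
    c1 = keras.layers.Conv2D(16, (3, 3), strides=(4, 4))(image)
    # Two parallel sync batch norms -> two concurrent cross-replica all-reduces.
    bn1 = keras.layers.BatchNormalization(synchronized=True, name="bn1")(c1)
    bn2 = keras.layers.BatchNormalization(synchronized=True, name="bn2")(c1)
    merged = keras.layers.Add()([bn1, bn2])
    model = keras.Model(image, merged)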
Example #4
    def get_model(self,
                  initial_weights=None,
                  distribution=None,
                  input_shapes=None):
        with keras_correctness_test_base.MaybeDistributionScope(distribution):
            # We add a few non-linear layers to make the model non-trivial.
            model = keras.Sequential()
            model.add(
                keras.layers.Dense(10, activation="relu", input_shape=(1, )))
            model.add(
                keras.layers.Dense(
                    10,
                    activation="relu",
                    kernel_regularizer=keras.regularizers.l2(1e-4),
                ))
            model.add(keras.layers.Dense(10, activation="relu"))
            model.add(keras.layers.Dense(1))

            if initial_weights:
                model.set_weights(initial_weights)

            model.compile(
                loss=keras.losses.mean_squared_error,
                optimizer=gradient_descent_keras.SGD(0.05),
                metrics=["mse"],
            )
            return model
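
Since this model is a plain scalar-in, scalar-out regressor, a quick standalone check (the linear target and the data below are made up for illustration) is to fit the same stack on a noisy linear function:

import numpy as np
from tensorflow import keras

model = keras.Sequential([
    keras.layers.Input(shape=(1,)),
    keras.layers.Dense(10, activation="relu"),
    keras.layers.Dense(10, activation="relu",
                       kernel_regularizer=keras.regularizers.l2(1e-4)),
    keras.layers.Dense(10, activation="relu"),
    keras.layers.Dense(1),
])
model.compile(loss="mse", optimizer=keras.optimizers.SGD(0.05), metrics=["mse"])

x = np.random.uniform(-1.0, 1.0, size=(256, 1)).astype("float32")
y = 3.0 * x + np.random.normal(scale=0.05, size=x.shape).astype("float32")
model.fit(x, y, epochs=5, batch_size=32, verbose=0)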
Example #5
    def get_model(self,
                  max_words=10,
                  initial_weights=None,
                  distribution=None,
                  input_shapes=None):
        del input_shapes
        batch_size = keras_correctness_test_base._GLOBAL_BATCH_SIZE

        with keras_correctness_test_base.MaybeDistributionScope(distribution):
            word_ids = keras.layers.Input(shape=(max_words, ),
                                          batch_size=batch_size,
                                          dtype=np.int32,
                                          name='words')
            word_embed = keras.layers.Embedding(input_dim=20,
                                                output_dim=10)(word_ids)
            lstm_embed = keras.layers.LSTM(units=4,
                                           return_sequences=False,
                                           stateful=True)(word_embed)

            preds = keras.layers.Dense(2, activation='softmax')(lstm_embed)
            model = keras.Model(inputs=[word_ids], outputs=[preds])

            if initial_weights:
                model.set_weights(initial_weights)

            optimizer_fn = gradient_descent_keras.SGD

            model.compile(optimizer=optimizer_fn(learning_rate=0.1),
                          loss='sparse_categorical_crossentropy',
                          metrics=['sparse_categorical_accuracy'])
        return model
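
The stateful=True LSTM is why this variant pins batch_size on the Input layer: a stateful RNN carries per-sample state across batches, so every batch must have exactly the same size. A small sketch (the batch size and data are illustrative):

import numpy as np
from tensorflow import keras

batch_size = 8
word_ids = keras.layers.Input(shape=(10,), batch_size=batch_size,
                              dtype=np.int32, name="words")
word_embed = keras.layers.Embedding(input_dim=20, output_dim=10)(word_ids)
lstm_embed = keras.layers.LSTM(units=4, stateful=True)(word_embed)
preds = keras.layers.Dense(2, activation="softmax")(lstm_embed)
model = keras.Model(word_ids, preds)

x = np.random.randint(0, 20, size=(batch_size, 10)).astype("int32")
model.predict(x, batch_size=batch_size)  # LSTM state carries over to the next call
model.reset_states()                     # clear state before a new sequence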
Example #6
    def get_model(self,
                  initial_weights=None,
                  distribution=None,
                  input_shapes=None):
        with keras_correctness_test_base.MaybeDistributionScope(distribution):
            model = SubclassedModel(initial_weights, input_shapes)

            model.compile(loss=keras.losses.mean_squared_error,
                          optimizer=gradient_descent_keras.SGD(0.05),
                          metrics=['mse'])
            return model
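
SubclassedModel is defined alongside the test rather than in this snippet. A hypothetical stand-in with the same constructor signature might look like the following; the layer stack and the build-then-set_weights ordering are assumptions, not the test's actual class:

from tensorflow import keras

class SubclassedModel(keras.Model):
    # Hypothetical stand-in for the class defined elsewhere in the test file.
    def __init__(self, initial_weights=None, input_shapes=None):
        super().__init__()
        self.dense1 = keras.layers.Dense(10, activation="relu")
        self.dense2 = keras.layers.Dense(1)
        if input_shapes:
            self.build(input_shapes)  # create variables so set_weights can run
        if initial_weights:
            self.set_weights(initial_weights)

    def call(self, inputs):
        return self.dense2(self.dense1(inputs))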
Example #7
    def get_model(
        self,
        max_words=10,
        initial_weights=None,
        distribution=None,
        input_shapes=None,
    ):
        del input_shapes
        with keras_correctness_test_base.MaybeDistributionScope(distribution):
            word_ids_a = keras.layers.Input(shape=(max_words, ),
                                            dtype=np.int32,
                                            name="words_a")
            word_ids_b = keras.layers.Input(shape=(max_words, ),
                                            dtype=np.int32,
                                            name="words_b")

            def submodel(embedding, word_ids):
                word_embed = embedding(word_ids)
                rep = keras.layers.GlobalAveragePooling1D()(word_embed)
                return keras.Model(inputs=[word_ids], outputs=[rep])

            word_embed = keras.layers.Embedding(
                input_dim=20,
                output_dim=10,
                input_length=max_words,
                embeddings_initializer=keras.initializers.RandomUniform(0, 1),
            )

            a_rep = submodel(word_embed, word_ids_a).outputs[0]
            b_rep = submodel(word_embed, word_ids_b).outputs[0]
            sim = keras.layers.Dot(axes=1, normalize=True)([a_rep, b_rep])

            model = keras.Model(inputs=[word_ids_a, word_ids_b], outputs=[sim])

            if initial_weights:
                model.set_weights(initial_weights)

            # TODO(b/130808953): Switch back to the V1 optimizer after global_step
            # is made mirrored.
            model.compile(
                optimizer=gradient_descent_keras.SGD(learning_rate=0.1),
                loss="mse",
                metrics=["mse"],
            )
        return model
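
The single Embedding instance applied to both inputs is the point of this example: one layer object means one shared weight matrix, and Dot(axes=1, normalize=True) L2-normalizes both representations before the dot product, i.e. it outputs cosine similarity in [-1, 1]. A minimal standalone sketch of the same weight-sharing pattern (shapes and data are illustrative):

import numpy as np
from tensorflow import keras

words_a = keras.layers.Input(shape=(10,), dtype=np.int32, name="words_a")
words_b = keras.layers.Input(shape=(10,), dtype=np.int32, name="words_b")
shared_embed = keras.layers.Embedding(input_dim=20, output_dim=10)  # one weight matrix
pool = keras.layers.GlobalAveragePooling1D()
a_rep = pool(shared_embed(words_a))
b_rep = pool(shared_embed(words_b))
sim = keras.layers.Dot(axes=1, normalize=True)([a_rep, b_rep])
model = keras.Model([words_a, words_b], sim)

a = np.random.randint(0, 20, size=(4, 10)).astype("int32")
b = np.random.randint(0, 20, size=(4, 10)).astype("int32")
print(model.predict([a, b]).shape)  # (4, 1) cosine similarities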