Code example #1
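These get_model snippets are methods lifted from TensorFlow's Keras distribution-strategy correctness tests, so they are not self-contained. They assume module-level imports roughly like the block below; the exact paths are inferred from the identifiers used in the snippets and shift between TF versions, so treat this as an assumption rather than a verbatim header:

import numpy as np

from tensorflow.python import keras
from tensorflow.python import tf2
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.keras.distribute import keras_correctness_test_base
from tensorflow.python.keras.layers import recurrent as rnn_v1
from tensorflow.python.keras.layers import recurrent_v2 as rnn_v2
from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_keras
from tensorflow.python.ops import math_ops
from tensorflow.python.training import gradient_descent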
  def get_model(self,
                max_words=10,
                initial_weights=None,
                distribution=None,
                cloning=None,
                input_shapes=None):
    del input_shapes
    with keras_correctness_test_base.MaybeDistributionScope(distribution):
      word_ids = keras.layers.Input(
          shape=(max_words,), dtype=np.int32, name='words')
      word_embed = keras.layers.Embedding(input_dim=20,
                                          output_dim=10)(word_ids)
      lstm_embed = keras.layers.LSTM(units=4,
                                     return_sequences=False)(word_embed)

      preds = keras.layers.Dense(2, activation='softmax')(lstm_embed)
      model = keras.Model(inputs=[word_ids], outputs=[preds])

      if initial_weights:
        model.set_weights(initial_weights)

      # TODO(b/130808953): Re-enable the V1 optimizer after iterations is
      # mirrored.
      optimizer_fn = (
          gradient_descent.GradientDescentOptimizer
          if cloning else gradient_descent_keras.SGD)

      model.compile(
          optimizer=optimizer_fn(learning_rate=0.1),
          loss='sparse_categorical_crossentropy',
          metrics=['sparse_categorical_accuracy'], cloning=cloning)
    return model
Code example #2
    def get_model(self,
                  max_words=10,
                  initial_weights=None,
                  distribution=None,
                  input_shapes=None):
        del input_shapes
        with keras_correctness_test_base.MaybeDistributionScope(distribution):
            word_ids = keras.layers.Input(shape=(max_words, ),
                                          dtype=np.int32,
                                          name='words')
            word_embed = keras.layers.Embedding(input_dim=20,
                                                output_dim=10)(word_ids)
            if self.use_distributed_dense:
                # Optionally exercise a TimeDistributed Dense between the
                # embedding and the pooling layer.
                word_embed = keras.layers.TimeDistributed(
                    keras.layers.Dense(4))(word_embed)
            avg = keras.layers.GlobalAveragePooling1D()(word_embed)
            preds = keras.layers.Dense(2, activation='softmax')(avg)
            model = keras.Model(inputs=[word_ids], outputs=[preds])

            if initial_weights:
                model.set_weights(initial_weights)

            model.compile(
                optimizer=gradient_descent_keras.SGD(learning_rate=0.1),
                loss='sparse_categorical_crossentropy',
                metrics=['sparse_categorical_accuracy'])
        return model
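A minimal sketch of how a harness might drive one of these builders under a strategy, assuming the public tf.distribute and tf.keras APIs; test_case stands in for the real test class, which wires this up through keras_correctness_test_base instead:

import numpy as np
import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()
# get_model() creates its layers and variables inside the strategy's scope.
model = test_case.get_model(max_words=10, distribution=strategy)

# Token ids in [0, 20) to match Embedding(input_dim=20) above.
x = np.random.randint(0, 20, size=(64, 10)).astype(np.int32)
y = np.random.randint(0, 2, size=(64, 1)).astype(np.int32)
model.fit(x, y, batch_size=32, epochs=1)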
Code example #3
    def get_model(self,
                  cloning,
                  initial_weights=None,
                  distribution=None,
                  input_shapes=None):
        with keras_correctness_test_base.MaybeDistributionScope(distribution):
            # We add a few non-linear layers to make it non-trivial.
            model = keras.Sequential()
            model.add(
                keras.layers.Dense(10, activation='relu', input_shape=(1, )))
            model.add(
                keras.layers.Dense(
                    10,
                    activation='relu',
                    kernel_regularizer=keras.regularizers.l2(1e-4)))
            model.add(keras.layers.Dense(10, activation='relu'))
            model.add(keras.layers.Dense(1))

            if initial_weights:
                model.set_weights(initial_weights)

            model.compile(loss=keras.losses.mean_squared_error,
                          optimizer=gradient_descent_keras.SGD(0.5),
                          metrics=['mse'],
                          cloning=cloning)
            return model
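These builders exist so the harness can train identical models with and without a distribution strategy and compare the outcomes; initial_weights is what lets the two runs start from the same point. A rough sketch of that pattern (test_case is again a hypothetical stand-in for the real harness):

import numpy as np
import tensorflow as tf

x = np.random.random((64, 1)).astype(np.float32)
y = 3.0 * x + 1.0  # simple linear target

def train(distribution, initial_weights):
    model = test_case.get_model(cloning=False,
                                initial_weights=initial_weights,
                                distribution=distribution)
    model.fit(x, y, batch_size=16, epochs=2, verbose=0)
    return model.get_weights()

initial = test_case.get_model(cloning=False).get_weights()
baseline = train(None, initial)
distributed = train(tf.distribute.MirroredStrategy(), initial)
for w0, w1 in zip(baseline, distributed):
    np.testing.assert_allclose(w0, w1, atol=1e-4)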
Code example #4
    def get_model(self, max_words=10, initial_weights=None, distribution=None):
        with keras_correctness_test_base.MaybeDistributionScope(distribution):
            word_ids_a = keras.layers.Input(shape=(max_words, ),
                                            dtype=np.int32,
                                            name='words_a')
            word_ids_b = keras.layers.Input(shape=(max_words, ),
                                            dtype=np.int32,
                                            name='words_b')

            def submodel(embedding, word_ids):
                word_embed = embedding(word_ids)
                rep = keras.layers.GlobalAveragePooling1D()(word_embed)
                return keras.Model(inputs=[word_ids], outputs=[rep])

            word_embed = keras.layers.Embedding(
                input_dim=20,
                output_dim=10,
                input_length=max_words,
                embeddings_initializer=keras.initializers.RandomUniform(0, 1))

            a_rep = submodel(word_embed, word_ids_a).outputs[0]
            b_rep = submodel(word_embed, word_ids_b).outputs[0]
            sim = keras.layers.Dot(axes=1, normalize=True)([a_rep, b_rep])

            model = keras.Model(inputs=[word_ids_a, word_ids_b], outputs=[sim])

            if initial_weights:
                model.set_weights(initial_weights)

            model.compile(
                optimizer=gradient_descent.GradientDescentOptimizer(
                    learning_rate=0.1),
                loss='mse',
                metrics=['mse'])
        return model
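Dot(axes=1, normalize=True) L2-normalizes both inputs before taking the dot product, so the sim output is the cosine similarity of the two averaged embeddings. A quick NumPy check of that identity:

import numpy as np

a = np.random.random((10,)).astype(np.float32)
b = np.random.random((10,)).astype(np.float32)

# Same value keras.layers.Dot(axes=1, normalize=True) produces for a
# batch containing these two representations.
cosine = a.dot(b) / (np.linalg.norm(a) * np.linalg.norm(b))
print(cosine)  # scalar in [-1, 1]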
Code example #5
  def get_model(self,
                max_words=10,
                initial_weights=None,
                distribution=None,
                input_shapes=None):
    del input_shapes
    batch_size = keras_correctness_test_base._GLOBAL_BATCH_SIZE

    with keras_correctness_test_base.MaybeDistributionScope(distribution):
      word_ids = keras.layers.Input(
          shape=(max_words,),
          batch_size=batch_size,
          dtype=np.int32,
          name='words')
      word_embed = keras.layers.Embedding(input_dim=20, output_dim=10)(word_ids)
      # The stateful LSTM keeps per-slot state across batches, hence the
      # fixed batch_size on the Input above.
      lstm_embed = keras.layers.LSTM(
          units=4, return_sequences=False, stateful=True)(word_embed)

      preds = keras.layers.Dense(2, activation='softmax')(lstm_embed)
      model = keras.Model(inputs=[word_ids], outputs=[preds])

      if initial_weights:
        model.set_weights(initial_weights)

      optimizer_fn = gradient_descent_keras.SGD

      model.compile(
          optimizer=optimizer_fn(learning_rate=0.1),
          loss='sparse_categorical_crossentropy',
          metrics=['sparse_categorical_accuracy'])
    return model
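Because the LSTM is stateful, its state persists across calls, and the usual training pattern resets it at sequence boundaries. A sketch, with num_epochs, x, and y as illustrative placeholders:

# Consecutive batches continue the same sequences, so shuffling is off
# and state is cleared once per pass over the data.
for _ in range(num_epochs):
    model.fit(x, y, batch_size=batch_size, epochs=1, shuffle=False, verbose=0)
    model.reset_states()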
Code example #6
    def get_model(self,
                  initial_weights=None,
                  distribution=None,
                  input_shapes=None):
        del input_shapes
        with keras_correctness_test_base.MaybeDistributionScope(distribution):
            image = keras.layers.Input(shape=(28, 28, 3), name='image')
            c1 = keras.layers.Conv2D(
                name='conv1',
                filters=16,
                kernel_size=(3, 3),
                strides=(4, 4),
                kernel_regularizer=keras.regularizers.l2(1e-4))(image)
            if self.with_batch_norm == 'regular':
                c1 = keras.layers.BatchNormalization(name='bn1')(c1)
            elif self.with_batch_norm == 'sync':
                c1 = keras.layers.SyncBatchNormalization(name='bn1')(c1)
            c1 = keras.layers.MaxPooling2D(pool_size=(2, 2))(c1)
            logits = keras.layers.Dense(10, activation='softmax', name='pred')(
                keras.layers.Flatten()(c1))
            model = keras.Model(inputs=[image], outputs=[logits])

            if initial_weights:
                model.set_weights(initial_weights)

            model.compile(
                optimizer=gradient_descent_keras.SGD(learning_rate=0.1),
                loss='sparse_categorical_crossentropy',
                metrics=['sparse_categorical_accuracy'])

        return model
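The with_batch_norm switch contrasts per-replica batch normalization with the synchronized variant, which aggregates batch statistics across all replicas; that difference matters when per-replica batches are small. In the public API the sync layer appears as follows, though the exact location depends on the TF release (a hedged pointer, not a guaranteed path):

import tensorflow as tf

# Exposed under experimental from TF 2.2; TF 2.12+ also accepts
# BatchNormalization(synchronized=True).
bn = tf.keras.layers.experimental.SyncBatchNormalization(name='bn1')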
Code example #7
    def get_model(self,
                  max_words=10,
                  initial_weights=None,
                  distribution=None,
                  input_shapes=None):
        del input_shapes
        rnn_cls = self._get_layer_class()

        with keras_correctness_test_base.MaybeDistributionScope(distribution):
            word_ids = keras.layers.Input(shape=(max_words, ),
                                          dtype=np.int32,
                                          name='words')
            word_embed = keras.layers.Embedding(input_dim=20,
                                                output_dim=10)(word_ids)
            rnn_embed = rnn_cls(units=4, return_sequences=False)(word_embed)

            dense_output = keras.layers.Dense(2)(rnn_embed)
            preds = keras.layers.Softmax(dtype='float32')(dense_output)
            model = keras.Model(inputs=[word_ids], outputs=[preds])

            if initial_weights:
                model.set_weights(initial_weights)

            optimizer_fn = gradient_descent_keras.SGD

            model.compile(optimizer=optimizer_fn(learning_rate=0.1),
                          loss='sparse_categorical_crossentropy',
                          metrics=['sparse_categorical_accuracy'])
        return model
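Pinning the final Softmax to dtype='float32' is the usual mixed-precision pattern: under a float16 compute policy the Dense output is half precision, and casting the last activation back to float32 keeps the softmax and the loss numerically stable. Enabling such a policy looks roughly like this (API name varies by release, as noted in the comment):

import tensorflow as tf

# TF 2.4+; earlier 2.x releases used
# tf.keras.mixed_precision.experimental.set_policy('mixed_float16').
tf.keras.mixed_precision.set_global_policy('mixed_float16')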
Code example #8
  def get_model(self, initial_weights=None, distribution=None):
    with keras_correctness_test_base.MaybeDistributionScope(distribution):

      # Use the same per-replica batch size in both runs: one example per
      # replica when distributing, num_replicas examples otherwise.
      real_batch_size = (1 if distribution else
                         self._distribution_to_test.num_replicas_in_sync)

      user_input = keras.layers.Input(
          shape=(self._batch_size,),
          batch_size=real_batch_size,
          name="users",
          dtype=dtypes.int32)

      item_input = keras.layers.Input(
          shape=(self._batch_size,),
          batch_size=real_batch_size,
          name="items",
          dtype=dtypes.int32)

      concat = keras.layers.concatenate([user_input, item_input], axis=-1)
      logits = keras.layers.Dense(1, name="rating")(
          math_ops.cast(concat, dtypes.float32))

      keras_model = keras.Model(inputs=[user_input, item_input], outputs=logits)

      if initial_weights:
        keras_model.set_weights(initial_weights)

      keras_model.compile(loss="mse", optimizer=gradient_descent_keras.SGD(0.5))
      return keras_model
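The real_batch_size arithmetic keeps per-replica batches identical between the distributed and baseline runs. The underlying relationship, sketched with the public API:

import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()
per_replica_batch = 1
# Each replica receives global_batch / num_replicas_in_sync examples,
# so the global batch scales with the replica count.
global_batch = per_replica_batch * strategy.num_replicas_in_sync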
Code example #9
    def get_model(self,
                  initial_weights=None,
                  distribution=None,
                  input_shapes=None):
        with keras_correctness_test_base.MaybeDistributionScope(distribution):
            model = SubclassedModel(initial_weights, input_shapes)

            model.compile(loss=keras.losses.mean_squared_error,
                          optimizer=gradient_descent_keras.SGD(0.05),
                          metrics=['mse'])
            return model
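SubclassedModel is defined elsewhere in the test file and is not shown here. A minimal stand-in with the same constructor shape might look like the following; this is purely illustrative, not the original class:

class SubclassedModel(keras.Model):

    def __init__(self, initial_weights, input_shapes):
        super(SubclassedModel, self).__init__()
        self.dense = keras.layers.Dense(1)
        if input_shapes:
            # Build eagerly so set_weights() has variables to assign into.
            self.build(input_shapes)
        if initial_weights:
            self.set_weights(initial_weights)

    def call(self, inputs):
        return self.dense(inputs)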
Code example #10
    def get_model(self,
                  max_words=10,
                  initial_weights=None,
                  distribution=None,
                  experimental_run_tf_function=None,
                  input_shapes=None):
        del input_shapes
        with keras_correctness_test_base.MaybeDistributionScope(distribution):
            word_ids_a = keras.layers.Input(shape=(max_words, ),
                                            dtype=np.int32,
                                            name='words_a')
            word_ids_b = keras.layers.Input(shape=(max_words, ),
                                            dtype=np.int32,
                                            name='words_b')

            def submodel(embedding, word_ids):
                word_embed = embedding(word_ids)
                rep = keras.layers.GlobalAveragePooling1D()(word_embed)
                return keras.Model(inputs=[word_ids], outputs=[rep])

            word_embed = keras.layers.Embedding(
                input_dim=20,
                output_dim=10,
                input_length=max_words,
                embeddings_initializer=keras.initializers.RandomUniform(0, 1))

            a_rep = submodel(word_embed, word_ids_a).outputs[0]
            b_rep = submodel(word_embed, word_ids_b).outputs[0]
            sim = keras.layers.Dot(axes=1, normalize=True)([a_rep, b_rep])

            model = keras.Model(inputs=[word_ids_a, word_ids_b], outputs=[sim])

            if initial_weights:
                model.set_weights(initial_weights)

            # TODO(b/130808953): Switch back to the V1 optimizer after global_step
            # is made mirrored.
            model.compile(
                optimizer=gradient_descent_keras.SGD(learning_rate=0.1),
                loss='mse',
                experimental_run_tf_function=experimental_run_tf_function,
                metrics=['mse'])
        return model
Code example #11
    def get_model(self,
                  max_words=10,
                  initial_weights=None,
                  distribution=None,
                  run_distributed=None,
                  input_shapes=None):
        del input_shapes

        if tf2.enabled():
            if not context.executing_eagerly():
                self.skipTest(
                    "LSTM v2 and legacy graph mode don't work together.")
            lstm = rnn_v2.LSTM
        else:
            lstm = rnn_v1.LSTM

        with keras_correctness_test_base.MaybeDistributionScope(distribution):
            word_ids = keras.layers.Input(shape=(max_words, ),
                                          dtype=np.int32,
                                          name='words')
            word_embed = keras.layers.Embedding(input_dim=20,
                                                output_dim=10)(word_ids)
            lstm_embed = lstm(units=4, return_sequences=False)(word_embed)

            preds = keras.layers.Dense(2, activation='softmax')(lstm_embed)
            model = keras.Model(inputs=[word_ids], outputs=[preds])

            if initial_weights:
                model.set_weights(initial_weights)

            optimizer_fn = gradient_descent_keras.SGD

            model.compile(optimizer=optimizer_fn(learning_rate=0.1),
                          loss='sparse_categorical_crossentropy',
                          metrics=['sparse_categorical_accuracy'],
                          run_distributed=run_distributed)
        return model
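The same v1/v2 selection expressed against the public API, for readers outside the TF codebase; a hedged analogue rather than what the test itself does:

import tensorflow as tf

if tf.executing_eagerly():
    lstm = tf.keras.layers.LSTM            # v2 kernel, cuDNN-capable
else:
    lstm = tf.compat.v1.keras.layers.LSTM  # legacy kernel for graph mode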