Code example #1
        # Assumes TF 1.x (`import tensorflow as tf`,
        # `from tensorflow.keras import layers`); `embedding_matrix` and
        # `embedding_initializer` are closure variables of the enclosing method.
        def custom_model_fn(features, labels, mode):
            """model_fn used to build a TF custom Estimator."""
            # Embed token ids with a frozen pretrained embedding matrix.
            embedded_input = tf.contrib.layers.embed_sequence(features['sequence'],
                                                              embedding_matrix.shape[0],
                                                              embedding_matrix.shape[1],
                                                              initializer=embedding_initializer,
                                                              trainable=False)
            # Two stacked bidirectional cuDNN GRU layers.
            first_gru = layers.CuDNNGRU(self.num_neurons, return_sequences=True)
            gru_output = layers.Bidirectional(first_gru)(embedded_input)
            gru_output = layers.Bidirectional(layers.CuDNNGRU(self.num_neurons))(gru_output)
            # One logit per label; sigmoid is applied later (multi-label task).
            logits = layers.Dense(6)(gru_output)

            # Index of the single highest-scoring label for each example.
            predicted_classes = tf.argmax(logits, 1)
            if mode == tf.estimator.ModeKeys.PREDICT:
                predictions = {
                    'class_ids': predicted_classes[:, tf.newaxis],
                    'probabilities': tf.nn.sigmoid(logits),
                    'logits': logits
                }
                return tf.estimator.EstimatorSpec(mode, predictions=predictions)

            # Multi-label sigmoid cross-entropy over the six classes.
            loss = tf.losses.sigmoid_cross_entropy(labels, logits)

            if mode == tf.estimator.ModeKeys.EVAL:
                return tf.estimator.EstimatorSpec(mode, loss=loss)

            assert mode == tf.estimator.ModeKeys.TRAIN
            # Train mode: minimize the loss with Adam, tracking the global step.
            optimizer = tf.train.AdamOptimizer()
            train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
            return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
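A minimal driver sketch for this model_fn, assuming TF 1.x; `train_sequences`, `train_labels`, and the `model_dir` path are hypothetical placeholders, and the input pipeline must match what `custom_model_fn` expects: `features['sequence']` token ids plus a (batch, 6) float label matrix.

    # Hypothetical driver, assuming TF 1.x and placeholder data arrays.
    import tensorflow as tf

    def train_input_fn():
        # train_sequences: int token-id matrix; train_labels: (n, 6) floats.
        dataset = tf.data.Dataset.from_tensor_slices(
            ({'sequence': train_sequences}, train_labels))
        return dataset.shuffle(10000).batch(32).repeat()

    estimator = tf.estimator.Estimator(model_fn=custom_model_fn,
                                       model_dir='/tmp/bigru_estimator')
    estimator.train(input_fn=train_input_fn, steps=1000)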
Code example #2
    # Assumes `from tensorflow.keras import layers, optimizers, losses` and
    # `from tensorflow.keras.models import Model` (TF 1.x, cuDNN layers).
    def build_bigru_model(self, embedding_matrix) -> Model:
        """
        Build and return a BiGRU model using a standard optimizer and loss.
        :param embedding_matrix: pretrained embeddings,
            shape (vocab_size + 1, embedding_dims)
        :return: compiled Keras model
        """
        token_input = layers.Input(shape=(self.max_seq_len,))
        # Frozen pretrained embedding: weights are loaded, not trained.
        embedding_layer = layers.Embedding(self.vocab_size + 1,
                                           self.embedding_dims,
                                           weights=[embedding_matrix],
                                           trainable=False)
        embedded_input = embedding_layer(token_input)
        gru_output = layers.Bidirectional(
            layers.CuDNNGRU(self.num_neurons,
                            return_sequences=True))(embedded_input)
        gru_output = layers.Bidirectional(layers.CuDNNGRU(
            self.num_neurons))(gru_output)
        dense_output = layers.Dense(6, activation='sigmoid')(gru_output)

        bigru_model = Model(token_input, dense_output)
        bigru_model.compile(optimizer=optimizers.Adam(),
                            loss=losses.binary_crossentropy)

        print('generated bigru model...')

        return bigru_model
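A possible training call for the model above; `builder`, `token_id_lists`, and `labels` are hypothetical names, and inputs are padded to `max_seq_len` to match the Input layer.

    # Hypothetical usage: pad token ids to max_seq_len, then fit on
    # a (num_samples, 6) float label matrix for the six toxicity classes.
    from tensorflow.keras.preprocessing.sequence import pad_sequences

    padded = pad_sequences(token_id_lists, maxlen=builder.max_seq_len)
    model = builder.build_bigru_model(embedding_matrix)
    model.fit(padded, labels, batch_size=128, epochs=2, validation_split=0.1)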
Code example #3
    # Assumes the same Keras imports plus `from typing import Tuple`; the
    # Masked* loss classes and CONFIDENCE_PENALTY are project-specific.
    def build_bigru_model(self, embedding_matrix) -> Tuple[Model, Model]:
        """
        Build and return a multi-headed BiGRU model with
          1) a masked-language-model (MLM) output from the first GRU layer
          2) the standard six-class toxicity output from the second
        :param embedding_matrix: pretrained embeddings,
            shape (vocab_size + 1, embedding_dims)
        :return: (training_model, inference_model), which share their layers
        """
        token_input = layers.Input(shape=(self.max_seq_len,))
        embedding_layer = layers.Embedding(self.vocab_size + 1,
                                           self.embedding_dims,
                                           weights=[embedding_matrix],
                                           trainable=False)
        embedded_input = embedding_layer(token_input)
        gru1_output = layers.Bidirectional(layers.CuDNNGRU(self.num_neurons,
                                                           return_sequences=True))(embedded_input)
        aux_output = layers.Dense(self.vocab_size + 1, activation='softmax',
                                  name='aux_output')(gru1_output)
        gru2_output = layers.Bidirectional(layers.CuDNNGRU(self.num_neurons))(gru1_output)
        main_output = layers.Dense(6, activation='sigmoid', name='main_output')(gru2_output)

        # The training model optimizes both heads; the per-output losses are
        # matched to the heads by layer name.
        training_model = Model(inputs=token_input, outputs=[main_output, aux_output])
        mlm_loss = MaskedPenalizedSparseCategoricalCrossentropy(CONFIDENCE_PENALTY)
        training_model.compile(optimizer=optimizers.Adam(),
                               loss={'main_output': MaskedBinaryCrossedentropy(),
                                     'aux_output': mlm_loss})

        # The inference model reuses the same layers but exposes only the
        # toxicity head, so it needs no separate training.
        inference_model = Model(inputs=token_input, outputs=main_output)

        print('generated bigru model...')
        print(training_model.summary())

        return training_model, inference_model
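Since the two returned models share their layers, fitting the training model also updates the inference model. A hedged usage sketch; `padded_tokens`, `main_labels`, and `aux_labels` are hypothetical arrays:

    # Labels are passed as a dict keyed by output layer name; aux_labels
    # would be (masked) token ids for the MLM head, main_labels the
    # (num_samples, 6) toxicity targets.
    training_model.fit(padded_tokens,
                       {'main_output': main_labels, 'aux_output': aux_labels},
                       batch_size=128, epochs=2)
    # The inference model already holds the trained weights:
    probabilities = inference_model.predict(padded_tokens)  # shape (n, 6)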
Code example #4
# Assumes `from tensorflow.keras import layers, models` and a `max_length`
# variable defined in the enclosing scope.
def build_rnn_model():
    sequences = layers.Input(shape=(max_length,))
    # Trainable embedding over a 12,000-token vocabulary, 64 dims per token.
    embedded = layers.Embedding(12000, 64)(sequences)
    # Two stacked cuDNN GRU layers; only the final state feeds the dense head.
    x = layers.CuDNNGRU(128, return_sequences=True)(embedded)
    x = layers.CuDNNGRU(128)(x)
    x = layers.Dense(32, activation='relu')(x)
    x = layers.Dense(100, activation='relu')(x)
    # Single sigmoid unit: binary classification.
    predictions = layers.Dense(1, activation='sigmoid')(x)
    model = models.Model(inputs=sequences, outputs=predictions)
    model.compile(optimizer='rmsprop',
                  loss='binary_crossentropy',
                  metrics=['binary_accuracy'])
    return model
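A minimal sketch of how this function might be driven; `max_length`, `x_train`, and `y_train` are hypothetical and must exist in scope before the call:

    # Hypothetical usage; build_rnn_model reads max_length from module scope.
    max_length = 100
    model = build_rnn_model()
    model.fit(x_train, y_train, batch_size=64, epochs=3, validation_split=0.1)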
Code example #5
    def build_bigru_model(self) -> Model:
        """
        Build and return a BiGRU model using a standard optimizer and loss.
        Unlike the token-input variant, this model consumes sequences that
        are already embedded, so no Embedding layer is needed.
        :return: compiled Keras model
        """
        # Variable-length input of pre-computed embedding vectors.
        embedded_input = layers.Input(shape=(None, self.embedding_dims))
        gru_output = layers.Bidirectional(
            layers.CuDNNGRU(self.num_neurons,
                            return_sequences=True))(embedded_input)
        gru_output = layers.Bidirectional(layers.CuDNNGRU(
            self.num_neurons))(gru_output)
        dense_output = layers.Dense(6, activation='sigmoid')(gru_output)

        bigru_model = Model(embedded_input, dense_output)
        bigru_model.compile(optimizer=optimizers.Adam(),
                            loss=losses.binary_crossentropy)

        print('generated bigru model...')
        print(bigru_model.summary())

        return bigru_model
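Because this variant consumes pre-embedded sequences, the input is a 3-D float array rather than integer token ids; the time axis is `None`, so sequence length can vary between batches. A hypothetical smoke test (`builder` is an assumed instance of the enclosing class):

    # Hypothetical smoke test with random "embeddings" of shape
    # (batch, seq_len, embedding_dims); real inputs would come from a
    # pretrained embedding lookup done outside the model.
    import numpy as np

    model = builder.build_bigru_model()
    batch = np.random.rand(16, 50, builder.embedding_dims).astype('float32')
    probs = model.predict(batch)  # shape: (16, 6)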