Example #1
0
 def build_model(self, embedding, **kwargs) -> tf.keras.Model:
     """Build a CNN sequence-tagging model from the configured transform.

     Args:
         embedding: Embedding spec passed through to ``build_embedding``.
         **kwargs: Extra keyword arguments forwarded to ``CNNTaggingModel``.

     Returns:
         A ``CNNTaggingModel`` sized to the transform's tag vocabulary.
     """
     embed_layer = build_embedding(embedding, self.transform.word_vocab,
                                   self.transform)
     # Only map x to ids when the embedding does not consume raw strings.
     self.transform.map_x = embed_layer.dtype != tf.string
     return CNNTaggingModel(num_tags=len(self.transform.tag_vocab),
                            embed=embed_layer,
                            **kwargs)
Example #2
0
    def build_model(self, word_embed, ngram_embed, window_size, weight_norm,
                    filters, kernel_size, dropout_embed, dropout_hidden,
                    **kwargs) -> tf.keras.Model:
        """Build an ngram-convolution tagging model.

        Args:
            word_embed: Word embedding spec for ``build_embedding``.
            ngram_embed: Ngram embedding spec; only built when
                ``window_size`` is truthy.
            window_size: Ngram window; falsy disables the ngram embedding.
            weight_norm: Whether the model applies weight normalization.
            filters: Convolution filter count(s).
            kernel_size: Convolution kernel size.
            dropout_embed: Dropout rate after the embeddings.
            dropout_hidden: Dropout rate on hidden layers.
            **kwargs: Ignored extras for interface compatibility.

        Returns:
            A ``NgramConvTaggingModel`` sized to the tag vocabulary.
        """
        transform = self.transform
        word_embed = build_embedding(word_embed, transform.word_vocab,
                                     transform)
        # Migrate the legacy 'map_x' config key to 'map_word_feature';
        # default to True when the legacy key is absent.
        if 'map_x' in self.config:
            self.config.map_word_feature = self.config.map_x
            del self.config.map_x
        else:
            self.config.map_word_feature = True
        ngram_embed = (build_embedding(ngram_embed, transform.ngram_vocab,
                                       transform)
                       if window_size else None)
        return NgramConvTaggingModel(word_embed, ngram_embed, filters,
                                     kernel_size, dropout_embed,
                                     dropout_hidden, weight_norm,
                                     len(transform.tag_vocab))
 def build_model(self, embeddings, embedding_trainable, rnn_input_dropout, rnn_output_dropout, rnn_units,
                 loss,
                 **kwargs) -> tf.keras.Model:
     """Build a BiLSTM tagging model as a Keras ``Sequential`` stack.

     Args:
         embeddings: Embedding spec for ``build_embedding``.
         embedding_trainable: Kept for interface compatibility (unused here).
         rnn_input_dropout: Dropout rate before the BiLSTM; falsy skips it.
         rnn_output_dropout: Dropout rate after the BiLSTM; falsy skips it.
         rnn_units: Hidden units per LSTM direction.
         loss: Kept for interface compatibility (unused here).
         **kwargs: Ignored extras.

     Returns:
         A ``tf.keras.Sequential`` of embedding, optional dropouts, BiLSTM
         and a time-distributed dense projection over the tag vocabulary.
     """
     embed_layer = build_embedding(embeddings, self.transform.word_vocab, self.transform)
     stack = [embed_layer]
     if rnn_input_dropout:
         stack.append(tf.keras.layers.Dropout(rnn_input_dropout, name='rnn_input_dropout'))
     stack.append(tf.keras.layers.Bidirectional(
         tf.keras.layers.LSTM(units=rnn_units, return_sequences=True), name='bilstm'))
     if rnn_output_dropout:
         stack.append(tf.keras.layers.Dropout(rnn_output_dropout, name='rnn_output_dropout'))
     stack.append(tf.keras.layers.TimeDistributed(
         tf.keras.layers.Dense(len(self.transform.tag_vocab)), name='dense'))
     model = tf.keras.Sequential()
     for layer in stack:
         model.add(layer)
     return model
Example #4
0
 def build_model(self, pretrained_embed, n_embed, training, **kwargs) -> tf.keras.Model:
     """Build a biaffine dependency-parsing model, populating ``self.config``.

     Args:
         pretrained_embed: Optional pretrained embedding spec; when truthy
             it is built and its output dim overrides ``config.n_embed``.
         n_embed: Kept for interface compatibility (config value may be
             overridden by the pretrained embedding's output dim).
         training: True when building for training; False zeroes
             ``lstm_dropout`` so Keras can pick the cuDNN LSTM kernel.
         **kwargs: Ignored extras.

     Returns:
         A ``BiaffineModel`` built from the populated config.
     """
     cfg = self.config
     transform = self.transform
     if training:
         cfg.n_words = len(transform.form_vocab)
     else:
         # Keras uses the cuDNN LSTM only when lstm_dropout is 0.
         cfg.lstm_dropout = 0.
     cfg.n_feats = len(transform.cpos_vocab)
     cfg.n_rels = len(transform.rel_vocab)
     cfg.pad_index = transform.form_vocab.pad_idx
     cfg.unk_index = transform.form_vocab.unk_idx
     cfg.bos_index = 2
     if pretrained_embed:
         pretrained: tf.keras.layers.Embedding = build_embedding(
             pretrained_embed, transform.form_vocab, transform)
         cfg.n_embed = pretrained.output_dim
     else:
         pretrained = None
     return BiaffineModel(cfg, pretrained)