def __init__(self, conf, vocab, char_vocab, tag_vocab):
    """Build a character-only CNN with two output heads: a sentence-level
    binary head and a per-token NER head (optionally CRF-decoded).

    Args:
        conf: configuration dict; keys read here: "c_cnn_layers",
            "w_cnn_layers", "mid_dropout", "w_bin_out", "use_crf".
        vocab: word vocabulary (unused by this char-only variant; kept so
            the signature matches the sibling model constructors).
        char_vocab: character vocabulary for the character CNN.
        tag_vocab: NER tag vocabulary; its length fixes the NER output size.

    NOTE(review): `self.device` is read before any assignment in this
    method — presumably the base class sets it; confirm.
    """
    super().__init__()

    # Character-level CNN supplies the entire word representation.
    self.char_cnn = char_CNN(conf, char_vocab, self.device)
    # Input width to the word CNN = total char-CNN filters.
    # (Sibling variants read self.char_cnn.output_size instead; presumably
    # equal — worth unifying once confirmed.)
    in_shape = sum(layer["filters"] for layer in conf["c_cnn_layers"])

    # Main word-level CNN; keep_dims=True preserves sequence length so the
    # per-token NER head can be attached.
    self.w_cnn_layers = word_CNN(conf["w_cnn_layers"], in_shape, 1, keep_dims=True)
    output_size = sum(layer["filters"] for layer in conf["w_cnn_layers"])

    self.mid_dropout = nn.Dropout(conf["mid_dropout"])

    # Binary head: when the pooling mode is "maxpool" the head receives a
    # feature vector twice as wide as the CNN output.
    self.w_bin_out = conf["w_bin_out"]
    out_bin_size = 2 * output_size if self.w_bin_out == "maxpool" else output_size
    self.bin_out = nn.Linear(out_bin_size, 1)

    # NER head: one score per tag, per token.
    self.n_tags = len(tag_vocab)
    self.ner_out = nn.Linear(output_size, self.n_tags)

    # Optional CRF decoding layer over the tag scores.
    self.use_crf = conf["use_crf"]
    if self.use_crf:
        self.crf = CRF(self.n_tags, batch_first=True)

    # Move all parameters to the configured device.
    self.to(self.device)
def __init__(self, conf, vocab, char_vocab, tag_vocab):
    """Assemble the CNN_RNN tagger: word embeddings, an optional
    character-level CNN, a word-level BiLSTM, a linear tag projection,
    and an optional CRF decoding layer.

    NOTE(review): `self.device` is read before being set here —
    presumably assigned by the base class; confirm.
    """
    super(CNN_RNN, self).__init__()

    # Word embeddings (with their initial dropout).
    self.w_input = word_emb(conf, vocab)

    # Optional character CNN; its features are concatenated with the
    # word embedding before the BiLSTM.
    self.use_chars = conf["use_chars"]
    rnn_in_dim = conf["w_dim"]
    if self.use_chars:
        self.char_cnn = char_CNN(conf, char_vocab, self.device)
        rnn_in_dim = self.char_cnn.output_size + conf["w_dim"]

    # Word-level BiLSTM over the concatenated features.
    self.word_RNN = word_RNN(rnn_in_dim, conf["w_rnn_out"], conf["w_rnn_layers"])

    # Per-token tag scores.
    self.mid_dropout = nn.Dropout(conf["mid_dropout"])
    self.n_tags = len(tag_vocab)
    self.output = nn.Linear(self.word_RNN.output_size, self.n_tags)

    # Optional CRF decoding layer.
    self.use_crf = conf["use_crf"]
    if self.use_crf:
        self.crf = CRF(self.n_tags, batch_first=True)

    # Move all parameters to the configured device.
    self.to(self.device)
def __init__(self, conf, vocab, char_vocab, tag_vocab):
    """Character-only CNN tagger: char-CNN features feed a word-level CNN
    (sequence length preserved), followed by a linear tag projection and
    an optional CRF decoder.

    `vocab` is unused in this char-only variant; the signature matches
    the sibling constructors. NOTE(review): `self.device` is read before
    being set here — presumably assigned by the base class; confirm.
    """
    super(C_CNN, self).__init__()

    # Character-level CNN supplies the entire word representation.
    self.char_cnn = char_CNN(conf, char_vocab, self.device)

    # Word-level CNN; keep_dims=True keeps one vector per token so tags
    # can be predicted at every position.
    self.w_cnn_layers = word_CNN(
        conf["w_cnn_layers"], self.char_cnn.output_size, 1, keep_dims=True
    )

    # Per-token tag scores.
    self.mid_dropout = nn.Dropout(conf["mid_dropout"])
    self.n_tags = len(tag_vocab)
    self.output = nn.Linear(self.w_cnn_layers.output_size, self.n_tags)

    # Optional CRF decoding layer.
    self.use_crf = conf["use_crf"]
    if self.use_crf:
        self.crf = CRF(self.n_tags, batch_first=True)

    # Move all parameters to the configured device.
    self.to(self.device)
def __init__(self, conf, vocab, char_vocab):
    """Binary CNN classifier over word embeddings, optionally augmented
    with character-level CNN features.

    NOTE(review): `self.device` is read before being set here —
    presumably assigned by the base class; confirm.
    """
    super(CNN, self).__init__()

    # Word embeddings (with their initial dropout).
    self.w_input = word_emb(conf, vocab)

    # Optional character CNN, concatenated with the word embedding.
    self.use_chars = conf["use_chars"]
    cnn_in_dim = conf["w_dim"]
    if self.use_chars:
        self.char_cnn = char_CNN(conf, char_vocab, self.device)
        cnn_in_dim = self.char_cnn.output_size + conf["w_dim"]

    # Main word-level CNN.
    self.w_cnn_layers = word_CNN(conf["w_cnn_layers"], cnn_in_dim, 1)

    # Single-logit output head.
    self.mid_dropout = nn.Dropout(conf["mid_dropout"])
    self.output = nn.Linear(self.w_cnn_layers.output_size, 1)

    # Move all parameters to the configured device.
    self.to(self.device)
def __init__(self, conf, vocab, char_vocab, tag_vocab):
    """Build the CNN_RNN model with two output heads: a sentence-level
    binary head and a per-token NER head (optionally CRF-decoded).

    Args:
        conf: configuration dict; keys read here: "w_dim", "w_in_dropout",
            "use_chars", "c_cnn_layers", "w_rnn_layers", "mid_dropout",
            "w_bin_out", "use_crf".
        vocab: word vocabulary for the embedding layer.
        char_vocab: character vocabulary for the optional character CNN.
        tag_vocab: NER tag vocabulary; its length fixes the NER output size.

    NOTE(review): `self.device` is read before any assignment in this
    method — presumably the base class sets it; confirm.
    """
    super().__init__()

    # Word embeddings and input (spatial) dropout.
    self.w_emb = Embedding(conf, vocab, "w_dim")
    self.w_in_dropout = SpatialDropout(conf["w_in_dropout"])

    # Optional character-level CNN; its filter outputs are concatenated
    # with the word embedding before the BiLSTM.
    # (Sibling variants read self.char_cnn.output_size instead of summing
    # conf filters; presumably equal — worth unifying once confirmed.)
    self.use_chars = conf["use_chars"]
    in_shape = conf["w_dim"]
    if self.use_chars:
        self.char_cnn = char_CNN(conf, char_vocab, self.device)
        in_shape += sum(layer["filters"] for layer in conf["c_cnn_layers"])

    # Main word-level BiLSTM.
    self.word_RNN = word_RNN(in_shape, None, conf["w_rnn_layers"])
    output_size = self.word_RNN.output_size

    self.mid_dropout = nn.Dropout(conf["mid_dropout"])

    # Binary head: when the pooling mode is "maxpool" the head receives a
    # feature vector twice as wide as the BiLSTM output.
    self.w_bin_out = conf["w_bin_out"]
    out_bin_size = 2 * output_size if self.w_bin_out == "maxpool" else output_size
    self.bin_out = nn.Linear(out_bin_size, 1)

    # NER head: one score per tag, per token.
    self.n_tags = len(tag_vocab)
    self.ner_out = nn.Linear(output_size, self.n_tags)

    # Optional CRF decoding layer over the tag scores.
    self.use_crf = conf["use_crf"]
    if self.use_crf:
        self.crf = CRF(self.n_tags, batch_first=True)

    # Move all parameters to the configured device.
    self.to(self.device)
def __init__(self, conf, vocab, char_vocab):
    """Character-only CNN binary classifier: char-CNN features feed a
    word-level CNN followed by a single-logit output head.

    `vocab` is unused in this char-only variant; the signature matches
    the sibling constructors. NOTE(review): `self.device` is read before
    being set here — presumably assigned by the base class; confirm.
    """
    super(C_CNN, self).__init__()

    # Character-level CNN supplies the entire word representation.
    self.char_cnn = char_CNN(conf, char_vocab, self.device)

    # Main word-level CNN over the char features.
    self.w_cnn_layers = word_CNN(
        conf["w_cnn_layers"], self.char_cnn.output_size, 1
    )

    # Single-logit output head.
    self.mid_dropout = nn.Dropout(conf["mid_dropout"])
    self.output = nn.Linear(self.w_cnn_layers.output_size, 1)

    # Move all parameters to the configured device.
    self.to(self.device)