def layer_forward(self, x, hx, cell, batch_sizes, reverse=False):
    hx_0 = hx_i = hx
    hx_n, output = [], []
    steps = reversed(range(len(x))) if reverse else range(len(x))
    if self.training:
        # one dropout mask shared across all timesteps of this layer
        hid_mask = SharedDropout.get_mask(hx_0[0], self.dropout)

    for t in steps:
        last_batch_size, batch_size = len(hx_i[0]), batch_sizes[t]
        if last_batch_size < batch_size:
            # longer sequences enter the batch: append their initial states
            hx_i = [torch.cat((h, ih[last_batch_size:batch_size]))
                    for h, ih in zip(hx_i, hx_0)]
        else:
            # shorter sequences have finished: stash their final states
            hx_n.append([h[batch_size:] for h in hx_i])
            hx_i = [h[:batch_size] for h in hx_i]
        hx_i = [h for h in cell(x[t], hx_i)]
        output.append(hx_i[0])
        if self.training:
            hx_i[0] = hx_i[0] * hid_mask[:batch_size]
    if reverse:
        hx_n = hx_i
        output.reverse()
    else:
        hx_n.append(hx_i)
        hx_n = [torch.cat(h) for h in zip(*reversed(hx_n))]
    output = torch.cat(output)

    return output, hx_n
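# A minimal standalone sketch of the shared-mask dropout used above.
# Assumption: SharedDropout.get_mask(x, p) returns a single mask with the
# shape of `x`, drawn once and scaled by 1/(1-p), which the caller then
# reuses (sliced to the current batch size) at every timestep; this is an
# illustration, not the library's implementation.
import torch


def shared_dropout_mask(x, p):
    # Bernoulli keep-mask, rescaled so activations keep their expectation
    return x.new_empty(x.shape).bernoulli_(1 - p) / (1 - p)


h = torch.randn(8, 400)                  # [batch_size, hidden_size]
mask = shared_dropout_mask(h, p=0.33)    # drawn once per layer
print((h[:5] * mask[:5]).shape)          # same mask reused at a later, smaller step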
def forward(self, sequence, hx=None): r""" Args: sequence (~torch.nn.utils.rnn.PackedSequence): A packed variable length sequence. hx (~torch.Tensor, ~torch.Tensor): A tuple composed of two tensors `h` and `c`. `h` of shape ``[num_layers*2, batch_size, hidden_size]`` contains the initial hidden state for each element in the batch. `c` of shape ``[num_layers*2, batch_size, hidden_size]`` contains the initial cell state for each element in the batch. If `hx` is not provided, both `h` and `c` default to zero. Default: ``None``. Returns: ~torch.nn.utils.rnn.PackedSequence, (~torch.Tensor, ~torch.Tensor): The first is a packed variable length sequence. The second is a tuple of tensors `h` and `c`. `h` of shape ``[num_layers*2, batch_size, hidden_size]`` contains the hidden state for `t = seq_len`. Like output, the layers can be separated using ``h.view(num_layers, 2, batch_size, hidden_size)`` and similarly for c. `c` of shape ``[num_layers*2, batch_size, hidden_size]`` contains the cell state for `t = seq_len`. """ x, batch_sizes = sequence.data, sequence.batch_sizes.tolist() batch_size = batch_sizes[0] h_n, c_n = [], [] if hx is None: ih = x.new_zeros(self.num_layers * 2, batch_size, self.hidden_size) h, c = ih, ih else: h, c = self.permute_hidden(hx, sequence.sorted_indices) h = h.view(self.num_layers, 2, batch_size, self.hidden_size) c = c.view(self.num_layers, 2, batch_size, self.hidden_size) for i in range(self.num_layers): x = torch.split(x, batch_sizes) if self.training: mask = SharedDropout.get_mask(x[0], self.dropout) x = [i * mask[:len(i)] for i in x] x_f, (h_f, c_f) = self.layer_forward(x=x, hx=(h[i, 0], c[i, 0]), cell=self.f_cells[i], batch_sizes=batch_sizes) x_b, (h_b, c_b) = self.layer_forward(x=x, hx=(h[i, 1], c[i, 1]), cell=self.b_cells[i], batch_sizes=batch_sizes, reverse=True) x = torch.cat((x_f, x_b), -1) h_n.append(torch.stack((h_f, h_b))) c_n.append(torch.stack((c_f, c_b))) x = PackedSequence(x, sequence.batch_sizes, sequence.sorted_indices, sequence.unsorted_indices) hx = torch.cat(h_n, 0), torch.cat(c_n, 0) hx = self.permute_hidden(hx, sequence.unsorted_indices) return x, hx
def forward(self, sequence, hx=None): """ Args: sequence (PackedSequence): A packed variable length sequence. hx (tuple[torch.Tensor, torch.Tensor]): h (``[num_layers * 2, batch_size, hidden_size]``) contains the initial hidden state for each element in the batch. c (``[num_layers * 2, batch_size, hidden_size]``) contains the initial cell state for each element in the batch. If (h, x) is not provided, both h and c default to zero. Default: None. Returns: x (PackedSequence): A packed variable length sequence. hx (tuple[torch.Tensor, torch.Tensor]): h (``[num_layers * 2, batch_size, hidden_size]``) contains the hidden state for ``t = seq_len``. Like output, the layers can be separated using ``h.view(num_layers, 2, batch_size, hidden_size)`` and similarly for c. c (``[num_layers * 2, batch_size, hidden_size]``) contains the cell state for ``t = seq_len``. """ x, batch_sizes = sequence.data, sequence.batch_sizes.tolist() batch_size = batch_sizes[0] h_n, c_n = [], [] if hx is None: ih = x.new_zeros(self.num_layers * 2, batch_size, self.hidden_size) h, c = ih, ih else: h, c = self.permute_hidden(hx, sequence.sorted_indices) h = h.view(self.num_layers, 2, batch_size, self.hidden_size) c = c.view(self.num_layers, 2, batch_size, self.hidden_size) for i in range(self.num_layers): x = torch.split(x, batch_sizes) if self.training: mask = SharedDropout.get_mask(x[0], self.dropout) x = [i * mask[:len(i)] for i in x] x_f, (h_f, c_f) = self.layer_forward(x=x, hx=(h[i, 0], c[i, 0]), cell=self.f_cells[i], batch_sizes=batch_sizes) x_b, (h_b, c_b) = self.layer_forward(x=x, hx=(h[i, 1], c[i, 1]), cell=self.b_cells[i], batch_sizes=batch_sizes, reverse=True) x = torch.cat((x_f, x_b), -1) h_n.append(torch.stack((h_f, h_b))) c_n.append(torch.stack((c_f, c_b))) x = PackedSequence(x, sequence.batch_sizes, sequence.sorted_indices, sequence.unsorted_indices) hx = torch.cat(h_n, 0), torch.cat(c_n, 0) hx = self.permute_hidden(hx, sequence.unsorted_indices) return x, hx
def __init__(self, n_in, n_out, dropout=0):
    super().__init__()

    self.n_in = n_in
    self.n_out = n_out
    self.linear = nn.Linear(n_in, n_out)
    self.activation = nn.LeakyReLU(negative_slope=0.1)
    self.dropout = SharedDropout(p=dropout)

    self.reset_parameters()
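# Minimal standalone sketch of the MLP block above: a single Linear layer
# followed by LeakyReLU(0.1) and dropout. nn.Dropout stands in for the
# library's SharedDropout, and the forward pass shown here is an assumption
# for illustration, not the library's exact code.
import torch
import torch.nn as nn


class TinyMLP(nn.Module):
    def __init__(self, n_in, n_out, dropout=0.33):
        super().__init__()
        self.linear = nn.Linear(n_in, n_out)
        self.activation = nn.LeakyReLU(negative_slope=0.1)
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, x):
        return self.dropout(self.activation(self.linear(x)))


mlp = TinyMLP(n_in=800, n_out=500)
x = torch.randn(5, 12, 800)     # e.g. BiLSTM output: [batch, seq_len, 2 * hidden]
print(mlp(x).shape)             # torch.Size([5, 12, 500])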
def __init__(self,
             n_words,
             n_feats,
             n_labels,
             feat='char',
             n_embed=100,
             n_feat_embed=100,
             n_char_embed=50,
             bert=None,
             n_bert_layers=4,
             max_len=None,
             mix_dropout=.0,
             embed_dropout=.33,
             n_lstm_hidden=400,
             n_lstm_layers=3,
             lstm_dropout=.33,
             n_mlp_span=500,
             n_mlp_label=100,
             mlp_dropout=.33,
             feat_pad_index=0,
             pad_index=0,
             unk_index=1,
             **kwargs):
    super().__init__()

    self.args = Config().update(locals())
    # the embedding layer
    self.word_embed = nn.Embedding(num_embeddings=n_words,
                                   embedding_dim=n_embed)
    if feat == 'char':
        self.feat_embed = CharLSTM(n_chars=n_feats,
                                   n_embed=n_char_embed,
                                   n_out=n_feat_embed,
                                   pad_index=feat_pad_index)
    elif feat == 'bert':
        self.feat_embed = BertEmbedding(model=bert,
                                        n_layers=n_bert_layers,
                                        n_out=n_feat_embed,
                                        pad_index=feat_pad_index,
                                        max_len=max_len,
                                        dropout=mix_dropout)
        self.n_feat_embed = self.feat_embed.n_out
    elif feat == 'tag':
        self.feat_embed = nn.Embedding(num_embeddings=n_feats,
                                       embedding_dim=n_feat_embed)
    else:
        raise RuntimeError("The feat type should be in ['char', 'bert', 'tag'].")
    self.embed_dropout = IndependentDropout(p=embed_dropout)

    # the lstm layer
    self.lstm = BiLSTM(input_size=n_embed+n_feat_embed,
                       hidden_size=n_lstm_hidden,
                       num_layers=n_lstm_layers,
                       dropout=lstm_dropout)
    self.lstm_dropout = SharedDropout(p=lstm_dropout)

    # the MLP layers
    self.mlp_span_l = MLP(n_in=n_lstm_hidden*2, n_out=n_mlp_span, dropout=mlp_dropout)
    self.mlp_span_r = MLP(n_in=n_lstm_hidden*2, n_out=n_mlp_span, dropout=mlp_dropout)
    self.mlp_label_l = MLP(n_in=n_lstm_hidden*2, n_out=n_mlp_label, dropout=mlp_dropout)
    self.mlp_label_r = MLP(n_in=n_lstm_hidden*2, n_out=n_mlp_label, dropout=mlp_dropout)

    # the Biaffine layers
    self.span_attn = Biaffine(n_in=n_mlp_span, bias_x=True, bias_y=False)
    self.label_attn = Biaffine(n_in=n_mlp_label, n_out=n_labels, bias_x=True, bias_y=True)
    self.crf = CRFConstituency()
    self.criterion = nn.CrossEntropyLoss()
    self.pad_index = pad_index
    self.unk_index = unk_index
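# Minimal sketch of the biaffine scoring wired up above: two MLP views of the
# encoder output are combined through a bilinear form with optional bias
# columns. TinyBiaffine is an illustration under assumptions about the
# library's Biaffine class (n_out=1 output squeezed), not its exact code.
import torch
import torch.nn as nn


class TinyBiaffine(nn.Module):
    def __init__(self, n_in, n_out=1, bias_x=True, bias_y=True):
        super().__init__()
        self.bias_x, self.bias_y = bias_x, bias_y
        self.weight = nn.Parameter(torch.zeros(n_out, n_in + bias_x, n_in + bias_y))

    def forward(self, x, y):
        if self.bias_x:
            x = torch.cat((x, torch.ones_like(x[..., :1])), -1)
        if self.bias_y:
            y = torch.cat((y, torch.ones_like(y[..., :1])), -1)
        # bilinear scores of shape [batch, n_out, seq_len, seq_len]
        s = torch.einsum('bxi,oij,byj->boxy', x, self.weight, y)
        return s.squeeze(1) if s.size(1) == 1 else s


span_l = torch.randn(5, 12, 500)    # left-boundary view, e.g. mlp_span_l output
span_r = torch.randn(5, 12, 500)    # right-boundary view, e.g. mlp_span_r output
span_attn = TinyBiaffine(n_in=500, bias_x=True, bias_y=False)
print(span_attn(span_l, span_r).shape)   # torch.Size([5, 12, 12])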
def __init__(self,
             n_words,
             n_feats,
             n_rels,
             encoder='lstm',
             feat='char',
             n_embed=100,
             n_feat_embed=100,
             n_char_embed=50,
             bert=None,
             n_bert_layers=4,
             mix_dropout=.0,
             embed_dropout=.33,
             n_lstm_hidden=400,
             n_lstm_layers=3,
             n_att_layers=6,
             lstm_dropout=.33,
             n_mlp_arc=500,
             n_mlp_rel=100,
             mlp_dropout=.33,
             feat_pad_index=0,
             pad_index=0,
             unk_index=1,
             **kwargs):
    super().__init__()

    self.args = Config().update(locals())
    # trainable word embeddings, updated during training
    self.word_embed = nn.Embedding(num_embeddings=n_words,
                                   embedding_dim=n_embed)
    self.feat_embed = CharLSTM(n_chars=n_feats,
                               n_embed=n_char_embed,
                               n_out=n_feat_embed,
                               pad_index=feat_pad_index)
    # dropout on the input layer, using independent dropout
    self.embed_dropout = IndependentDropout(p=embed_dropout)

    self.encoder_type = encoder
    if encoder == 'lstm':
        # LSTM encoder with shared dropout
        self.encoder = BiLSTM(input_size=n_embed + n_feat_embed,
                              hidden_size=n_lstm_hidden,
                              num_layers=n_lstm_layers,
                              dropout=lstm_dropout)
        self.lstm_dropout = SharedDropout(p=lstm_dropout)
    elif encoder == 'att':
        d_input = n_embed + n_feat_embed
        # projection applied before the attention encoder
        self.linear1 = nn.Linear(d_input, n_lstm_hidden * 2)
        self.encoder = Attention_encoder(d_model=n_lstm_hidden * 2,
                                         n_layers=n_att_layers)
        # projection applied after the encoder (currently unused)
        # self.linear2 = nn.Linear(512, n_lstm_hidden * 2, bias=False)

    # four separate MLPs projecting to the arc and relation dimensions
    self.mlp_arc_d = MLP(n_in=n_lstm_hidden * 2, n_out=n_mlp_arc, dropout=mlp_dropout)
    self.mlp_arc_h = MLP(n_in=n_lstm_hidden * 2, n_out=n_mlp_arc, dropout=mlp_dropout)
    self.mlp_rel_d = MLP(n_in=n_lstm_hidden * 2, n_out=n_mlp_rel, dropout=mlp_dropout)
    self.mlp_rel_h = MLP(n_in=n_lstm_hidden * 2, n_out=n_mlp_rel, dropout=mlp_dropout)

    # the Biaffine layers
    self.arc_attn = Biaffine(n_in=n_mlp_arc, bias_x=True, bias_y=False)
    self.rel_attn = Biaffine(n_in=n_mlp_rel, n_out=n_rels, bias_x=True, bias_y=True)
    self.criterion = nn.CrossEntropyLoss()
    self.pad_index = pad_index
    self.unk_index = unk_index
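# Sketch of the 'att' encoder branch above: the input is first projected to
# the same width the BiLSTM would produce (n_lstm_hidden * 2), so the
# downstream arc/rel MLPs work unchanged for either encoder.
# nn.TransformerEncoder stands in here for the custom Attention_encoder,
# whose internals are not shown; head count and sizes are illustrative.
import torch
import torch.nn as nn

n_embed, n_feat_embed, n_lstm_hidden, n_att_layers = 100, 100, 400, 6

linear1 = nn.Linear(n_embed + n_feat_embed, n_lstm_hidden * 2)
encoder = nn.TransformerEncoder(
    nn.TransformerEncoderLayer(d_model=n_lstm_hidden * 2, nhead=8, batch_first=True),
    num_layers=n_att_layers)

embed = torch.randn(5, 12, n_embed + n_feat_embed)   # word + char features
x = encoder(linear1(embed))                          # same width as BiLSTM output
print(x.shape)                                       # torch.Size([5, 12, 800])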
def __init__(self,
             n_words,
             n_labels,
             n_tags=None,
             n_chars=None,
             n_lemmas=None,
             feat='tag,char,lemma',
             n_embed=100,
             n_embed_proj=125,
             n_feat_embed=100,
             n_char_embed=50,
             char_pad_index=0,
             bert=None,
             n_bert_layers=4,
             mix_dropout=.0,
             bert_pad_index=0,
             embed_dropout=.2,
             n_lstm_hidden=600,
             n_lstm_layers=3,
             lstm_dropout=.33,
             n_mlp_edge=600,
             n_mlp_label=600,
             edge_mlp_dropout=.25,
             label_mlp_dropout=.33,
             interpolation=0.1,
             pad_index=0,
             unk_index=1,
             **kwargs):
    super().__init__()

    self.args = Config().update(locals())
    # the embedding layer
    self.word_embed = nn.Embedding(num_embeddings=n_words,
                                   embedding_dim=n_embed)
    self.embed_proj = nn.Linear(n_embed, n_embed_proj)

    self.n_input = n_embed + n_embed_proj
    if 'tag' in feat:
        self.tag_embed = nn.Embedding(num_embeddings=n_tags,
                                      embedding_dim=n_feat_embed)
        self.n_input += n_feat_embed
    if 'char' in feat:
        self.char_embed = CharLSTM(n_chars=n_chars,
                                   n_embed=n_char_embed,
                                   n_out=n_feat_embed,
                                   pad_index=char_pad_index)
        self.n_input += n_feat_embed
    if 'lemma' in feat:
        self.lemma_embed = nn.Embedding(num_embeddings=n_lemmas,
                                        embedding_dim=n_feat_embed)
        self.n_input += n_feat_embed
    if 'bert' in feat:
        self.bert_embed = BertEmbedding(model=bert,
                                        n_layers=n_bert_layers,
                                        pad_index=bert_pad_index,
                                        dropout=mix_dropout)
        self.n_input += self.bert_embed.n_out
    self.embed_dropout = IndependentDropout(p=embed_dropout)

    # the lstm layer
    self.lstm = LSTM(input_size=self.n_input,
                     hidden_size=n_lstm_hidden,
                     num_layers=n_lstm_layers,
                     bidirectional=True,
                     dropout=lstm_dropout)
    self.lstm_dropout = SharedDropout(p=lstm_dropout)

    # the MLP layers
    self.mlp_edge_d = MLP(n_in=n_lstm_hidden*2, n_out=n_mlp_edge, dropout=edge_mlp_dropout, activation=False)
    self.mlp_edge_h = MLP(n_in=n_lstm_hidden*2, n_out=n_mlp_edge, dropout=edge_mlp_dropout, activation=False)
    self.mlp_label_d = MLP(n_in=n_lstm_hidden*2, n_out=n_mlp_label, dropout=label_mlp_dropout, activation=False)
    self.mlp_label_h = MLP(n_in=n_lstm_hidden*2, n_out=n_mlp_label, dropout=label_mlp_dropout, activation=False)

    # the Biaffine layers
    self.edge_attn = Biaffine(n_in=n_mlp_edge, n_out=2, bias_x=True, bias_y=True)
    self.label_attn = Biaffine(n_in=n_mlp_label, n_out=n_labels, bias_x=True, bias_y=True)
    self.criterion = nn.CrossEntropyLoss()
    self.interpolation = interpolation
    self.pad_index = pad_index
    self.unk_index = unk_index
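# The `interpolation` argument above suggests the training loss mixes an
# edge-existence term (from edge_attn, n_out=2) with a label term (from
# label_attn). The combination below follows that common edge/label
# interpolation pattern and is an assumption for illustration, not
# necessarily this model's exact loss; shapes and n_labels=30 are made up.
import torch
import torch.nn as nn

criterion = nn.CrossEntropyLoss()
interpolation = 0.1

batch, seq = 5, 12
s_edge = torch.randn(batch, seq, seq, 2)         # edge scores per (dep, head) pair
s_label = torch.randn(batch, seq, seq, 30)       # label scores per (dep, head) pair
edges = torch.randint(0, 2, (batch, seq, seq))   # gold 0/1 edge indicators
labels = torch.randint(0, 30, (batch, seq, seq))
mask = torch.ones(batch, seq, seq, dtype=torch.bool)

edge_mask = edges.gt(0) & mask
edge_loss = criterion(s_edge[mask], edges[mask])
label_loss = criterion(s_label[edge_mask], labels[edge_mask])
loss = interpolation * label_loss + (1 - interpolation) * edge_loss
print(loss.item())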
def __init__(self,
             n_words,
             n_feats,
             n_labels,
             feat='char',
             n_embed=100,
             n_feat_embed=100,
             n_char_embed=50,
             bert=None,
             n_bert_layers=4,
             mix_dropout=.0,
             embed_dropout=.33,
             n_lstm_hidden=600,
             n_lstm_layers=3,
             lstm_dropout=.33,
             n_mlp_edge=600,
             n_mlp_label=600,
             mlp_dropout=.33,
             feat_pad_index=0,
             pad_index=0,
             unk_index=1,
             **kwargs):
    super().__init__()

    self.args = Config().update(locals())
    # the embedding layer
    self.word_embed = nn.Embedding(num_embeddings=n_words,
                                   embedding_dim=n_embed)
    # linear layer projecting the 100d GloVe embeddings to n_embed dimensions
    self.glove_linear = nn.Linear(100, n_embed)  # GloVe 100d vectors are used

    if feat == 'char':
        self.feat_embed = CharLSTM(n_chars=n_feats,
                                   n_embed=n_char_embed,
                                   n_out=n_feat_embed,
                                   pad_index=feat_pad_index)
    elif feat == 'bert':
        self.feat_embed = BertEmbedding(model=bert,
                                        n_layers=n_bert_layers,
                                        n_out=n_feat_embed,
                                        pad_index=feat_pad_index,
                                        dropout=mix_dropout)
        self.n_feat_embed = self.feat_embed.n_out
    elif feat == 'tag':
        self.feat_embed = nn.Embedding(num_embeddings=n_feats,
                                       embedding_dim=n_feat_embed)
    else:
        raise RuntimeError("The feat type should be in ['char', 'bert', 'tag'].")
    self.embed_dropout = IndependentDropout(p=embed_dropout)

    # the lstm layer
    self.lstm = LSTM(input_size=n_embed + n_feat_embed,
                     hidden_size=n_lstm_hidden,
                     num_layers=n_lstm_layers,
                     bidirectional=True,
                     dropout=lstm_dropout)
    self.lstm_dropout = SharedDropout(p=lstm_dropout)

    # the MLP layers
    self.mlp_edge_d = MLP(n_in=n_lstm_hidden * 2, n_out=n_mlp_edge, dropout=mlp_dropout)
    self.mlp_edge_h = MLP(n_in=n_lstm_hidden * 2, n_out=n_mlp_edge, dropout=mlp_dropout)
    self.mlp_label_d = MLP(n_in=n_lstm_hidden * 2, n_out=n_mlp_label, dropout=mlp_dropout)
    self.mlp_label_h = MLP(n_in=n_lstm_hidden * 2, n_out=n_mlp_label, dropout=mlp_dropout)

    # the Biaffine layers
    self.edge_attn = Biaffine(n_in=n_mlp_edge, n_out=2, bias_x=True, bias_y=True)
    self.label_attn = Biaffine(n_in=n_mlp_label, n_out=n_labels, bias_x=True, bias_y=True)
    self.criterion = nn.CrossEntropyLoss()
    self.pad_index = pad_index
    self.unk_index = unk_index