Example #1
    def forward(self, t, length):
        # mask: 1.0 for real tokens (id > 0), 0.0 for padding
        mask = torch.gt(torch.unsqueeze(t, 2), 0).type(
            torch.cuda.FloatTensor)  # (batch_size, sent_len, 1)
        mask.requires_grad = False
        SEQ = mask.size(1)

        emb = t = self.embeds(t)  # emb is kept but unused below

        t = self.fc1_dropout(t)
        # pack so the LSTM skips padded positions
        t = nn.utils.rnn.pack_padded_sequence(t,
                                              lengths=length,
                                              batch_first=True)
        # t = t.mul(mask)  # (batch_size,sent_len,char_size)

        t1, (h_n, c_n) = self.lstm1(t, None)
        # t1, (h_n, c_n) = self.lstm2(t1, None)
        t1, _ = nn.utils.rnn.pad_packed_sequence(t1,
                                                 batch_first=True,
                                                 total_length=SEQ)

        t_max, t_max_index = seq_max_pool([t1, mask])  # masked max over time

        t_dim = list(t1.size())[-1]  # unused
        o = seq_and_vec([t1, t_max])  # append the pooled vector to every step

        o = o.permute(0, 2, 1)  # (batch, channels, seq_len) for Conv1d
        o = self.conv1(o)

        o = o.permute(0, 2, 1)  # back to (batch, seq_len, channels)

        # merge the bidirectional state's two directions into a single layer
        h_n = torch.cat((h_n[0], h_n[1]), dim=-1).unsqueeze(0)
        c_n = torch.cat((c_n[0], c_n[1]), dim=-1).unsqueeze(0)
        return o, (h_n, c_n)
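
The helpers seq_max_pool and seq_and_vec are used throughout these examples but are not shown. A minimal sketch of plausible implementations, assuming the mask convention above (1.0 for real tokens, 0.0 for padding); the repository's actual versions may differ:

import torch

def seq_max_pool(x):
    # Max-pool over the time axis while ignoring padded positions.
    # seq: (batch, seq_len, size); mask: (batch, seq_len, 1), 1.0 = real token.
    seq, mask = x
    seq = seq - (1 - mask) * 1e10  # push padded positions toward -inf before the max
    return torch.max(seq, dim=1)  # (values, indices), matching t_max, t_max_index

def seq_and_vec(x):
    # Concatenate one vector per sequence onto every time step.
    # seq: (batch, seq_len, size); vec: (batch, v_size)
    # -> (batch, seq_len, size + v_size)
    seq, vec = x
    vec = vec.unsqueeze(1).expand(-1, seq.size(1), -1)
    return torch.cat([seq, vec], dim=2)
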
Example #2
    def forward(self, t):
        # mask: 1.0 for real tokens (id > 0), 0.0 for padding
        mask = torch.gt(torch.unsqueeze(t, 2), 0).type(
            torch.cuda.FloatTensor
        )  # (batch_size, sent_len, 1)
        mask.requires_grad = False
        t = self.embeds(t)

        t = self.fc1_dropout(t)

        t = t.mul(mask)  # (batch_size,sent_len,char_size)

        # two stacked LSTMs; only the second layer's final states survive
        t, (h_n, c_n) = self.lstm1(t, None)
        t, (h_n, c_n) = self.lstm2(t, None)

        t_max, t_max_index = seq_max_pool([t, mask])  # masked max over time

        t_dim = list(t.size())[-1]  # unused
        h = seq_and_vec([t, t_max])  # append the pooled vector to every step

        h = h.permute(0, 2, 1)  # (batch, channels, seq_len) for Conv1d
        h = self.conv1(h)

        h = h.permute(0, 2, 1)  # back to (batch, seq_len, channels)

        ps1 = self.fc_ps1(h)
        ps2 = self.fc_ps2(h)

        return [ps1, ps2, t, t_max, mask]
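
For orientation, the layer dimensions this forward implies can be reconstructed. A hypothetical skeleton of the owning module (layer names taken from the calls above, all sizes illustrative, not the repository's actual definition):

import torch.nn as nn

class Encoder(nn.Module):  # hypothetical container for the forward above
    def __init__(self, vocab_size, char_size, hidden_size):
        super().__init__()
        self.embeds = nn.Embedding(vocab_size, char_size)
        self.fc1_dropout = nn.Dropout(0.25)  # dropout rate is a guess
        # stacked bidirectional LSTMs whose concatenated output is hidden_size wide
        self.lstm1 = nn.LSTM(char_size, hidden_size // 2,
                             batch_first=True, bidirectional=True)
        self.lstm2 = nn.LSTM(hidden_size, hidden_size // 2,
                             batch_first=True, bidirectional=True)
        # conv1 consumes [t; t_max], i.e. 2 * hidden_size channels after the permute
        self.conv1 = nn.Conv1d(hidden_size * 2, hidden_size,
                               kernel_size=3, padding=1)
        # one score per token for each output head
        self.fc_ps1 = nn.Linear(hidden_size, 1)
        self.fc_ps2 = nn.Linear(hidden_size, 1)
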
Example #3
    def forward(self, t, t_max, k1, k2):
        # gather the encoder outputs at the two given token positions
        k1 = seq_gather([t, k1])
        k2 = seq_gather([t, k2])

        k = torch.cat([k1, k2], 1)
        h = seq_and_vec([t, t_max])  # append the pooled sentence vector to every step
        h = seq_and_vec([h, k])  # append the gathered pair as well
        h = h.permute(0, 2, 1)  # (batch, channels, seq_len) for Conv1d
        h = self.conv1(h)
        h = h.permute(0, 2, 1)

        po1 = self.fc_ps1(h)
        po2 = self.fc_ps2(h)

        return [po1.cuda(), po2.cuda()]  # .cuda() is a no-op if already on the GPU
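
Example #3 additionally relies on seq_gather. A plausible minimal version, assuming each index tensor holds one position per sequence:

import torch

def seq_gather(x):
    # Pick one time step per sequence.
    # seq: (batch, seq_len, size); idxs: (batch, 1) integer positions
    # -> (batch, size)
    seq, idxs = x
    idxs = idxs.long().view(-1, 1, 1).expand(-1, 1, seq.size(2))
    return torch.gather(seq, 1, idxs).squeeze(1)
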
Example #4
    def to_rel(self, input, h, encoder_o, mask):
        # one decoder step with attention over the encoder outputs
        output, attn, h = self.forward_step(input, h, encoder_o)
        new_encoder_o = seq_and_vec([encoder_o, output.squeeze(1)])

        new_encoder_o = new_encoder_o.permute(0, 2, 1)  # (batch, channels, seq_len)
        new_encoder_o = self.conv2_to_1_rel(new_encoder_o)
        new_encoder_o = new_encoder_o.permute(0, 2, 1)

        output = self.dropout(new_encoder_o)
        output = activation(output)
        output = self.rel(output)
        output, _ = seq_max_pool([output, mask])  # masked max over time

        return output, h, new_encoder_o, attn
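
forward_step is not shown in Examples #4 and #5. One shape-compatible sketch (a single LSTM step with dot-product attention; purely illustrative, not the repository's actual code):

import torch
import torch.nn as nn
import torch.nn.functional as F

class DecoderStepSketch(nn.Module):  # hypothetical
    def __init__(self, hidden_size):
        super().__init__()
        self.lstm = nn.LSTM(hidden_size, hidden_size, batch_first=True)
        self.combine = nn.Linear(hidden_size * 2, hidden_size)

    def forward_step(self, input, h, encoder_o):
        # input: (batch, 1, hidden); encoder_o: (batch, seq_len, hidden)
        output, h = self.lstm(input, h)                         # (batch, 1, hidden)
        scores = torch.bmm(output, encoder_o.transpose(1, 2))   # (batch, 1, seq_len)
        attn = F.softmax(scores, dim=-1)
        context = torch.bmm(attn, encoder_o)                    # (batch, 1, hidden)
        output = torch.tanh(self.combine(torch.cat([output, context], dim=-1)))
        return output, attn, h
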
Example #5
    def to_ent(self, input, h, encoder_o, mask):
        # TODO mask
        # one decoder step with attention over the encoder outputs
        output, attn, h = self.forward_step(input, h, encoder_o)
        output = output.squeeze(1)

        new_encoder_o = seq_and_vec([encoder_o, output])

        new_encoder_o = new_encoder_o.permute(0, 2, 1)  # (batch, channels, seq_len)
        new_encoder_o = self.conv2_to_1_ent(new_encoder_o)
        new_encoder_o = new_encoder_o.permute(0, 2, 1)

        output = self.dropout(new_encoder_o)
        output = activation(output)

        # per-token scores, squeezed to (batch, seq_len)
        ent1 = self.ent1(output).squeeze(2)
        ent2 = self.ent2(output).squeeze(2)

        output = ent1, ent2

        return output, h, new_encoder_o, attn
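
ent1 and ent2 are per-token score vectors of shape (batch, seq_len). One illustrative way to turn them into a single span, assuming they are start/end logits (a greedy decode; decode_span is a hypothetical helper, not part of the examples above):

import torch

def decode_span(ent1, ent2):
    # Greedy single-span decode; assumes one entity per sequence.
    start = ent1.argmax(dim=1)  # (batch,)
    pos = torch.arange(ent2.size(1), device=ent2.device).unsqueeze(0)
    # forbid an end position before the chosen start
    masked = ent2.masked_fill(pos < start.unsqueeze(1), float("-inf"))
    end = masked.argmax(dim=1)
    return start, end
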