Example #1
    def forward(self, pre_inputs, pre_length, hyp_inputs, hyp_length):
        pre_inputs_emb = self.emb_dropout(self.word_embedding(pre_inputs))
        hyp_inputs_emb = self.emb_dropout(self.word_embedding(hyp_inputs))
        (pre_hs, pre_cs), pre_hx = self.encoder(inputs=pre_inputs_emb)
        (hyp_hs, hyp_cs), hyp_hx = self.encoder(inputs=hyp_inputs_emb)
        if self.enc_bidir:
            pre_inputs_bw_emb = utils.reverse_padded_sequence(
                inputs=pre_inputs_emb, length=pre_length)
            hyp_inputs_bw_emb = utils.reverse_padded_sequence(
                inputs=hyp_inputs_emb, length=hyp_length)
            pre_hx_bw = hyp_hx_bw = None
            if self.enc_bidir_init:
                pre_hx_bw = pre_hx
                hyp_hx_bw = hyp_hx
            (pre_hs_bw,
             pre_cs_bw), pre_hx_bw = self.encoder_bw(inputs=pre_inputs_bw_emb,
                                                     hx=pre_hx_bw)
            (hyp_hs_bw,
             hyp_cs_bw), hyp_hx_bw = self.encoder_bw(inputs=hyp_inputs_bw_emb,
                                                     hx=hyp_hx_bw)
            # Note that the backward states are still in reversed ('flipped') order,
            # but the pooling applied below does not depend on order, so we leave them as-is.
            pre_hs = torch.cat([pre_hs, pre_hs_bw], dim=2)
            hyp_hs = torch.cat([hyp_hs, hyp_hs_bw], dim=2)
        pre_vector = self.enc_pool(inputs=pre_hs, length=pre_length)
        hyp_vector = self.enc_pool(inputs=hyp_hs, length=hyp_length)

        mlp_input = self.matching(s1=pre_vector, s2=hyp_vector)
        if self.mlp_use_bn:
            mlp_input = self.bn_mlp_input(mlp_input)
        mlp_input = self.clf_dropout(mlp_input)
        mlp_output = self.mlp(mlp_input)
        logit = self.output_linear(mlp_output)
        return logit
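
In Example #1, `self.matching` combines the two pooled sentence vectors into a single feature vector before the MLP classifier. Its implementation is not shown here; a common choice in sentence-pair models is to concatenate the vectors with their element-wise difference and product. A minimal sketch under that assumption (the function name `match_sentence_pair` is illustrative, not part of the code above):

import torch

def match_sentence_pair(s1, s2):
    # s1, s2: pooled sentence vectors of shape (batch, hidden).
    # Heuristic matching features: concatenation, absolute difference,
    # and element-wise product (an assumption, not taken from the example).
    return torch.cat([s1, s2, torch.abs(s1 - s2), s1 * s2], dim=1)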
Example #2
    def encode(self, inputs, length):
        inputs_emb = self.word_embedding(inputs)
        (hs, cs), hx = self.encoder(inputs=inputs_emb)
        if self.enc_bidir:
            # Reverse each sequence over its valid timesteps and run the
            # backward encoder, optionally initialized from the forward states.
            inputs_bw_emb = utils.reverse_padded_sequence(
                inputs=inputs_emb, length=length)
            hx_bw = None
            if self.enc_bidir_init:
                hx_bw = hx
            (hs_bw, cs_bw), hx_bw = self.encoder_bw(
                inputs=inputs_bw_emb, hx=hx_bw)
            # Concatenate forward and backward states along the feature dimension.
            hs = torch.cat([hs, hs_bw], dim=2)
        sentence_vector = self.enc_pool(inputs=hs, length=length)
        return sentence_vector
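
All three examples call `utils.reverse_padded_sequence` to flip each sequence over its valid timesteps only, so that the backward encoder reads real tokens first and padding last. A minimal sketch of such a helper, assuming batch-first inputs of shape (batch, time, dim) and a 1-D `length` tensor; this is an illustration, not the repository's actual implementation:

import torch

def reverse_padded_sequence(inputs, length):
    # inputs: (batch, time, dim); length: (batch,) valid timesteps per sequence.
    reversed_inputs = inputs.clone()
    for i, seq_len in enumerate(length.tolist()):
        # Flip only the valid prefix; padded positions stay where they are.
        reversed_inputs[i, :seq_len] = inputs[i, :seq_len].flip(0)
    return reversed_inputs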
Example #3
    def forward(self, inputs, length):
        inputs_emb = self.emb_dropout(self.word_embedding(inputs))
        (hs, cs), hx = self.encoder(inputs=inputs_emb)
        if self.enc_bidir:
            inputs_bw_emb = utils.reverse_padded_sequence(inputs=inputs_emb,
                                                          length=length)
            hx_bw = None
            if self.enc_bidir_init:
                hx_bw = hx
            (hs_bw, cs_bw), hx_bw = self.encoder_bw(inputs=inputs_bw_emb,
                                                    hx=hx_bw)
            # Note that the backward states are still in reversed ('flipped') order,
            # but the pooling applied below does not depend on order, so we leave them as-is.
            hs = torch.cat([hs, hs_bw], dim=2)
        vector = self.enc_pool(inputs=hs, length=length)

        mlp_input = vector
        if self.mlp_use_bn:
            mlp_input = self.bn_mlp_input(mlp_input)
        mlp_input = self.clf_dropout(mlp_input)
        mlp_output = self.mlp(mlp_input)
        logit = self.output_linear(mlp_output)
        return logit
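
In each example, `self.enc_pool` reduces the per-timestep hidden states to one fixed-size vector while respecting the true sequence lengths. The pooling operator itself is not shown; a length-masked mean pool is one typical choice. A minimal sketch under that assumption (the name `masked_mean_pool` is illustrative):

import torch

def masked_mean_pool(inputs, length):
    # inputs: (batch, time, dim); length: (batch,) valid timesteps per sequence.
    max_len = inputs.size(1)
    # mask: (batch, time, 1) with 1.0 at valid positions and 0.0 at padding.
    mask = (torch.arange(max_len, device=inputs.device)[None, :]
            < length[:, None]).unsqueeze(-1).float()
    summed = (inputs * mask).sum(dim=1)
    return summed / length.unsqueeze(-1).float().clamp(min=1)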