Example 1
    def forward(self, packed_input, packed_posterior, fw_adjs, bw_adjs,
                fw_edgeid, bw_edgeid, fw_edgedep, bw_edgedep, hidden=None):
        # packed_input:     pack(data x n_feature, batch_size)
        # packed_posterior: pack(data x src_len, batch_size)
        assert isinstance(packed_input, PackedSequence)
        input = packed_input.data

        if self.alpha and self.training:
            input = data_dropout(input, self.alpha)

        # Look up the embedding of each token-level feature column.
        word_fix_embed = self.word_fix_lut(input[:, TXT_WORD])
        lemma_emb = self.lemma_lut(input[:, TXT_LEMMA])
        pos_emb = self.pos_lut(input[:, TXT_POS])
        ner_emb = self.ner_lut(input[:, TXT_NER])

        emb = torch.cat([word_fix_embed, lemma_emb, pos_emb, ner_emb], 1)  # data, embed
        emb = PackedSequence(emb, packed_input.batch_sizes)

        # Sentence-level encoder over the packed token embeddings.
        outputs, hidden_t = self.sencrnn(emb, hidden)

        # Graph-aware node embeddings from the encoder states and the edge structure,
        # then re-indexed by the posterior alignment.
        node_embedding = self.node_embedding(outputs, fw_adjs, bw_adjs,
                                             fw_edgeid, bw_edgeid, fw_edgedep, bw_edgedep)
        poster_emb, amr_len = self.posteriorIndictedEmb(node_embedding, packed_posterior)

        # Second RNN over the posterior-indexed embeddings; h: layer x batch_size x hidden_size.
        Outputs = self.rnn(poster_emb, hidden)[0]

        return DoublePackedSequence(Outputs, amr_len, Outputs.data)
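The data_dropout helper used in every example is project-specific and not shown in these snippets. A minimal sketch of the kind of feature dropout the calls above suggest, assuming it overwrites whole token rows with an UNK id with probability alpha during training (both the UNK value and the row-wise behaviour are assumptions, not taken from the source):

    import torch

    UNK = 1  # hypothetical id of the unknown symbol

    def data_dropout(input, alpha):
        # input: LongTensor (total_tokens, n_features) of vocabulary ids.
        # Replace each row with UNK with probability alpha, so the model does not
        # over-rely on always seeing gold lemma/POS/NER features at training time.
        mask = torch.rand(input.size(0), device=input.device) < alpha
        dropped = input.clone()
        dropped[mask] = UNK
        return dropped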
Example 2
    def forward(self, packed_input: PackedSequence, hidden=None):
        # packed_input: pack(data x n_feature, batch_size)
        input = packed_input.data
        if self.alpha and self.training:
            input = data_dropout(input, self.alpha)

        word_fix_embed = self.word_fix_lut(input[:, TXT_WORD])
        lemma_emb = self.lemma_lut(input[:, TXT_LEMMA])
        pos_emb = self.pos_lut(input[:, TXT_POS])
        ner_emb = self.ner_lut(input[:, TXT_NER])

        # Dropout is applied to the trainable embeddings only; the fixed word
        # embeddings are concatenated afterwards, untouched.
        emb = self.drop_emb(torch.cat([lemma_emb, pos_emb, ner_emb], 1))  # data, embed
        emb = torch.cat([word_fix_embed, emb], 1)  # data, embed
        emb = PackedSequence(emb, packed_input.batch_sizes)
        outputs, hidden_t = self.rnn(emb, hidden)
        return outputs
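These encoders consume a PackedSequence whose .data is a LongTensor with one id column per TXT_* constant. A minimal sketch of how such an input could be built and passed in, assuming a batch-first id tensor and an already-constructed encoder instance (the sizes, the vocabulary range, and the name encoder are illustrative assumptions):

    import torch
    from torch.nn.utils.rnn import pack_padded_sequence

    batch, max_len, n_feature = 2, 5, 4        # columns: word, lemma, POS, NER ids
    features = torch.randint(0, 100, (batch, max_len, n_feature))
    lengths = torch.tensor([5, 3])             # sequences sorted by decreasing length

    packed = pack_padded_sequence(features, lengths, batch_first=True)
    # outputs = encoder(packed)                # encoder: a module with the forward above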
Example 3
    def forward(self, input, index, src_enc):
        assert isinstance(input, MyPackedSequence), input
        input, lengths = input
        if self.alpha and self.training:
            input = data_dropout(input, self.alpha)

        # Embed the AMR category and lemma of each node.
        cat_embed = self.cat_lut(input[:, AMR_CAT])
        lemma_embed = self.lemma_lut(input[:, AMR_LE])
        amr_emb = torch.cat([cat_embed, lemma_embed], 1)

        # Gather the source encoder states aligned to each node.
        head_emb = self.getEmb(index, src_enc)  # packed, mydoublepacked

        root_emb = torch.cat([amr_emb, head_emb.data], 1)
        root_emb = self.root(root_emb)

        return MyPackedSequence(root_emb, lengths)
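MyPackedSequence is another project-specific container; from the way it is unpacked (input, lengths = input) and rebuilt here, it behaves like a (data, lengths) pair. A minimal stand-in under exactly that assumption, together with the kind of splitting the myunpack call in Example 5 appears to perform (both are sketches, not the project's actual definitions):

    from collections import namedtuple

    # Assumed two-field interface: a flat (total, dim) tensor holding the concatenated
    # sequences plus the per-sequence lengths needed to split it back apart.
    MyPackedSequence = namedtuple("MyPackedSequence", ["data", "lengths"])

    def myunpack(data, lengths):
        # Split the flat tensor back into one (len_i, dim) tensor per sequence.
        out, offset = [], 0
        for n in lengths:
            out.append(data[offset:offset + n])
            offset += n
        return out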
Example 4
    def forward(self, packed_input, hidden=None):
        # packed_input: pack(data x n_feature, batch_size)
        assert isinstance(packed_input, PackedSequence)
        input = packed_input.data
        if self.alpha and self.training:
            input = data_dropout(input, self.alpha)

        word_fix_embed = self.word_fix_lut(input[:, TXT_WORD])
        lemma_emb = self.lemma_lut(input[:, TXT_LEMMA])
        pos_emb = self.pos_lut(input[:, TXT_POS])
        ner_emb = self.ner_lut(input[:, TXT_NER])

        emb = torch.cat([word_fix_embed, lemma_emb, pos_emb, ner_emb], 1)  # data, embed
        emb = PackedSequence(emb, packed_input.batch_sizes)

        outputs = self.rnn(emb, hidden)[0]
        return outputs
Example 5
    def forward(self, input, index, src_enc):
        assert isinstance(input, MyPackedSequence), input
        input, lengths = input
        if self.alpha and self.training:
            input = data_dropout(input, self.alpha)

        # Embed the AMR category and lemma of each node.
        cat_embed = self.cat_lut(input[:, AMR_CAT])
        lemma_embed = self.lemma_lut(input[:, AMR_LE])
        amr_emb = torch.cat([cat_embed, lemma_embed], 1)

        # Source-side encoder states for each head and for all candidate dependents.
        head_emb_t, dep_emb_t, length_pairs = self.getEmb(index, src_enc)  # packed, mydoublepacked

        head_emb = torch.cat([amr_emb, head_emb_t.data], 1)

        # Expand each sequence's (n, d) node embeddings into an (n, n, d) grid so every
        # head/dependent pair carries the dependent-side node embedding.
        dep_amr_emb_t = myunpack(*MyPackedSequence(amr_emb, lengths))
        dep_amr_emb = [emb.unsqueeze(0).expand(emb.size(0), emb.size(0), emb.size(-1))
                       for emb in dep_amr_emb_t]
        mydouble_amr_emb = mydoublepack(dep_amr_emb, length_pairs)

        dep_emb = torch.cat([mydouble_amr_emb.data, dep_emb_t.data], -1)

        head_packed = MyPackedSequence(self.head(head_emb), lengths)  # total, rel_dim
        head_amr_packed = MyPackedSequence(amr_emb, lengths)          # total, rel_dim

        # Project every pair with the dep layer, keeping the pairwise layout.
        size = dep_emb.size()
        dep = self.dep(dep_emb.view(-1, size[-1])).view(size[0], size[1], -1)
        dep_packed = MyDoublePackedSequence(MyPackedSequence(dep, mydouble_amr_emb[0][1]),
                                            mydouble_amr_emb[1], dep)

        return head_amr_packed, head_packed, dep_packed
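The unsqueeze/expand step above is what turns each sequence's (n, d) node embeddings into an (n, n, d) grid so every head/dependent pair can be scored. A small standalone illustration with plain tensors (the sizes are arbitrary):

    import torch

    n, d = 3, 4                      # 3 AMR nodes, embedding size 4
    emb = torch.randn(n, d)          # per-node embeddings for one sequence

    pairwise = emb.unsqueeze(0).expand(n, n, d)
    # pairwise[i, j] equals emb[j]: row i lists the candidate dependents for head i,
    # ready to be concatenated with head-side features along the last dimension.
    print(pairwise.shape)            # torch.Size([3, 3, 4])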