Example No. 1
    def forward(self, input, hidden_state):

        # Sentence-level encoder: GRU features followed by attention pooling.
        f_output, h_output = self.gru(input, hidden_state)
        output = matrix_mul(f_output, self.sent_weight, self.sent_bias)
        output = matrix_mul(output, self.context_weight).permute(1, 0)
        output = F.softmax(output, dim=1)  # attention weights over the sequence
        output = element_wise_mul(f_output, output.permute(1, 0)).squeeze(0)
        output = self.fc(output)  # final classification layer

        return output, h_output
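
Every example on this page calls two helpers, matrix_mul and element_wise_mul, that the snippets themselves do not define. The sketch below is a plausible reconstruction, consistent with how the helpers are called here rather than taken verbatim from any one of these repositories; note that it applies tanh inside matrix_mul, while Example No. 5 wraps matrix_mul in an explicit torch.tanh, so that fork's helper presumably omits the activation.

    import torch

    def matrix_mul(input, weight, bias=False):
        # Project every time step of a (seq_len, batch, features) tensor with
        # `weight`, add the optional (1, out_features) bias, and apply tanh.
        feature_list = []
        for feature in input:
            feature = torch.mm(feature, weight)
            if isinstance(bias, torch.nn.Parameter):
                feature = feature + bias.expand(feature.size()[0], bias.size()[1])
            feature = torch.tanh(feature).unsqueeze(0)
            feature_list.append(feature)
        return torch.cat(feature_list, 0).squeeze()

    def element_wise_mul(input1, input2):
        # Weight each time step of input1 by the matching scalar in input2 and
        # sum over the sequence dimension, yielding (1, batch, features).
        feature_list = []
        for feature_1, feature_2 in zip(input1, input2):
            feature_2 = feature_2.unsqueeze(1).expand_as(feature_1)
            feature_list.append((feature_1 * feature_2).unsqueeze(0))
        return torch.sum(torch.cat(feature_list, 0), 0).unsqueeze(0)
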
Example No. 2
    def forward(self, input, hidden_state):

        output = self.lookup(input)  # embed token ids
        f_output, h_output = self.gru(output.float(), hidden_state)  # feature output and hidden state output
        output = matrix_mul(f_output, self.word_weight, self.word_bias)
        output = matrix_mul(output, self.context_weight).permute(1, 0)
        output = F.softmax(output, dim=1)  # attention weights over the words
        output = element_wise_mul(f_output, output.permute(1, 0))

        return output, h_output
Example No. 3
    def forward(self, input):

        # MLP-based variant: project the graph features (no recurrence), then
        # apply the same attention pooling as the GRU encoders above.
        f_output = self.mlp_graph(input)
        f_output = self.r(f_output)
        output = matrix_mul(f_output, self.word_weight, self.word_bias)
        output = matrix_mul(output, self.context_weight).permute(1, 0)
        output = F.softmax(output, dim=1)
        output = element_wise_mul(f_output, output.permute(1, 0))
        return f_output, output
Example No. 4
    def forward(self, input, hidden_state):

        output = self.lookup(input)
        f_output, h_output = self.gru(
            output.float(),
            hidden_state)  # feature output and hidden state output
        # This step is analogous to computing the keys K in dot-product
        # attention: K = W_K x, where x is every word to attend to.
        output = matrix_mul(f_output, self.word_weight, self.word_bias)
        # This is roughly the Q @ K^T step: the context weight plays the role
        # of a learned query vector Q, trained to pick out the most
        # informative words in the sentence.
        output = matrix_mul(output, self.context_weight).permute(1, 0)
        output = F.softmax(output, dim=1)
        output = self.element_wise_mul(f_output, output.permute(1, 0))

        return output, h_output
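
To make the Q/K analogy in Example No. 4 concrete, here is a small shape walk-through using the helper sketch above. All sizes are made up for illustration: f_output stands in for the GRU features, word_weight for W_K, and context_weight for the learned query.

    import torch
    import torch.nn.functional as F

    seq_len, batch, hidden = 7, 2, 16                           # made-up sizes
    f_output = torch.randn(seq_len, batch, 2 * hidden)          # GRU features h_it
    word_weight = torch.randn(2 * hidden, 2 * hidden)           # W_K
    word_bias = torch.nn.Parameter(torch.zeros(1, 2 * hidden))  # b_w
    context_weight = torch.randn(2 * hidden, 1)                 # learned query u_w

    u = matrix_mul(f_output, word_weight, word_bias)      # (seq_len, batch, 2*hidden)
    scores = matrix_mul(u, context_weight).permute(1, 0)  # (batch, seq_len), i.e. Q.K^T
    alpha = F.softmax(scores, dim=1)                      # attention over time steps
    sent_vec = element_wise_mul(f_output, alpha.permute(1, 0))
    print(sent_vec.shape)                                 # torch.Size([1, 2, 32])
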
Example No. 5
    def forward(self, input, hidden_state):
        # Sentence encoder: run the GRU to get the feature output and the
        # hidden state output.
        f_output, h_output = self.gru(input, hidden_state)
        # One-layer MLP with tanh() to get the hidden representation.
        output = torch.tanh(
            matrix_mul(f_output, self.sent_weight, self.sent_bias))
        # Score each sentence against the sentence-level context vector.
        output = matrix_mul(output, self.context_weight).permute(1, 0)
        # Normalize into importance weights a_it through a softmax function.
        output = F.softmax(output, dim=1)
        # With the attention weights, the document vector is the weighted sum
        # of its sentence vectors.
        output = element_wise_mul(f_output, output.permute(1, 0)).squeeze(0)
        # output = self.fc(output)  # fc layer for classification output removed

        return output, h_output
Example No. 6
    def forward(self, input, hidden_state):
        # Look up the word vectors from the vocabulary.
        output = self.lookup(input)
        # Word encoder: run the GRU to get the feature output and the hidden
        # state output.
        f_output, h_output = self.gru(output.float(), hidden_state)
        # One-layer MLP with tanh() to get the hidden representation.
        output = torch.tanh(
            matrix_mul(f_output, self.word_weight, self.word_bias))
        # Score each word against the word-level context vector.
        output = matrix_mul(output, self.context_weight).permute(1, 0)
        # Normalize into importance weights a_it through a softmax function.
        output = F.softmax(output, dim=1)
        # With the attention weights, the sentence vector is the weighted sum
        # of its word vectors.
        output = element_wise_mul(f_output, output.permute(1, 0))
        return output, h_output
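
Examples No. 5 and No. 6 are the two halves of a hierarchical attention network: the word encoder pools each sentence's words into a sentence vector, and the sentence encoder pools those into a document vector. A minimal sketch of how they are typically chained follows; han_forward, word_net, and sent_net are hypothetical names, since the glue code is not shown in these examples.

    import torch

    def han_forward(word_net, sent_net, document, word_hidden, sent_hidden):
        # document: (num_sentences, batch, words_per_sentence) of token ids.
        sentence_vectors = []
        for sentence in document:
            # (batch, words) -> (words, batch), the layout the GRU expects here.
            word_out, word_hidden = word_net(sentence.permute(1, 0), word_hidden)
            sentence_vectors.append(word_out)        # each (1, batch, 2*hidden)
        sent_input = torch.cat(sentence_vectors, 0)  # (num_sentences, batch, 2*hidden)
        doc_vector, sent_hidden = sent_net(sent_input, sent_hidden)
        return doc_vector, word_hidden, sent_hidden
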
Example No. 7
    def forward(self, input, hidden_state):

        f_output, h_output = self.gru(input, hidden_state)
        output = matrix_mul(f_output, self.sent_weight, self.sent_bias)
        output_2 = matrix_mul(output, self.context_weight).permute(1, 0)
        # Guard: a single NaN in the scores would propagate through the
        # softmax to every position, so bail out early.
        if torch.isnan(output_2[0][0]):
            print('nan detected')
            return None

        output = F.softmax(output_2, dim=-1)
        output = element_wise_mul(f_output, output.permute(1, 0)).squeeze(0)
        output = self.dropout(self.fc(output))

        return output, h_output
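
The early return in Example No. 7 exists because a single NaN anywhere in the scores poisons the whole softmax. A quick self-contained illustration, checking the full tensor with .any() rather than one element:

    import torch

    scores = torch.tensor([[0.3, float('nan'), 1.2]])
    if torch.isnan(scores).any():         # catches a NaN anywhere in the tensor
        print('nan detected')
    print(torch.softmax(scores, dim=-1))  # tensor([[nan, nan, nan]])
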
Example No. 8
    def forward(self, input, hidden_state):
        f_output, h_output = self.gru(input, hidden_state)
        output = matrix_mul(f_output, self.sent_weight, self.sent_bias)
        output = matrix_mul(output, self.context_weight).permute(1, 0)
        output = F.softmax(output, dim=1)
        output = element_wise_mul(f_output, output.permute(1, 0)).squeeze(0)
        # output = self.fc(output)  # classification head removed; this variant
        # also returns the raw GRU features alongside the pooled output

        return f_output, output, h_output