    def get_pos_matrix(self, pos_list):
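        """Embed each POS-tag index in `pos_list` and stack the results into a matrix."""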
        pos_emb = [
            self.pos_embeddings(
                utils.set_cuda(torch.tensor(word_pos), self.cuda_on))
            for word_pos in pos_list
        ]
        return torch.stack(pos_emb)

    def get_ner_matrix(self, ner_list):
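        """Embed each NER-tag index in `ner_list` and stack the results into a matrix."""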
        ner_emb = [
            self.ner_embeddings(
                utils.set_cuda(torch.tensor(word_ner), self.cuda_on))
            for word_ner in ner_list
        ]
        return torch.stack(ner_emb)

    def compute_similarity(self, word, question):
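        """Sum the embedding similarity between `word` and every question word,
        falling back to the 'unk' vector for out-of-vocabulary tokens."""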
        word_value = word.lower() if word.lower() in self.words_embeddings.vocab else 'unk'
        similarity = []
        for question_word in question:
            question_value = (question_word.lower()
                              if question_word.lower() in self.words_embeddings.vocab
                              else 'unk')
            similarity.append(
                self.words_embeddings.similarity(word_value, question_value))
        return utils.set_cuda(torch.tensor(sum(similarity)), self.cuda_on)

    def forward(self, paragraph, question):
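        """Build feature-augmented paragraph and question representations and
        encode them with the document (dFFN) and question (qFFN) networks."""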
        paragraph_emb = self.get_sentence_embeddings(paragraph['context'])
        paragraph_pos = paragraph['context_pos']
        paragraph_ner = paragraph['context_ner']
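        # The per-token exact-match indicators live under the question dict in
        # this data layout; they are consumed here as paragraph-side features.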
        paragraph_match = torch.stack([
            utils.set_cuda(torch.tensor(match), self.cuda_on)
            for match in question['exact_match']
        ])
        question_emb = self.get_sentence_embeddings(question['question'])
        paragraph_vector = self.create_doc_vector(paragraph_emb, paragraph_pos,
                                                  paragraph_ner,
                                                  paragraph_match,
                                                  paragraph['context'],
                                                  question['question'])
        # Add a batch dimension of 1 before feeding the encoders.
        question_vector = self.qFFN(question_emb.unsqueeze(0))
        paragraph_vector = self.dFFN(paragraph_vector.unsqueeze(0))

        return paragraph_vector, question_vector, paragraph_emb, question_emb

    def get_word_embeddings(self, word):
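        """Return the pretrained embedding for `word`, or the 'unk' vector when
        the word is out of vocabulary."""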
        key = word.lower() if word.lower() in self.words_embeddings.vocab else 'unk'
        return utils.set_cuda(torch.tensor(self.words_embeddings[key]), self.cuda_on)