Example #1
def forward(self, context_idx, context_mask, context_segements,
            query_ids, query_mask, query_segements):
    # Embed the query and the context separately, then concatenate their
    # [CLS] (first-token) representations into one vector per example.
    query_emb = self.embed_question(query_ids, query_segements)
    context_emb = self.embed_context(context_idx, context_segements)
    embedding = torch.cat((query_emb[:, 0], context_emb[:, 0]), dim=1)
    # Project to start/end logits, then mask out padded context positions.
    start_embedding = self.start_pointer(embedding)
    end_embeddings = self.end_pointer(embedding)
    start_embedding = mask(start_embedding, context_mask, -1)
    end_embeddings = mask(end_embeddings, context_mask, -1)
    return start_embedding, end_embeddings
Example #2
def pointer(self, embeddings, input_mask):
    """Project token embeddings to start/end logits and mask padding."""
    # size: batch_size, seq_length, 2
    embeddings = self.pointer_linear(embeddings)
    embeddings = mask(embeddings, input_mask, -2)
    # start_embeddings = embeddings[:, :, 0].squeeze(dim=-1)
    # end_embeddings = embeddings[:, :, 1].squeeze(dim=-1)
    return embeddings
Example #3
File: rmq.py Project: okateim/RmQ
    def _RmQ_l(self, l, i, j):
        # In-block range-minimum query; here `mask` is a bit-level helper,
        # unlike the tensor-masking helper in the PyTorch examples.
        if i == j:
            return i
        if i > j:
            # Ensure i <= j before querying.
            i, j = j, i

        # Keep only candidate bits at positions >= i; the lowest set bit
        # (lsb) of what remains is the position of the range minimum.
        w = mask(l[j], i)
        return 0 if w == 0 else lsb(w)
Example #4
def query_pointer(self, embeddings, input_mask):
    """Project token embeddings to start/end logits and mask padding."""
    # size: batch_size, seq_length, 2
    batch_size, seq_length, dim = embeddings.shape
    embeddings = self.query_pointor_linear(embeddings)
    # The reshape assumes a fixed maximum sequence length of 512.
    embeddings = reshape_tensor(embeddings, (batch_size, 512, 2))
    embeddings = mask(embeddings, input_mask, -2)
    start_embeddings = embeddings[:, :, 0].squeeze(dim=-1)
    end_embeddings = embeddings[:, :, 1].squeeze(dim=-1)
    return start_embeddings, end_embeddings
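
Note: the PyTorch examples (#1, #2 and #4) all call a helper named mask(logits, input_mask, dim) whose definition is not shown above. The sketch below is an assumption about what such a helper typically does in span-prediction code, not the projects' actual implementation: positions where input_mask is 0 are filled with a large negative value so a later softmax effectively ignores them, and dim (assumed negative) names the axis of logits that the mask's last axis lines up with.

import torch

def mask(logits, input_mask, dim):
    # Sketch (assumption): suppress padded positions before a softmax.
    # `dim` is assumed to be a negative axis index of `logits` that the
    # last axis of `input_mask` corresponds to; trailing singleton axes
    # are added to the mask so broadcasting lines the two tensors up.
    m = input_mask
    for _ in range(-dim - 1):
        m = m.unsqueeze(-1)
    return logits.masked_fill(m == 0, torch.finfo(logits.dtype).min)

For instance, in Example #2 the call mask(embeddings, input_mask, -2) would broadcast a (batch_size, seq_length) mask over the trailing start/end dimension of a (batch_size, seq_length, 2) tensor.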