Example #1
 def __init__(self, dim):
     super().__init__()
     self.attn = Attention1D()
     # learned query vector, one weight per embedding dimension
     self.query = nn.Parameter(torch.Tensor(dim))
     # uniform fan-in initialization, matching nn.Linear's default bound
     bound = 1 / math.sqrt(dim)
     nn.init.uniform_(self.query, -bound, bound)
     self.to(get_best_device())
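Only the constructor is shown; a minimal sketch of the forward, assuming the Attention1D interface from Example #4 (hypothetical, not the original code):

 def forward(self, context, mask):
     # context: batch_size x time x dims, mask: batch_size x time
     # the shared learned query attends over every sequence in the batch
     output, attn_weights = self.attn(self.query, context, mask)
     return output  # batch_size x dims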
Example #2
 def __init__(self, fastTextModel, random_init=False):
     super().__init__()
     self.model = fastTextModel
     self.code_embedder = SelfAttnEmbedder(fastTextModel, random_init)
     self.description_embedder = AverageEmbedder(fastTextModel, random_init)
     self.cosine = nn.CosineSimilarity()
     self.to(get_best_device())
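Again only the constructor is shown; a plausible forward (a sketch, assuming both embedders map (tokens, mask) to batch_size x dims vectors) would score code/description pairs with the cosine module:

 def forward(self, code_tokens, code_mask, desc_tokens, desc_mask):
     code_vec = self.code_embedder(code_tokens, code_mask)
     desc_vec = self.description_embedder(desc_tokens, desc_mask)
     return self.cosine(code_vec, desc_vec)  # batch_size similarities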
Example #3
 def forward(self, tokens, mask):
     """
     tokens: list of list of strings: batch_size x time
     mask: batch_size x time
     """
     mask = mask.to(get_best_device())
     batch_size, time = mask.size()
     # tokens unpacks into the (ind, offsets) pair FastTextEmbeddingBag
     # expects (see Example #7); output: (batch_size * time) x dims
     token_embeddings = self.embedding_bag(*tokens)
     token_embeddings = token_embeddings.view(batch_size, time, -1)
     # zero out padded positions before pooling
     token_embeddings *= mask.unsqueeze(-1)
     # masked mean over time: divide each row's sum by its true length
     return token_embeddings.sum(dim=1) / mask.sum(dim=1).unsqueeze(-1)
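To make the masked mean concrete, here is a standalone toy check of the pooling arithmetic (not part of the model):

 import torch

 emb = torch.ones(2, 3, 4)           # batch_size x time x dims
 mask = torch.tensor([[1., 1., 0.],  # first sequence has true length 2
                      [1., 1., 1.]]) # second has true length 3
 pooled = (emb * mask.unsqueeze(-1)).sum(dim=1) / mask.sum(dim=1).unsqueeze(-1)
 print(pooled.shape)  # torch.Size([2, 4]); every entry is 1.0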
Example #4
    def forward(self, query, context, mask):
        """
        query: dims (a single learned query vector, shared across the batch)
        context: batch_size x time x dims
        mask: batch_size x time
        """
        mask = mask.to(get_best_device())
        batch_size = context.size(0)
        # query: dims => batch_size x dims x 1, ready for batched matmul
        query = query.unsqueeze(0).repeat(batch_size, 1).unsqueeze(-1)

        # dot-product scores; masked positions are forced to -inf so the
        # softmax assigns them zero weight
        # attn_scores: batch_size x time
        attn_scores = torch.where(
            mask.view(batch_size, -1) != 0.,
            torch.bmm(context, query).squeeze(-1), self.minus_inf)

        # attn_weights: batch_size x time
        attn_weights = self.softmax(attn_scores)
        # output: batch_size x dims (attention-weighted sum over time)
        output = (attn_weights.unsqueeze(-1) * context).sum(1)
        return output, attn_weights
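Hypothetical usage with made-up shapes (a batch of 4 sequences of length 10 and 128-dim embeddings), placing every tensor on the same device the modules were moved to:

 import torch

 device = get_best_device()
 attn = Attention1D()
 query = torch.randn(128, device=device)           # shared query: dims
 context = torch.randn(4, 10, 128, device=device)  # batch_size x time x dims
 mask = torch.ones(4, 10, device=device)
 mask[0, -1] = 0.                                  # pad position: zero weight
 output, weights = attn(query, context, mask)
 print(output.shape, weights.shape)  # (4, 128) and (4, 10); weight rows sum to 1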
Example #5
 def __init__(self, fastTextModel, random_init=False):
     super().__init__()
     self.embedding_bag = FastTextEmbeddingBag(fastTextModel, random_init)
     self.attn = SelfAttention1D(self.embedding_bag.dim)
     self.to(get_best_device())
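The forward is not shown; a hypothetical sketch mirroring AverageEmbedder's forward from Example #3, but pooling with the learned attention instead of a mean:

 def forward(self, tokens, mask):
     mask = mask.to(get_best_device())
     batch_size, time = mask.size()
     token_embeddings = self.embedding_bag(*tokens).view(batch_size, time, -1)
     return self.attn(token_embeddings, mask)  # batch_size x dims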
Example #6
 def __init__(self):
     super().__init__()
     self.softmax = nn.Softmax(dim=-1)
     # sentinel used to mask out padded positions before the softmax
     self.minus_inf = torch.tensor([[-float('inf')]],
                                   device=get_best_device())  # 1 x 1
     self.to(get_best_device())
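The -inf sentinel works because softmax sends masked scores to exactly zero weight, for example:

 import torch

 torch.softmax(torch.tensor([1.0, 2.0, -float('inf')]), dim=-1)
 # tensor([0.2689, 0.7311, 0.0000])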
Example #7
 def forward(self, ind, offsets):
     # move the flat index tensor and bag offsets to the model's device
     ind = ind.to(get_best_device())
     offsets = offsets.to(get_best_device())
     return super().forward(ind, offsets)
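The call to super().forward(ind, offsets) implies the class subclasses nn.EmbeddingBag, whose flat indices-plus-offsets convention looks like this (a standalone illustration of that convention, not the project's tokenizer):

 import torch
 import torch.nn as nn

 bag = nn.EmbeddingBag(num_embeddings=10, embedding_dim=4, mode='mean')
 # `ind` flattens all bags into one 1-D tensor; `offsets` marks bag starts
 ind = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])  # bags [1,2,4,5] and [4,3,2,9]
 offsets = torch.tensor([0, 4])
 out = bag(ind, offsets)  # 2 x 4: one pooled vector per bag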
Example #8
 def __init__(self, margin=0.05):
     super().__init__()
     self.zeros = torch.zeros(1, device=get_best_device())
     self.margin = margin
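A plausible forward for this margin loss (hypothetical; not shown in the snippet), hinging on the gap between matched and mismatched similarities:

 def forward(self, pos_sim, neg_sim):
     # pos_sim, neg_sim: batch_size similarity scores
     # penalize negatives that come within `margin` of the positives
     return torch.max(self.zeros, self.margin - pos_sim + neg_sim).mean()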
Example #9
 def __init__(self, fastTextModel, random_init=False):
     super().__init__()
     self.embedding_bag = FastTextEmbeddingBag(fastTextModel, random_init)
     self.to(get_best_device())