Example #1
def get_logprobs(self, f, G, G_normalized, Y_S, FCE=None):
    if FCE is None:
        FCE = self.FCE
    F = FCE(f, G)
    F_norm = torch.norm(F, p=2, dim=1).unsqueeze(1).expand_as(F)
    F_normalized = F.div(F_norm + 0.00001)
    # scores = F.mm(G_normalized.transpose(0, 1))  # The implementation of Ross et al., but it is not consistent with the original paper and lets large-norm features dominate
    scores = self.relu(F_normalized.mm(G_normalized.transpose(0, 1))) * 100  # The original paper uses cosine similarity; it is scaled by 100 here to sharpen the highest probability after the softmax
    softmax = self.softmax(scores)
    logprobs = (softmax.mm(Y_S) + 1e-6).log()
    return logprobs
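The factor of 100 in the scores line acts as an inverse softmax temperature: raw cosine similarities lie in [-1, 1], so an unscaled softmax over them is nearly uniform, while the scaled scores give a sharply peaked distribution. A minimal, self-contained sketch of this effect (the similarity values below are made up for illustration, not taken from the model):

import torch
import torch.nn.functional as F

# Hypothetical cosine similarities of one query to three support examples
scores = torch.tensor([[0.9, 0.7, 0.1]])

print(F.softmax(scores, dim=1))        # ~[0.44, 0.36, 0.20] -- nearly uniform
print(F.softmax(scores * 100, dim=1))  # ~[1.00, 0.00, 0.00] -- the best match dominates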
Example #2
def get_logprobs(self, f, G, G_normalized, Y_S, FCE=None):
    if FCE is None:
        FCE = self.FCE
    # Apply the FCE module to the query features f and the support features G
    F = FCE(f, G)
    # L2-normalize the resulting query embeddings (a small epsilon avoids division by zero)
    F_norm = torch.norm(F, p=2, dim=1).unsqueeze(1).expand_as(F)
    F_normalized = F.div(F_norm + 0.00001)
    # Cosine similarity between query and support embeddings; ReLU clips negative similarities
    scores = self.relu(
        F_normalized.mm(G_normalized.transpose(0, 1))
    ) * 100  # The original paper uses cosine similarity; it is scaled by 100 here to sharpen the highest probability after the softmax
    # Attention weights over the support set, then log-probabilities over the support labels Y_S
    softmax = self.softmax(scores)
    logprobs = (softmax.mm(Y_S) + 1e-6).log()
    return logprobs
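For orientation, a minimal end-to-end sketch of how this method could be exercised with dummy tensors, assuming the get_logprobs definition above has been pasted at module level. The tensor shapes, the Head container class, and the identity stand-in for the FCE module are assumptions for illustration only, not part of the original model:

import torch
import torch.nn as nn

class Head(nn.Module):  # hypothetical container supplying the attributes the method expects
    def __init__(self):
        super().__init__()
        self.FCE = lambda f, G: f          # identity stand-in for the original FCE module
        self.relu = nn.ReLU()
        self.softmax = nn.Softmax(dim=1)

Head.get_logprobs = get_logprobs           # attach the method pasted above at module level

n_query, n_support, n_way, feat_dim = 4, 10, 5, 64
f = torch.randn(n_query, feat_dim)                          # query features
G = torch.randn(n_support, feat_dim)                        # support features
G_normalized = nn.functional.normalize(G, p=2, dim=1)       # L2-normalized support features
Y_S = nn.functional.one_hot(
    torch.randint(0, n_way, (n_support,)), n_way
).float()                                                   # one-hot support labels

logprobs = Head().get_logprobs(f, G, G_normalized, Y_S)
print(logprobs.shape)  # torch.Size([4, 5]) -- one log-probability vector per query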