Example #1
 def log_rank_loss(self, y_pos, y_neg, temp=0):
     # Self-adversarial margin ranking loss (assumes `import torch` and
     # `import torch.nn.functional as F` at module level).
     M = y_pos.size(0)
     N = y_neg.size(0)
     # Margin-shift the scores.
     y_pos = self.gamma - y_pos
     y_neg = self.gamma - y_neg
     # C negatives were sampled per positive; group them as (M, C).
     C = int(N / M)
     y_neg = y_neg.view(C, -1).transpose(0, 1)
     # Softmax weights over each positive's negatives; pass `dim` explicitly
     # (calling F.softmax without it is deprecated). temp=0 gives uniform weights.
     p = F.softmax(temp * y_neg, dim=-1)
     loss_pos = torch.sum(F.softplus(-1 * y_pos))
     loss_neg = torch.sum(p * F.softplus(y_neg))
     loss = (loss_pos + loss_neg) / 2 / M
     if self.gpu:
         loss = loss.cuda()
     return loss
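For intuition, a hedged sketch of the weighting step in isolation, assuming distance-style scores (smaller distance = more plausible): with `temp > 0`, low-distance (hard) negatives get larger softmax weights, while `temp = 0` reduces to uniform weighting. The tensor values and shapes below are illustrative, not from the source.

import torch
import torch.nn.functional as F

gamma = 12.0
M, C = 2, 3                         # 2 positives, 3 negatives sampled per positive
dist_neg = torch.rand(M * C) * 20   # hypothetical negative-sample distances
y_neg = (gamma - dist_neg).view(C, -1).transpose(0, 1)  # (M, C), as in the method

p_uniform = F.softmax(0.0 * y_neg, dim=-1)  # temp=0: every weight is 1/C
p_sharp = F.softmax(2.0 * y_neg, dim=-1)    # temp=2: low-distance (hard) negatives dominate
print(p_uniform)
print(p_sharp)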
Example #2
File: train.py  Project: LFetty/gaga
    def g_nonsaturating_loss(self, fake_pred):
        # Non-saturating generator loss: softplus(-x) == -log(sigmoid(x)),
        # so this pushes the discriminator's output on generated samples up.
        loss = F.softplus(-fake_pred).mean()

        return loss
Example #3
File: train.py  Project: LFetty/gaga
    def d_logistic_loss(self, real_pred, fake_pred):
        # Logistic discriminator loss: -log(sigmoid(real_pred)) - log(1 - sigmoid(fake_pred)),
        # written in the numerically stable softplus form.
        real_loss = F.softplus(-real_pred)
        fake_loss = F.softplus(fake_pred)

        return (real_loss + fake_loss).mean()
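Both snippets are halves of the standard cross-entropy GAN objective in its numerically stable form, since softplus(-x) equals -log(sigmoid(x)). A small hedged check of that equivalence; the logit tensors below are illustrative stand-ins for discriminator outputs.

import torch
import torch.nn.functional as F

pred = torch.randn(5)
# -log(sigmoid(x)) computed directly and via the stable softplus form used above.
print(torch.allclose(-torch.log(torch.sigmoid(pred)), F.softplus(-pred)))  # True

# d_logistic_loss matches binary cross-entropy with logits, with targets
# 1 for real predictions and 0 for fake predictions.
real_pred, fake_pred = torch.randn(5), torch.randn(5)
d_loss = (F.softplus(-real_pred) + F.softplus(fake_pred)).mean()
bce = (F.binary_cross_entropy_with_logits(real_pred, torch.ones(5))
       + F.binary_cross_entropy_with_logits(fake_pred, torch.zeros(5)))
print(torch.allclose(d_loss, bce))  # True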
Example #4
 def forward(self, x):
     # Mish activation: x * tanh(softplus(x)).
     return x * F.softplus(x).tanh()
Example #5
 def backward(ctx, grad_output):
     # Backward pass for Mish: d/dx [x * tanh(softplus(x))]
     #   = tanh(softplus(x)) + x * sigmoid(x) * (1 - tanh(softplus(x))**2)
     x = ctx.saved_tensors[0]
     sx = torch.sigmoid(x)
     fx = F.softplus(x).tanh()
     return grad_output * (fx + x * sx * (1 - fx * fx))
Example #6
 def forward(ctx, x):
     ctx.save_for_backward(x)
     return x.mul(torch.tanh(F.softplus(x)))  # x * tanh(ln(1 + exp(x)))
Example #7
 def forward(self, x):
     # Mish activation, written with an explicit torch.tanh call.
     return x * torch.tanh(F.softplus(x))
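Examples #5 and #6 are the two halves of a custom `torch.autograd.Function` that saves only the input and recomputes the intermediate values in the backward pass (a memory-saving variant of the Mish modules in Examples #4 and #7). A hedged sketch of how they are typically assembled and used; the class names `MishFunction` and `MemoryEfficientMish` are illustrative, not from the source snippets.

import torch
import torch.nn as nn
import torch.nn.functional as F

class MishFunction(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)
        return x.mul(torch.tanh(F.softplus(x)))  # x * tanh(ln(1 + exp(x)))

    @staticmethod
    def backward(ctx, grad_output):
        x = ctx.saved_tensors[0]
        sx = torch.sigmoid(x)
        fx = F.softplus(x).tanh()
        return grad_output * (fx + x * sx * (1 - fx * fx))

class MemoryEfficientMish(nn.Module):
    def forward(self, x):
        return MishFunction.apply(x)

# Usage: drop-in replacement for other activation modules.
act = MemoryEfficientMish()
y = act(torch.randn(4, 8, requires_grad=True))
y.sum().backward()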
Example #8
	def latent_param(self):
		# Returns the latent mean unchanged and a positive scale obtained from
		# softplus; dividing by ln(2) makes an input of 0 map to exactly 1
		# (requires `import math` at module level).
		return self.latent_mean.mul(1), F.softplus(self.latent_logvar).div(math.log(2)).mul(1)
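A hedged usage sketch: the second returned value is strictly positive, so a natural (assumed, not taken from the source) use is as the scale in reparameterized Gaussian sampling. The attribute names `latent_mean` and `latent_logvar` come from the snippet; the standalone tensors and the sampling step are illustrative.

import math
import torch
import torch.nn.functional as F

latent_mean = torch.zeros(3, 16)
latent_logvar = torch.zeros(3, 16)

mean = latent_mean.mul(1)
scale = F.softplus(latent_logvar).div(math.log(2)).mul(1)  # softplus(0) / ln(2) == 1

# Reparameterization trick (assumed usage): sample = mean + scale * eps.
eps = torch.randn_like(scale)
sample = mean + scale * eps
print(sample.shape)  # torch.Size([3, 16])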