def log_rank_loss(self, y_pos, y_neg, temp=0):
    # Margin-based ranking loss with softmax-weighted negatives.
    # y_pos holds scores for M positives; y_neg holds N = C * M negative scores.
    M = y_pos.size(0)
    N = y_neg.size(0)
    y_pos = self.gamma - y_pos
    y_neg = self.gamma - y_neg
    C = N // M  # negatives per positive
    y_neg = y_neg.view(C, -1).transpose(0, 1)  # (M, C): one row of negatives per positive
    p = F.softmax(temp * y_neg, dim=1)  # weights over each row's negatives; temp=0 gives uniform weights
    loss_pos = torch.sum(F.softplus(-1 * y_pos))
    loss_neg = torch.sum(p * F.softplus(y_neg))
    loss = (loss_pos + loss_neg) / 2 / M
    if self.gpu:
        loss = loss.cuda()
    return loss
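A minimal usage sketch, assuming the function above sits at module level and that `gamma` and `gpu` live on the enclosing model; the `RankModel` holder and the shapes below are assumptions for illustration:

import torch
import torch.nn.functional as F

class RankModel:
    # Hypothetical holder for the attributes the loss expects.
    def __init__(self, gamma=12.0, gpu=False):
        self.gamma = gamma
        self.gpu = gpu
    log_rank_loss = log_rank_loss  # attach the function above as a method

M, C = 8, 4                  # 8 positives, 4 negatives per positive
y_pos = torch.randn(M)       # scores of the positives
y_neg = torch.randn(M * C)   # scores of the C * M negatives
loss = RankModel(gamma=12.0).log_rank_loss(y_pos, y_neg, temp=0.5)
print(loss.item())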
Example #2
File: train.py Project: LFetty/gaga
    def g_nonsaturating_loss(self, fake_pred):
        # Non-saturating generator loss: softplus(-D(G(z))), averaged over the batch.
        loss = F.softplus(-fake_pred).mean()

        return loss
Example #3
File: train.py Project: LFetty/gaga
    def d_logistic_loss(self, real_pred, fake_pred):
        # Logistic discriminator loss: push real predictions up and fake predictions down.
        real_loss = F.softplus(-real_pred)
        fake_loss = F.softplus(fake_pred)

        return (real_loss + fake_loss).mean()
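A hedged sketch of how these two losses usually slot into an alternating GAN update; `generator`, `discriminator`, the optimizers, and `latent_dim` are assumptions, not part of the original project:

import torch
import torch.nn.functional as F

def d_logistic_loss(real_pred, fake_pred):
    return (F.softplus(-real_pred) + F.softplus(fake_pred)).mean()

def g_nonsaturating_loss(fake_pred):
    return F.softplus(-fake_pred).mean()

def train_step(generator, discriminator, d_opt, g_opt, real, latent_dim=512):
    # Discriminator step: raise scores on real samples, lower them on fakes.
    z = torch.randn(real.size(0), latent_dim)
    fake = generator(z).detach()
    d_loss = d_logistic_loss(discriminator(real), discriminator(fake))
    d_opt.zero_grad()
    d_loss.backward()
    d_opt.step()

    # Generator step: non-saturating loss on freshly generated samples.
    z = torch.randn(real.size(0), latent_dim)
    g_loss = g_nonsaturating_loss(discriminator(generator(z)))
    g_opt.zero_grad()
    g_loss.backward()
    g_opt.step()
    return d_loss.item(), g_loss.item()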
Example #4
def forward(x):
    # Mish activation: x * tanh(softplus(x))
    return x * F.softplus(x).tanh()
Example #5
def backward(ctx, grad_output):
    # Gradient of Mish: tanh(softplus(x)) + x * sigmoid(x) * (1 - tanh(softplus(x))^2)
    x = ctx.saved_tensors[0]
    sx = torch.sigmoid(x)
    fx = F.softplus(x).tanh()
    return grad_output * (fx + x * sx * (1 - fx * fx))
Example #6
def forward(ctx, x):
    ctx.save_for_backward(x)  # keep x for the backward pass
    return x.mul(torch.tanh(F.softplus(x)))  # x * tanh(ln(1 + exp(x)))
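Putting the pieces from Examples #4–#6 together, a minimal sketch of a complete custom-autograd Mish; the class name `MishFunction` is an assumption:

import torch
import torch.nn.functional as F

class MishFunction(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)
        return x.mul(torch.tanh(F.softplus(x)))  # x * tanh(ln(1 + exp(x)))

    @staticmethod
    def backward(ctx, grad_output):
        x = ctx.saved_tensors[0]
        sx = torch.sigmoid(x)
        fx = F.softplus(x).tanh()
        return grad_output * (fx + x * sx * (1 - fx * fx))

# Gradients flow through the hand-written backward.
x = torch.randn(4, requires_grad=True)
MishFunction.apply(x).sum().backward()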
Example #7
def forward(self, x):
    # Mish activation as an nn.Module forward: x * tanh(softplus(x))
    return x * (torch.tanh(F.softplus(x)))
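For the module form in Example #7, a quick sketch of wrapping the forward in an `nn.Module` and dropping it into a model; the `Mish` class name and the layer sizes are assumptions:

import torch
import torch.nn as nn
import torch.nn.functional as F

class Mish(nn.Module):
    def forward(self, x):
        return x * (torch.tanh(F.softplus(x)))

model = nn.Sequential(nn.Linear(8, 16), Mish(), nn.Linear(16, 1))
out = model(torch.randn(2, 8))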
Example #8
    def latent_param(self):
        # Latent mean plus a positive scale: softplus(logvar) / log(2), so softplus(0) maps to 1.
        return self.latent_mean.mul(1), F.softplus(self.latent_logvar).div(math.log(2)).mul(1)
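A sketch of how such a (mean, scale) pair is commonly consumed for reparameterized sampling; the `LatentLayer` module and the sampling step below are assumptions, not taken from the original project:

import math
import torch
import torch.nn as nn
import torch.nn.functional as F

class LatentLayer(nn.Module):
    def __init__(self, dim=16):
        super().__init__()
        self.latent_mean = nn.Parameter(torch.zeros(dim))
        self.latent_logvar = nn.Parameter(torch.zeros(dim))

    def latent_param(self):
        # softplus keeps the scale strictly positive; dividing by log(2) maps softplus(0) to 1.
        return self.latent_mean.mul(1), F.softplus(self.latent_logvar).div(math.log(2)).mul(1)

layer = LatentLayer()
mean, scale = layer.latent_param()
z = mean + scale * torch.randn_like(scale)  # reparameterization-style sample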