import torch
import torch.nn.functional as F

def log_rank_loss(self, y_pos, y_neg, temp=0):
    # Shift scores by the margin gamma for positive and negative samples.
    M = y_pos.size(0)
    N = y_neg.size(0)
    y_pos = self.gamma - y_pos
    y_neg = self.gamma - y_neg
    # Group the N negatives into C candidates per positive (N must be a multiple of M).
    C = N // M
    y_neg = y_neg.view(C, -1).transpose(0, 1)
    # Temperature-weighted softmax over each positive's negatives (self-adversarial weighting).
    p = F.softmax(temp * y_neg, dim=-1)
    loss_pos = torch.sum(F.softplus(-1 * y_pos))
    loss_neg = torch.sum(p * F.softplus(y_neg))
    loss = (loss_pos + loss_neg) / 2 / M
    if self.gpu:
        loss = loss.cuda()
    return loss
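A minimal usage sketch, assuming log_rank_loss is a method on a scoring model that exposes gamma and gpu attributes; the _Scorer stub, the tensor shapes, and the temp value below are hypothetical, chosen only to show the expected shapes.

import torch

class _Scorer:
    # Hypothetical stub exposing the attributes log_rank_loss expects,
    # binding the function defined above as its method.
    gamma = 12.0
    gpu = False
    log_rank_loss = log_rank_loss

M, C = 4, 3                                      # 4 positive triples, 3 negatives each
y_pos = torch.randn(M, requires_grad=True)       # scores of positive samples
y_neg = torch.randn(M * C, requires_grad=True)   # scores of negatives, N = M * C
loss = _Scorer().log_rank_loss(y_pos, y_neg, temp=0.5)
loss.backward()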
def g_nonsaturating_loss(self, fake_pred):
    # Non-saturating generator loss: softplus(-fake_pred) == -log(sigmoid(fake_pred)).
    loss = F.softplus(-fake_pred).mean()
    return loss
def d_logistic_loss(self, real_pred, fake_pred):
    # Logistic discriminator loss: softplus(-real) + softplus(fake)
    # equals -log(sigmoid(real)) - log(1 - sigmoid(fake)).
    real_loss = F.softplus(-real_pred)
    fake_loss = F.softplus(fake_pred)
    return (real_loss + fake_loss).mean()
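A minimal sketch of how these two softplus-based losses might drive one GAN training step; the generator, discriminator, optimizers, and latent_dim names are hypothetical placeholders, not part of the original code.

import torch
import torch.nn.functional as F

def train_step(generator, discriminator, d_opt, g_opt, real_images, latent_dim=512):
    # Discriminator step: logistic loss on real and generated images.
    z = torch.randn(real_images.size(0), latent_dim)
    fake_images = generator(z).detach()
    real_pred = discriminator(real_images)
    fake_pred = discriminator(fake_images)
    d_loss = (F.softplus(-real_pred) + F.softplus(fake_pred)).mean()
    d_opt.zero_grad()
    d_loss.backward()
    d_opt.step()

    # Generator step: non-saturating loss on fresh samples.
    z = torch.randn(real_images.size(0), latent_dim)
    fake_pred = discriminator(generator(z))
    g_loss = F.softplus(-fake_pred).mean()
    g_opt.zero_grad()
    g_loss.backward()
    g_opt.step()
    return d_loss.item(), g_loss.item()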
def forward(self, x):
    # Mish activation: x * tanh(softplus(x)).
    return x * F.softplus(x).tanh()
@staticmethod
def backward(ctx, grad_output):
    # Gradient of Mish: d/dx [x * tanh(softplus(x))]
    #   = tanh(softplus(x)) + x * sigmoid(x) * (1 - tanh(softplus(x))^2).
    x = ctx.saved_tensors[0]
    sx = torch.sigmoid(x)
    fx = F.softplus(x).tanh()
    return grad_output * (fx + x * sx * (1 - fx * fx))
@staticmethod
def forward(ctx, x):
    # Save the input for the hand-written backward pass above.
    ctx.save_for_backward(x)
    return x.mul(torch.tanh(F.softplus(x)))  # x * tanh(ln(1 + exp(x)))
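Assembled into a torch.autograd.Function, the forward/backward pair above can be checked against finite differences with gradcheck; the MishFunction name and the test tensor are assumptions for illustration.

import torch
import torch.nn.functional as F

class MishFunction(torch.autograd.Function):
    # Custom autograd implementation of Mish with a hand-written backward pass.
    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)
        return x.mul(torch.tanh(F.softplus(x)))

    @staticmethod
    def backward(ctx, grad_output):
        x = ctx.saved_tensors[0]
        sx = torch.sigmoid(x)
        fx = F.softplus(x).tanh()
        return grad_output * (fx + x * sx * (1 - fx * fx))

x = torch.randn(8, dtype=torch.double, requires_grad=True)
# gradcheck compares the analytic backward against numerical gradients.
assert torch.autograd.gradcheck(MishFunction.apply, (x,))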
def forward(self, x):
    # Module-style Mish: x * tanh(softplus(x)).
    return x * torch.tanh(F.softplus(x))
import math

def latent_param(self):
    # Return the latent mean and a non-negative spread term:
    # softplus(logvar) / ln(2) == log2(1 + exp(logvar)); .mul(1) just copies the tensors.
    return self.latent_mean.mul(1), F.softplus(self.latent_logvar).div(math.log(2)).mul(1)
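A minimal sketch of the surrounding module this method might live in; the _Latent class, dimension, and zero initialisation are assumptions. It relies only on the identity softplus(x) / ln(2) = log2(1 + exp(x)), which keeps the second return value non-negative.

import math
import torch
import torch.nn.functional as F
from torch import nn

class _Latent(nn.Module):
    # Hypothetical container for the two learned latent parameters.
    def __init__(self, dim=16):
        super().__init__()
        self.latent_mean = nn.Parameter(torch.zeros(dim))
        self.latent_logvar = nn.Parameter(torch.zeros(dim))

    def latent_param(self):
        # softplus(x) / ln(2) == log2(1 + exp(x)), i.e. always non-negative.
        return self.latent_mean.mul(1), F.softplus(self.latent_logvar).div(math.log(2)).mul(1)

mean, spread = _Latent().latent_param()
print(mean.shape, bool((spread >= 0).all()))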