def factorvae_loss_fn(w_tc, model, PermD, optim_PermD, ones, zeros, **kwargs):
    """Compute the FactorVAE total-correlation loss term and train its discriminator.

    Disentangling by Factorising by Kim and Mnih
    https://arxiv.org/pdf/1802.05983.pdf

    :param w_tc: weight applied to the total-correlation (TC) loss term
    :param model: VAE model exposing ``encode(x=..., c=...)``
    :param PermD: discriminator network classifying latents as "true" vs "permuted"
    :param optim_PermD: optimizer over ``PermD``'s parameters (stepped inside this call)
    :param ones: target labels for permuted samples (class index 1 per the cross-entropy below)
    :param zeros: target labels for true samples (class index 0)
    :param kwargs: must contain ``x_true2``, ``label2`` (a second data batch and its
        conditioning labels, used only to train the discriminator) and ``z`` (latent
        sample from the first batch)
    :return: tuple ``(vae_tc_loss, discriminator_tc_loss)``; the discriminator has
        already been updated when this returns — the caller is expected to backprop
        only ``vae_tc_loss`` into the VAE.
    """
    x_true2 = kwargs['x_true2']
    label2 = kwargs['label2']
    z = kwargs['z']
    # Discriminator logits on the "true" (jointly sampled) latents.
    factorvae_dz_true = PermD(z)
    # TC estimate: difference of the two class logits (logit "true" minus logit
    # "permuted") approximates the density ratio log(q(z)/prod_j q(z_j)).
    vae_tc_loss = (factorvae_dz_true[:, 0] - factorvae_dz_true[:, 1]).mean() * w_tc
    # Train discriminator of FactorVAE on a second, independent batch whose latent
    # dimensions are permuted across the batch (permute_dims) and detached so the
    # discriminator update does not flow into the encoder via this branch.
    mu2, logvar2 = model.encode(x=x_true2, c=label2)
    z2 = reparametrize(mu2, logvar2)
    z2_perm = permute_dims(z2).detach()
    dz2_perm = PermD(z2_perm)
    # Standard FactorVAE discriminator objective: true latents -> class 0,
    # permuted latents -> class 1.
    # NOTE(review): factorvae_dz_true is NOT detached, so this backward also
    # deposits gradients into the encoder graph; combined with
    # retain_graph=True this presumably relies on the caller zeroing the VAE
    # optimizer before its own backward — confirm against the training loop.
    discriminator_tc_loss = (F.cross_entropy(factorvae_dz_true, zeros) + F.cross_entropy(dz2_perm, ones)) * 0.5
    optim_PermD.zero_grad()
    # retain_graph=True keeps the graph alive so the caller can still backprop
    # vae_tc_loss (which shares the PermD(z) subgraph) afterwards.
    discriminator_tc_loss.backward(retain_graph=True)
    optim_PermD.step()
    return vae_tc_loss, discriminator_tc_loss
def vae_base(self, losses, x_true1, x_true2, label1, label2):
    """Run one encode/sample/decode pass and merge the resulting loss terms.

    Encodes the first batch, draws a latent sample via reparametrization,
    decodes it, then delegates to the configured ``self.loss_fn`` with every
    intermediate it might need. Returns the updated ``losses`` dict together
    with the pass internals.
    """
    # Encode, sample, decode the primary batch.
    mean, log_variance = self.model.encode(x=x_true1, c=label1)
    latent = reparametrize(mean, log_variance)
    reconstruction = self.model.decode(z=latent, c=label1)
    # Hand the loss function everything it might need; it returns a dict of
    # named loss terms that we fold into the running `losses` dict.
    losses.update(self.loss_fn(
        losses,
        x_recon=reconstruction,
        x_true=x_true1,
        mu=mean,
        logvar=log_variance,
        z=latent,
        x_true2=x_true2,
        label2=label2,
    ))
    internals = {'x_recon': reconstruction, 'mu': mean, 'z': latent, 'logvar': log_variance}
    return losses, internals
def encode_stochastic(self, **kwargs):
    """Encode ``kwargs['images']`` and return a stochastic latent sample.

    A single unbatched image (a 3-D tensor) is promoted to a batch of one
    before encoding. The latent is drawn with the reparametrization trick.
    """
    batch = kwargs['images']
    # Promote a single 3-D image tensor to a batch of one.
    if batch.dim() == 3:
        batch = batch.unsqueeze(0)
    mean, log_variance = self.model.encode(x=batch)
    return reparametrize(mean, log_variance)
def forward(self, x, **kwargs):
    """Full autoencoding pass: encode ``x``, sample a latent, decode it."""
    mean, log_variance = self.encode(x)
    sample = reparametrize(mean, log_variance)
    return self.decode(sample)
def forward(self, x, c):
    """Conditional autoencoding pass.

    The condition fed to the decoder is the one-hot vector produced by the
    encoder itself (``encode_c=True``); the ``c`` argument is accepted for
    interface compatibility but not used in this body.
    """
    (mean, log_variance), one_hot = self.encode(x, encode_c=True)
    sample = reparametrize(mean, log_variance)
    return self.decode(z=sample, c=one_hot)
def forward(self, x, c):
    """Conditional autoencoding pass: encode ``(x, c)``, sample a latent,
    and decode it under the same condition ``c``."""
    mean, log_variance = self.encode(x, c)
    sample = reparametrize(mean, log_variance)
    return self.decode(sample, c)