def __call__(self, x, x_recon, mu, log_var, distribution='gaussian'):
    """Beta-VAE capacity-annealed loss: reconstruction + GAMMA * |KL - C|.

    C grows linearly with the iteration counter up to C_max, encouraging
    the KL term to track a slowly increasing information capacity.
    """
    # Anneal the capacity C from 0 toward C_max over C_stop_iter iterations.
    capacity = torch.clamp(
        self.C_max / self.C_stop_iter * self.ITERATIONS, 0, self.C_max.data[0]
    ).to(get_device())
    # NOTE(review): argument order (x, x_recon) differs from the sibling
    # losses, which call reconstruction_loss(x_recon, x) — confirm against
    # the helper's signature.
    rec = reconstruction_loss(x, x_recon, distribution)
    kld_total, _, _ = kl_divergence(mu, log_var)
    return rec + self.GAMMA * (kld_total - capacity).abs()
def __call__(self, x, x_recon, z):
    """WAE-MMD loss: reconstruction error + bias-corrected MMD regularizer.

    Returns a scalar tensor: reconstruction_loss(x_recon, x) plus the MMD
    between the encoded batch z and the prior, weighted by reg_weight
    divided by the unbiased-estimator correction N*(N-1).
    """
    batch_size = x.shape[0]
    # Correction factor for the unbiased pairwise MMD estimator.
    bias_corr = batch_size * (batch_size - 1)
    # BUG FIX: the original did `self.reg_weight /= bias_corr`, permanently
    # shrinking the stored weight on every call (loss drifts across batches
    # and epochs). Compute a per-call local value instead.
    reg_weight = self.reg_weight / bias_corr
    recon_loss = reconstruction_loss(x_recon, x)
    mmd_loss = calc_mmd(z, reg_weight)
    return recon_loss + mmd_loss
def train_vae(model, buffer, preprocess, config, device, lr=0.01):
    """Train a VAE on the images stored in *buffer*.

    Args:
        model: VAE returning (x_hat, mu, logvar) from a batch of images.
        buffer: replay buffer exposing .shuffle() and an iterable .memory.
        preprocess: callable (image, resize) -> tensor.
        config: provides tr_epochs, batch_size, resize.
        device: torch device the batches are moved to.
        lr: Adam learning rate (default preserves the previous hard-coded 0.01).

    Returns:
        True on completion.
    """
    optimizer = optim.Adam(model.parameters(), lr=lr)
    buffer.shuffle()
    # PERF FIX: materialize the memory once — the original rebuilt
    # list(buffer.memory) for every mini-batch slice.
    memory = list(buffer.memory)
    for ep in range(config.tr_epochs):
        loss = None
        for i in range(0, len(memory), config.batch_size):
            batch = memory[i:i + config.batch_size]
            x = torch.stack(
                [preprocess(np.copy(img), config.resize) for img in batch]
            ).to(device)
            optimizer.zero_grad()
            # Call the model directly (not .forward) so module hooks run.
            x_hat, mu, logvar = model(x)
            loss = reconstruction_loss(x_hat, x) + kld_loss(mu, logvar)
            loss.backward()
            optimizer.step()
        # Guard against an empty buffer, where no loss was ever computed
        # (the original would raise NameError on loss.item()).
        if ep % 100 == 0 and loss is not None:
            print("Epoch {} - Loss = {}".format(ep, loss.item()))
    return True
def __call__(self, x, x_recon, mu, log_var):
    """Vanilla VAE loss: reconstruction + M_N-weighted total KL divergence."""
    kl_total, _, _ = kl_divergence(mu, log_var)
    rec = reconstruction_loss(x_recon, x)
    # M_N is the KL weight read from the module-level config dict
    # (presumably batch_size / dataset_size — confirm against config).
    return rec + kl_total * args['vanilla_vae']['M_N']
def __call__(self, x, x_recon, z, l_lambda, v):
    """Reconstruction loss plus a penalty pulling the latent variance toward v.

    The penalty is l_lambda * |Var(z) - v|, where Var(z) is the mean squared
    L2 distance of each latent from the batch mean.
    """
    recon = reconstruction_loss(x, x_recon)
    centered = z - z.mean(dim=0)
    # Mean squared norm of the centered latents = total latent variance.
    latent_var = centered.norm(dim=1).pow(2).mean()
    penalty = (latent_var - v).abs() * l_lambda
    return recon + penalty
def __call__(self, x, x_recon):
    """Plain autoencoder objective: reconstruction error only."""
    loss = reconstruction_loss(x_recon, x)
    return loss