# Visualization for the parametric-optimizer variant: per test batch, dumps
# ground-truth images, reconstructions, prior samples, and posterior samples.
def do_vis(epoch):
    # keep the learned inner optimizer fixed while visualizing
    inner_opt.set_freeze_flag(True)
    for i, (data, _) in tqdm(enumerate(test_loader)):
        # ground-truth images from the test set
        save_image(data[:64].data.cpu(),
                   '%s/dataset-%d-%d.png' % (cmd_args.save_dir, i, epoch), nrow=8)
        encoder.eval()
        data = convert_data(data)

        # reconstructions from the optimized posterior
        best_z, mu, logvar = get_init_posterior(data)
        recon_batch = decoder(best_z)
        n = min(data.size(0), 8)
        comparison = torch.cat([data[:n],
                                recon_batch.view(-1, 1, cmd_args.img_size,
                                                 cmd_args.img_size)[:n]])  # -1 handles a final smaller batch
        save_image(comparison.data.cpu(),
                   '%s/recon-%d-%d.png' % (cmd_args.save_dir, i, epoch), nrow=n)

        # samples decoded from the prior z ~ N(0, I)
        z = torch.Tensor(64, cmd_args.latent_dim).normal_(0, 1)
        if cmd_args.ctx == 'gpu':
            z = z.cuda()
        sample = decoder(z).view(64, 1, cmd_args.img_size, cmd_args.img_size)
        save_image(sample.data.cpu(),
                   '%s/prior-%d-%d.png' % (cmd_args.save_dir, i, epoch), nrow=8)

        # samples decoded from the amortized posterior; train mode so the
        # encoder draws stochastic z rather than returning the mean
        encoder.train()
        z, _, _ = encoder(data[0:64])
        if cmd_args.ctx == 'gpu':
            z = z.cuda()
        sample = decoder(z).view(64, 1, cmd_args.img_size, cmd_args.img_size)
        save_image(sample.data.cpu(),
                   '%s/posterior-%d-%d.png' % (cmd_args.save_dir, i, epoch), nrow=8)

        if i + 1 >= cmd_args.vis_num:
            break
# test() for the parametric-optimizer variant (writes
# parametric_op_reconstruction_<epoch>.png).
def test(epoch):
    encoder.eval()
    inner_opt.set_freeze_flag(True)
    test_loss = 0
    for i, (data, _) in tqdm(enumerate(test_loader)):
        data = convert_data(data)
        fz = lambda z, mu, logvar: loss_function(decoder(z), data, mu, logvar)
        if cmd_args.unroll_test:
            inner_opt.zero_grad()
            # back up the encoder's differentiable variables so the unrolled
            # inner updates do not leak out of evaluation
            bak_dict = encoder.diff_var_dict()
            best_z, mu, logvar = optimize_func(data, encoder, fz, inner_opt,
                                               nsteps=cmd_args.unroll_steps)
            encoder.load_diff_var_dict(bak_dict)
        else:
            best_z, mu, logvar = encoder(data)
        loss = fz(best_z, mu, logvar)
        recon_batch = decoder(best_z)
        test_loss += loss.item() * data.shape[0]
        if i == 0:
            n = min(data.size(0), 8)
            comparison = torch.cat([data[:n],
                                    recon_batch.view(-1, 1, cmd_args.img_size,
                                                     cmd_args.img_size)[:n]])
            save_image(comparison.data.cpu(),
                       '%s/parametric_op_reconstruction_%d.png'
                       % (cmd_args.save_dir, epoch), nrow=n)
    inner_opt.set_freeze_flag(False)
    test_loss /= len(test_loader.dataset)
    print('test epoch %d, average loss %.4f' % (epoch, test_loss))
    return test_loss
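# `loss_function` is defined elsewhere in the repo; the sketch below is a
# hypothetical stand-in consistent with the call sites above: the standard
# VAE objective (Bernoulli reconstruction plus closed-form KL to the N(0, I)
# prior), averaged per example to match the `* data.shape[0]` bookkeeping in
# test(). The reduction and the per-example averaging are assumptions, not
# confirmed by this file.
import torch
import torch.nn.functional as F

def loss_function_sketch(recon_x, x, mu, logvar):
    # Bernoulli negative log-likelihood of the reconstruction (x in [0, 1])
    bce = F.binary_cross_entropy(recon_x, x.view_as(recon_x), reduction='sum')
    # KL( N(mu, diag(exp(logvar))) || N(0, I) ) in closed form
    kld = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    return (bce + kld) / x.shape[0]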
# test() for the Gaussian-Fenchel variant (writes
# gauss_fenchel_reconstruction_<epoch>.png).
def test(epoch):
    test_loss = 0
    encoder.eval()
    for i, (data, _) in tqdm(enumerate(test_loader)):
        # snapshot the dual network nu and its optimizer so test-time
        # evaluations of log_score leave the training state untouched
        bak_nu_dict = nu.state_dict()
        bak_opt_dict = opt_nu.state_dict()
        data = convert_data(data)
        fz = lambda z: binary_cross_entropy(decoder(z), data) + log_score(data, z)[0]
        z_init, mu, logvar = encoder(data)
        if cmd_args.unroll_test:
            best_z = optimize_variable(z_init, fz, EuclideanDist,
                                       nsteps=cmd_args.unroll_steps)
        else:
            best_z = z_init
        loss = fz(best_z) + kl_loss(mu, logvar)
        nu.load_state_dict(bak_nu_dict)
        opt_nu.load_state_dict(bak_opt_dict)
        recon_batch = decoder(best_z)
        test_loss += loss.item() * data.shape[0]
        if i == 0:
            n = min(data.size(0), 8)
            comparison = torch.cat([
                data[:n].view(-1, 1, cmd_args.img_size, cmd_args.img_size),
                recon_batch.view(-1, 1, cmd_args.img_size,
                                 cmd_args.img_size)[:n]  # -1 handles a final smaller batch
            ])
            save_image(comparison.data.cpu(),
                       '%s/gauss_fenchel_reconstruction_%d.png'
                       % (cmd_args.save_dir, epoch), nrow=n)
    test_loss /= len(test_loader.dataset)
    print('test epoch %d, average loss %.4f' % (epoch, test_loss))
    return test_loss
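# `kl_loss` also lives elsewhere in the repo; presumably the closed-form KL
# between the encoder's Gaussian N(mu, diag(exp(logvar))) and the standard
# normal prior. A hypothetical sketch (the per-example averaging is a guess,
# chosen to match the `* data.shape[0]` accumulation above):
def kl_loss_sketch(mu, logvar):
    return -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp()) / mu.shape[0]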
# train() for the Fenchel-dual variant: the surrogate fz combines the
# reconstruction loss with the dual score, and z is refined by a short
# unrolled inner optimization before the outer update.
def train(epoch):
    encoder.train()
    train_loss = 0
    pbar = tqdm(train_loader)
    num_mini_batches = 0
    loss_list = []
    for (data, _) in pbar:
        data = convert_data(data)
        optimizer.zero_grad()
        # update=True lets log_score also take a gradient step on the dual net
        fz = lambda z: (binary_cross_entropy(decoder(z), data)
                        + log_score(data, z, update=True)[0])
        z_init, mu, logvar = encoder(data)
        # unrolled inner optimization of z under the Euclidean geometry
        best_z = optimize_variable(z_init, fz, EuclideanDist,
                                   nsteps=cmd_args.unroll_steps, eps=0)
        kl = kl_loss(mu, logvar)
        obj = fz(best_z)
        loss = kl + obj
        loss.backward()
        optimizer.step()
        recon_loss = binary_cross_entropy(decoder(best_z), data)
        vae_loss = kl.item() + recon_loss.item()
        train_loss += loss.item()
        pbar.set_description('vae loss: %.4f, recon: %.4f, fenchel_obj: %.4f'
                             % (vae_loss, recon_loss.item(), obj.item()))
        loss_list.append(loss.item())
        num_mini_batches += 1
    print('train epoch %d, average loss %.4f' % (epoch, np.mean(loss_list)))
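# `optimize_variable` is imported from the repo's inner-optimization module.
# Judging by the call sites, it unrolls a few descent steps on z under the
# given geometry (EuclideanDist reducing to plain gradient descent) and keeps
# the unroll on the autograd graph so the encoder can be trained through it.
# Everything below -- the step size, create_graph, and the unused eps
# tolerance -- is a hypothetical reading, not the repo's actual code.
def optimize_variable_sketch(z_init, fz, dist, nsteps=5, lr=1e-3, eps=0):
    z = z_init
    for _ in range(nsteps):
        g = torch.autograd.grad(fz(z), z, create_graph=True)[0]
        z = z - lr * g  # Euclidean prox step; other `dist` choices would differ
    return z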
# train() for the unrolled-Gaussian variant: the encoder's (mu, logvar) are
# refined by nsteps of inner_opt_class before computing the outer loss.
def train(epoch):
    train_loss = 0
    encoder.train()
    pbar = tqdm(train_loader)
    num_mini_batches = 0
    for (data, _) in pbar:
        data = convert_data(data)
        optimizer.zero_grad()
        fz = lambda z, mu, logvar: loss_function(decoder(z), data, mu, logvar)
        best_z, mu, logvar = optimize_gaussian(data, encoder, fz, inner_opt_class,
                                               nsteps=cmd_args.unroll_steps,
                                               training=True)
        loss = fz(best_z, mu, logvar)
        loss.backward()
        train_loss += loss.item()
        optimizer.step()
        # reconstruction from the posterior mean, for progress reporting
        recon_loss = binary_cross_entropy(decoder(mu), data)
        pbar.set_description('minibatch loss: %.4f, recon: %.4f'
                             % (loss.item(), recon_loss.item()))
        num_mini_batches += 1
    print('train epoch %d, average loss %.4f' % (epoch, train_loss / num_mini_batches))
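# `optimize_gaussian` is defined elsewhere. From its arguments it appears to
# take the encoder's Gaussian parameters and refine them with `nsteps`
# unrolled updates of `inner_opt_class`, keeping the unroll differentiable
# when training=True. The sketch below substitutes plain SGD steps and a
# guessed learning rate for the actual inner optimizer; it illustrates the
# unrolling pattern, not the repo's implementation.
def optimize_gaussian_sketch(data, encoder, fz, nsteps, lr=1e-2, training=True):
    def reparam(mu, logvar):
        # z = mu + sigma * eps, the standard reparameterization trick
        return mu + torch.exp(0.5 * logvar) * torch.randn_like(mu)

    _, mu, logvar = encoder(data)
    for _ in range(nsteps):
        loss = fz(reparam(mu, logvar), mu, logvar)
        g_mu, g_lv = torch.autograd.grad(loss, (mu, logvar), create_graph=training)
        mu, logvar = mu - lr * g_mu, logvar - lr * g_lv
    return reparam(mu, logvar), mu, logvar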
def train(epoch):
    train_loss = 0
    encoder.train()
    pbar = tqdm(train_loader)
    num_mini_batches = 0
    for (data, _) in pbar:
        data = convert_data(data)
        optimizer.zero_grad()
        inner_opt.zero_grad()
        fz = lambda z, mu, logvar: loss_function(decoder(z), data, mu, logvar)
        best_z, mu, logvar = optimize_func(data, encoder, fz, inner_opt,
                                           nsteps=cmd_args.unroll_steps)
        loss = fz(best_z, mu, logvar)
        loss.backward()
        train_loss += loss.item()
        optimizer.step()
        pbar.set_description('minibatch loss: %.4f' % loss.item())
        num_mini_batches += 1
    print('train epoch %d, average loss %.4f' % (epoch, train_loss / num_mini_batches))
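# `optimize_func` and the parametric optimizer `inner_opt` come from the
# repo's inner-optimization module. The calls here and in test() (which backs
# up encoder.diff_var_dict() around it) suggest it applies the learned
# optimizer for `nsteps` unrolled updates to the encoder's differentiable
# variables and then re-encodes. This is only a guess at the control flow;
# `inner_opt.step(loss, encoder)` is a hypothetical API, not the real one.
def optimize_func_sketch(data, encoder, fz, inner_opt, nsteps):
    best_z, mu, logvar = encoder(data)
    for _ in range(nsteps):
        loss = fz(best_z, mu, logvar)
        inner_opt.step(loss, encoder)  # hypothetical: one learned-optimizer update
        best_z, mu, logvar = encoder(data)
    return best_z, mu, logvar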
# test() for the unrolled-Gaussian variant (writes
# unroll_gauss_reconstruction_<epoch>.png).
def test(epoch):
    encoder.eval()
    test_loss = 0
    for i, (data, _) in tqdm(enumerate(test_loader)):
        data = convert_data(data)
        fz = lambda z, mu, logvar: loss_function(decoder(z), data, mu, logvar)
        if cmd_args.unroll_test:
            best_z, mu, logvar = optimize_gaussian(data, encoder, fz, inner_opt_class,
                                                   nsteps=cmd_args.unroll_steps,
                                                   training=True)
        else:
            best_z, mu, logvar = encoder(data)
        loss = fz(best_z, mu, logvar)
        recon_batch = decoder(best_z)
        test_loss += loss.item() * data.shape[0]
        if i == 0:
            n = min(data.size(0), 8)
            comparison = torch.cat([data[:n],
                                    recon_batch.view(-1, 1, cmd_args.img_size,
                                                     cmd_args.img_size)[:n]])
            save_image(comparison.data.cpu(),
                       '%s/unroll_gauss_reconstruction_%d.png'
                       % (cmd_args.save_dir, epoch), nrow=n)
    test_loss /= len(test_loader.dataset)
    print('test epoch %d, average loss %.4f' % (epoch, test_loss))
    return test_loss
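# A typical driver tying the pieces together; not part of the original file.
# The epoch count and the single do_vis() call are illustrative only, and
# cmd_args.num_epochs is an assumed flag.
if __name__ == '__main__':
    for epoch in range(cmd_args.num_epochs):
        train(epoch)
        test(epoch)
    do_vis(cmd_args.num_epochs)  # hypothetical: visualize once after training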