# --- Model setup (64x64 Gaussian dataset) --------------------------------
# NOTE(review): Encoder, Decoder, device, args and load_data_1scale are
# defined elsewhere in this file.
encoder = Encoder()
decoder = Decoder()
encoder.to(device)
decoder.to(device)
print("number of parameters: {}".format(encoder._n_parameters() + decoder._n_parameters()))

# Training data: single-scale 64x64 Gaussian permeability set (25000 samples).
train_hdf5_file = os.getcwd() + '/Gaussian/data/training_set_64_gaussian1_25000.hdf5'
train_loader = load_data_1scale(train_hdf5_file, args.n_train, args.batch_size, singlescale=True)

# Jointly optimize encoder and decoder parameters with a single Adam instance.
optimizer = torch.optim.Adam(
    itertools.chain(encoder.parameters(), decoder.parameters()),
    lr=args.lr, betas=(args.beta1, args.beta2))


def loss_function(recon_x, x, mu, logvar):
    """beta-VAE objective: summed MSE reconstruction + beta-weighted KL term.

    Args:
        recon_x: reconstructed batch; flattened to (batch, 4096), i.e.
            64x64 images (assumed -- TODO confirm against model output shape).
        x: ground-truth batch, same total element count as ``recon_x``.
        mu: latent means, reshaped to (batch, 256).
        logvar: latent log-variances, reshaped to (batch, 256).

    Returns:
        Tuple ``(total_loss, recon_loss, kld)`` where
        ``total_loss = recon_loss + args.beta_vae * kld``.
    """
    # FIX: `size_average=False` is deprecated and removed in current PyTorch;
    # `reduction='sum'` is the exact modern equivalent (sum over all elements).
    recon_loss = F.mse_loss(recon_x.view(-1, 4096), x.view(-1, 4096), reduction='sum')
    mu = mu.reshape(-1, 256)
    logvar = logvar.reshape(-1, 256)
    # KL( q(z|x) || N(0, I) ): closed form, summed over latent dims (dim=1)
    # and then over the batch (dim=0) to match the summed reconstruction loss.
    kld = torch.sum(-0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim=1), dim=0)
    return recon_loss + args.beta_vae * kld, recon_loss, kld
# --- Model setup (16x16 Gaussian variant) --------------------------------
# NOTE(review): encoder, decoder, device, args, load_data_1scale,
# plot_generation and output_dir are defined elsewhere in this file.
encoder.to(device)
decoder.to(device)
print("number of parameters: {}".format(encoder._n_parameters()+decoder._n_parameters()))

# Training / test data paths: single-scale 16x16 Gaussian dataset.
train_hdf5_file = os.getcwd() + f'/Gaussian/data/training_set_16_gaussian.hdf5'
test_hdf5_file = os.getcwd() + f'/Gaussian/data/test_set_16_gaussian.hdf5'
train_loader = load_data_1scale(train_hdf5_file, args.n_train, args.batch_size, singlescale=True)

# Load the whole test set into memory as a numpy array.
with h5py.File(test_hdf5_file, 'r') as f:
    x_test = f['test'][()]
x_test = x_test  # NOTE(review): no-op self-assignment; likely a leftover edit

# Jointly optimize encoder and decoder parameters.
optimizer = torch.optim.Adam(
    itertools.chain(encoder.parameters(), decoder.parameters()),
    lr=args.lr, betas=(args.beta1, args.beta2))


def test(epoch, x_test):
    """Qualitative evaluation: sample from the prior and reconstruct test images.

    Args:
        epoch: current epoch index, forwarded to the plotting helper for naming.
        x_test: numpy array of test images (indexable on the first axis;
            must contain at least 101 samples -- indices up to 100 are used).
    """
    encoder.eval()
    decoder.eval()
    # Draw 9 latent codes from the standard-normal prior; latent maps are
    # 1x4x4 (assumed to match the decoder's expected input -- TODO confirm).
    z = torch.randn(9, 1, 4, 4).to(device)
    imgs = decoder(z)
    samples = np.squeeze(imgs.data.cpu().numpy())
    plot_generation(samples, epoch, output_dir, 1)
    # Reconstruct four fixed test images for a visual real-vs-decoded comparison.
    real_imgs = x_test[[10, 30, 50, 100]]
    real_imgs = (torch.FloatTensor(real_imgs)).to(device)
    encoded_imgs, _, _ = encoder(real_imgs)
    decoded_imgs = decoder(encoded_imgs)
    samples_gen = np.squeeze(decoded_imgs.data.cpu().numpy())
    samples_real = np.squeeze(real_imgs.data.cpu().numpy())
    # NOTE(review): the visible chunk ends here; samples_gen/samples_real are
    # computed but not yet used -- the function presumably continues (e.g. a
    # plotting call) past this excerpt. Confirm against the full file.