label=i)  # NOTE(review): incomplete fragment — the plotting call this closes begins before this chunk; verify against the full file.
plt.legend()
plt.grid(axis='both')
plt.show()

# Visualize the decoder output over a regular grid of the 2-D latent space.
# Only meaningful when the bottleneck is exactly 2-dimensional.
if config.btl_size == 2:
    # Sweep each latent dimension over [-2, 2) in n steps.
    min_range, max_range = -2., 2.
    n = 20
    step = (max_range - min_range) / float(n)

    # Inference only — no gradients needed for decoding.
    with torch.no_grad():
        lines = []

        # One column of the image grid per value of the first latent coordinate.
        for v1 in np.arange(min_range, max_range, step):
            # |z| = (20, 2): fixed v1 paired with every v2 grid value.
            z = torch.stack([
                torch.FloatTensor([v1] * n),
                torch.FloatTensor([v2 for v2 in np.arange(min_range, max_range, step)]),
            ], dim=-1)
            # decoder(|Z|) = (20, 784) -> (20, 28, 28); clamp to valid pixel range [0, 1].
            # assumes the decoder emits flat 784-pixel MNIST images — TODO confirm.
            line = torch.clamp(model.decoder(z).view(n, 28, 28), 0, 1)
            # Stack the decoded digits vertically in REVERSE order so the layout
            # matches the hidden-space scatter plot: in images the top-left is
            # drawn as the smallest coordinate, hence the reversed iteration.
            # NOTE(review): range(n - 1, 0, -1) stops before index 0, so the
            # first decoded image is dropped (n-1 rows, not n) — confirm whether
            # range(n - 1, -1, -1) was intended.
            line = torch.cat([line[i] for i in range(n - 1, 0, -1)], dim=0)
            lines += [line]

        # Concatenate the columns side by side into one big image and display it.
        lines = torch.cat(lines, dim=-1)
        plt.figure(figsize=(20, 20))
        show_image(lines)
# Train the autoencoder for args.epochs epochs, optionally drawing HMC samples
# from the encoded representation, and periodically evaluate + save a decoded
# sample image. (Indentation reconstructed from a collapsed source line —
# statement nesting below is a best-effort reading; verify against the original.)
if args.cuda:
    autoencoder = autoencoder.cuda()

for epoch in range(1, args.epochs + 1):
    autoencoder.train()
    # NOTE(review): train_loss is initialized but never updated in this chunk.
    train_loss = 0
    # Materialize the whole loader so batches are indexable.
    # NOTE(review): rebuilt every epoch — could be hoisted out of the loop if
    # the loader is deterministic; confirm before changing.
    mnist_data = list(iter(train_loader))
    # NOTE(review): hard-coded 1000 batches will raise IndexError if the
    # loader yields fewer than 1000 batches — confirm dataset/batch size.
    for batch_idx in range(0, 1000):
        # mnist_data[batch_idx] is a (images, labels) pair; take the images.
        data = torch.FloatTensor(mnist_data[batch_idx][0])
        # NOTE(review): Variable is deprecated since PyTorch 0.4; tensors
        # carry autograd state directly.
        data = Variable(data)
        if args.cuda:
            data = data.cuda()
        optimizer.zero_grad()
        # Forward pass: reconstruction, VAE posterior params, and the latent code.
        recon_batch, mu, logvar, encoded_rep = autoencoder(data)
        if args.hmc:
            # Seed the HMC sampler with the current latent representation.
            init_x = encoded_rep.data.cpu().numpy()
            # presumably get_hmc_sample computes gradients as a side effect;
            # no loss/backward() is visible before optimizer.step() — TODO confirm
            # this is intentional, otherwise step() applies stale/zero gradients.
            hmc.get_hmc_sample(init_x, data, gpu=args.cuda)
        optimizer.step()
    # Periodic evaluation and sample generation.
    if epoch % args.evaluate_interval == 0:
        print("evaluating model")
        evaluate_autoencoder(epoch)
        # Decode a random latent vector and save it as an image.
        sample = Variable(torch.randn(1, args.output_size))
        if args.cuda:
            sample = sample.cuda()
        sample = autoencoder.decoder(sample).cpu()
        save_image(sample.data.view(-1, 1, args.input_size, args.input_size), \
            'results/sample_'+str(epoch)+'.png')