def show_samples(model, config, pdf=None, visdom=None, size=32, title="Generated images"):
    '''Plot samples from a generative model in [pdf] and/or in [visdom].'''

    # Remember current mode and switch to evaluation-mode
    was_training = model.training
    model.eval()

    # Draw [size] samples from the model and reshape them into image format
    samples, _labels = model.sample(size)
    images = samples.view(
        -1, config['channels'], config['size'], config['size']
    ).cpu()

    # Render the generated images to the requested destination(s)
    if pdf is not None:
        visual_plt.plot_images_from_tensor(images, pdf, title=title)
    if visdom is not None:
        visual_visdom.visualize_images(
            tensor=images,
            name='generated samples ({})'.format(visdom["graph"]),
            env=visdom["env"],
        )

    # Restore the mode the model was in on entry
    model.train(mode=was_training)
def show_reconstruction(model, dataset, config, pdf=None, visdom=None, size=32, task=None, collate_fn=None):
    '''Plot reconstructed examples by an auto-encoder [model] on [dataset], in [pdf] and/or in [visdom].'''

    # Remember current mode and switch to evaluation-mode
    was_training = model.training
    model.eval()

    # Fetch one batch of [size] examples (deterministic order)
    loader = utils.get_data_loader(dataset, size, shuffle=False,
                                   cuda=model._is_on_cuda(), collate_fn=collate_fn)
    data, labels = next(iter(loader))
    device = model._device()
    data, labels = data.to(device), labels.to(device)

    # Run the auto-encoder without tracking gradients
    with torch.no_grad():
        recon_batch, y_hat, mu, logvar, z = model(data, full=True)

    # Stack originals on top of their reconstructions, move to CPU for plotting
    originals = data.view(-1, config['channels'], config['size'], config['size'])[:size]
    reconstructions = recon_batch.view(-1, config['channels'], config['size'], config['size'])[:size]
    comparison = torch.cat([originals, reconstructions]).cpu()
    image_tensor = comparison.view(-1, config['channels'], config['size'], config['size'])

    # Choose a roughly-square grid for 2*size images
    nrow = int(np.ceil(np.sqrt(size * 2)))

    # Render to the requested destination(s)
    if pdf is not None:
        task_stm = "" if task is None else " (task {})".format(task)
        visual_plt.plot_images_from_tensor(image_tensor, pdf, nrow=nrow,
                                           title="Reconstructions" + task_stm)
    if visdom is not None:
        visual_visdom.visualize_images(
            tensor=image_tensor,
            name='Reconstructions ({})'.format(visdom["graph"]),
            env=visdom["env"],
            nrow=nrow,
        )

    # Restore the mode the model was in on entry
    model.train(mode=was_training)
# NOTE(review): this is a second definition of [show_reconstruction]; being defined
# later in the file, it shadows the earlier variant. Consider removing one of them.
def show_reconstruction(model, dataset, config, pdf=None, visdom=None, size=32, task=None, collate_fn=None):
    '''Plot reconstructed examples by an auto-encoder [model] on [dataset], in [pdf] and/or in [visdom].

    Args:
        model: auto-encoder; called as model(data, full=True) and expected to
            return (reconstruction, y_hat, mu, logvar, z)
        dataset: dataset to draw the batch of examples from
        config: dict with keys 'channels' and 'size' describing image shape
        pdf: optional PdfPages-like object to plot into
        visdom: optional dict with keys 'graph' and 'env' for visdom plotting
        size (int): number of examples to reconstruct
        task: optional task-id, only used in the pdf plot-title
        collate_fn: optional collate-function for the data-loader
    '''
    cuda = model._is_on_cuda()

    # Set model to evaluation-mode (remember current mode to restore later)
    mode = model.training
    model.eval()

    # Get one batch of data
    data_loader = utils.get_data_loader(dataset, size, cuda=cuda, collate_fn=collate_fn)
    (data, labels) = next(iter(data_loader))

    # Evaluate model
    # FIX: `Variable(..., volatile=True)` was removed in PyTorch 0.4+;
    # use `torch.no_grad()` instead, and move the batch with `.cuda()` as before.
    data = data.cuda() if cuda else data
    with torch.no_grad():
        recon_batch, y_hat, mu, logvar, z = model(data, full=True)

    # Plot original and reconstructed images side by side
    comparison = torch.cat([
        data.view(-1, config['channels'], config['size'], config['size'])[:size],
        recon_batch.view(-1, config['channels'], config['size'], config['size'])[:size]
    ]).cpu()
    # FIX: no `.data` needed on plain tensors produced under no_grad()
    image_tensor = comparison.view(-1, config['channels'], config['size'], config['size'])

    if pdf is not None:
        task_stm = "" if task is None else " (task {})".format(task)
        visual_plt.plot_images_from_tensor(image_tensor, pdf, nrow=8,
                                           title="Reconstructions" + task_stm)
    if visdom is not None:
        visual_visdom.visualize_images(
            tensor=image_tensor,
            name='reconstructed samples ({})'.format(visdom["graph"]),
            env=visdom["env"],
        )

    # Set model back to initial mode
    model.train(mode=mode)