def plot_comparison(
    n_display: int, x_og: torch.Tensor, p_x: D.Distribution, input_dims: Tuple[int, ...]
) -> Figure:
    fig = plt.figure(figsize=(6, 6))
    gs = fig.add_gridspec(
        4, n_display, width_ratios=[1] * n_display, height_ratios=[1, 1, 1, 1]
    )
    gs.update(wspace=0, hspace=0)
    x_hat = batch_reshape(p_x.sample(), input_dims).clip(0, 1)
    x_mu = batch_reshape(p_x.mean, input_dims).clip(0, 1)
    x_var = batch_reshape(p_x.variance, input_dims).clip(0, 1)
    for n in range(n_display):
        for k in range(4):
            ax = plt.subplot(gs[k, n])
            ax = disable_ticks(ax)
            # Original
            # imshow expects (H, W, C), hence the permute from (C, H, W)
            if k == 0:
                ax.imshow(x_og[n, :].permute(1, 2, 0), vmin=0, vmax=1)
            # Mean
            elif k == 1:
                ax.imshow(x_mu[n, :].permute(1, 2, 0), vmin=0, vmax=1)
            # Variance
            elif k == 2:
                ax.imshow(x_var[n, :].permute(1, 2, 0))
            # Sample
            elif k == 3:
                ax.imshow(x_hat[n, :].permute(1, 2, 0), vmin=0, vmax=1)
    return fig
def plot_comparison(n_display, x_og, p_x, input_dims):
    fig = plt.figure(figsize=(6, 6))
    gs = fig.add_gridspec(
        4, n_display, width_ratios=[1] * n_display, height_ratios=[1, 1, 1, 1]
    )
    gs.update(wspace=0, hspace=0)
    x_hat = batch_reshape(p_x.sample(), input_dims)
    x_mu = batch_reshape(p_x.mean, input_dims)
    x_var = batch_reshape(p_x.variance, input_dims)
    for n in range(n_display):
        for k in range(4):
            ax = plt.subplot(gs[k, n])
            ax = disable_ticks(ax)
            # Original
            if k == 0:
                ax.imshow(x_og[n, :][0], cmap="binary", vmin=0, vmax=1)
            # Mean
            elif k == 1:
                ax.imshow(x_mu[n, :][0], cmap="binary", vmin=0, vmax=1)
            # Variance
            elif k == 2:
                ax.imshow(x_var[n, :][0], cmap="binary")
            # Sample
            elif k == 3:
                ax.imshow(x_hat[n, :][0], cmap="binary", vmin=0, vmax=1)
    return fig
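# Usage sketch for plot_comparison: `model` is assumed to be a VanillaVAE/V3AE
# exposing `_run_step` and `input_dims` (as in the callback below); the 5-way
# unpacking follows the VanillaVAE `_run_step` signature. Names are
# illustrative, not confirmed by this file.
#
#     x, _ = next(iter(val_dataloader))
#     with torch.no_grad():
#         _, p_x_z, _, _, _ = model._run_step(x)
#     fig = plot_comparison(4, x, p_x_z, model.input_dims)
#     fig.savefig("reconstructions.png")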
def on_epoch_end(self, trainer: Trainer, pl_module: LightningModule) -> None:
    # Periodically log a grid of original/mean/variance/sample images.
    if (trainer.current_epoch == 0) or (
        trainer.current_epoch % self.save_every_epochs == 0
    ):
        x, y = next(iter(pl_module.val_dataloader()))
        x, y = x.to(pl_module.device), y.to(pl_module.device)
        pl_module.eval()
        with torch.no_grad():
            if isinstance(pl_module, VanillaVAE):
                x_hat, p_x_z, z, q_z_x, p_z = pl_module._run_step(x)
            elif isinstance(pl_module, V3AE):
                x_hat, p_x_z, λ, q_λ_z, p_λ, z, q_z_x, p_z = pl_module._run_step(x)
        x_mean = batch_reshape(p_x_z.mean, pl_module.input_dims)
        x_var = batch_reshape(p_x_z.variance, pl_module.input_dims)

        fig = plt.figure()
        n_display = 4
        gs = fig.add_gridspec(
            4, n_display, width_ratios=[1] * n_display, height_ratios=[1, 1, 1, 1]
        )
        gs.update(wspace=0, hspace=0)
        for n in range(n_display):
            for k in range(4):
                ax = plt.subplot(gs[k, n])
                ax = disable_ticks(ax)
                # Original
                if k == 0:
                    ax.imshow(x[n, :][0].cpu(), cmap="binary", vmin=0, vmax=1)
                # Mean
                elif k == 1:
                    ax.imshow(x_mean[n, :][0].cpu(), cmap="binary", vmin=0, vmax=1)
                # Variance
                elif k == 2:
                    ax.imshow(x_var[n, :][0].cpu(), cmap="binary")
                # Sample
                elif k == 3:
                    ax.imshow(x_hat[n, :][0].cpu(), cmap="binary", vmin=0, vmax=1)

        str_title = f"{pl_module.__class__.__name__}_images"
        trainer.logger.experiment.add_image(
            str_title,
            plot_to_image(fig),
            global_step=trainer.global_step,
            dataformats="CHW",
        )
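# Wiring the callback into training (sketch): `on_epoch_end` above belongs to
# a pytorch_lightning.Callback subclass; `ImageSampleCallback` and the
# `save_every_epochs` constructor argument are assumed names, not confirmed by
# this file.
#
#     from pytorch_lightning import Trainer
#
#     trainer = Trainer(
#         max_epochs=100,
#         callbacks=[ImageSampleCallback(save_every_epochs=10)],
#     )
#     trainer.fit(model)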
def latent_to_moments(model, z):
    if isinstance(model, VanillaVAE):
        μ_z, std_z = model.decoder_μ(z), model.decoder_std(z)
        _, p_x_z = model.sample_generative(μ_z, std_z)
    elif isinstance(model, V3AE):
        batch_size = z.shape[1]
        z = torch.reshape(z, [-1, *model.latent_dims])
        μ_z, α_z, β_z = model.decoder_μ(z), model.decoder_α(z), model.decoder_β(z)
        μ_z = torch.reshape(μ_z, [-1, batch_size, model.input_size])
        α_z = torch.reshape(α_z, [-1, batch_size, model.input_size])
        β_z = torch.reshape(β_z, [-1, batch_size, model.input_size])
        _, p_x_z = model.sample_generative(μ_z, α_z, β_z)
    return batch_reshape(p_x_z.mean, model.input_dims), batch_reshape(
        p_x_z.variance, model.input_dims
    )
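# Example (sketch): sweep a grid of 2-D latent codes through latent_to_moments
# to visualise how the decoder's mean and variance vary across latent space.
# Assumes a 2-D latent and a [samples, batch, latent] layout matching the
# reshapes above; the grid bounds and resolution are illustrative.
#
#     grid = torch.meshgrid(torch.linspace(-3, 3, 20), torch.linspace(-3, 3, 20))
#     zs = torch.stack(grid, dim=-1).reshape(1, -1, 2)
#     with torch.no_grad():
#         x_mean, x_var = latent_to_moments(model, zs)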
def decode_from_latent(
    model: BaseVAE, z: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    if isinstance(model, VanillaVAE):
        μ_z, std_z = (
            model.decoder_μ(z.reshape(-1, model.latent_size)),
            model.decoder_std(z.reshape(-1, model.latent_size)),
        )
        x_hat, p_x = model.sample_generative(μ_z, std_z)
    elif isinstance(model, V3AE):
        _, μ_z, α_z, β_z = model.parametrise_z(z)
        x_hat, p_x = model.sample_generative(μ_z, α_z, β_z)
    x_hat = batch_reshape(x_hat, model.input_dims)
    x_mu = batch_reshape(p_x.mean, model.input_dims)
    x_var = batch_reshape(p_x.variance, model.input_dims)
    return x_hat, x_mu, x_var
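# Example (sketch): decode a linear interpolation between two latent codes.
# `z_a` and `z_b` are illustrative tensors of shape [1, latent_size]; their
# shape must match what `decoder_μ` / `parametrise_z` expect for the model.
#
#     alphas = torch.linspace(0, 1, 8).view(-1, 1)
#     z_interp = (1 - alphas) * z_a + alphas * z_b
#     with torch.no_grad():
#         x_hat, x_mu, x_var = decode_from_latent(model, z_interp)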