Example #1
    def forward_with_intermediate(
            self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        encoded_x = self.encoder(x)
        decoded_x = self.decoder(encoded_x)

        if self.force_decoded_size_same_as_input:
            # crop or pad the reconstruction so its spatial size matches the input
            decoded_x = crop_or_pad_fun(decoded_x, x.shape[2:])

        return encoded_x, decoded_x
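
All of these examples use crop_or_pad_fun to force a tensor's trailing (spatial) dimensions to a target shape. The library's actual implementation is not shown on this page; as a rough mental model, a center-crop-or-zero-pad helper could look like the following sketch (hypothetical reimplementation, names and behavior assumed, not trw's real code):

import torch
import torch.nn.functional as F

def crop_or_pad_sketch(x: torch.Tensor, target_shape) -> torch.Tensor:
    # center-crop or zero-pad the spatial dims of x (N, C, *spatial) to target_shape;
    # illustrative only, the real trw crop_or_pad_fun may behave differently
    pads = []                            # F.pad lists pads from the last dim backwards
    slices = [slice(None), slice(None)]  # keep batch and channel dims untouched
    for current, target in zip(x.shape[2:], target_shape):
        diff = target - current
        if diff >= 0:
            # output is larger: split the zero padding between both sides
            pads = [diff // 2, diff - diff // 2] + pads
            slices.append(slice(None))
        else:
            # output is smaller: take a centered crop window
            start = (-diff) // 2
            pads = [0, 0] + pads
            slices.append(slice(start, start + target))
    x = x[tuple(slices)]
    if any(p > 0 for p in pads):
        x = F.pad(x, pads)
    return x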
Example #2
    def forward(self, x, y):
        mu, logvar = self.encode(x)
        recon = self.decode(mu, logvar, y)

        # make the recon exactly the same size!
        recon = crop_or_pad_fun(recon, x.shape[2:])
        assert recon.shape == x.shape, f'recon ({recon.shape}) and x ({x.shape}) must have the same shape; ' \
                                       f'problem with the decoder output!'
        return recon, mu, logvar
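
Here, decode takes the posterior parameters and the condition y directly, which suggests a conditional VAE whose decode reparameterizes internally and concatenates the label with the latent code. A plausible sketch of such a method (assumed; the snippet itself does not show it):

    def decode(self, mu, logvar, y):
        # sample a latent code, then condition it on the label before decoding
        z = self.reparameterize(self.training, mu, logvar)
        return self.decoder(torch.cat([z, y], dim=1))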
Example #3
    def forward(self, x):
        mu, logvar = self.encode(x)
        z = self.reparameterize(self.training, mu, logvar)

        # reshape the flat latent to (N, C, 1, ..., 1) so the convolutional decoder can upsample it
        shape = [z.shape[0], z.shape[1]] + [1] * self.cnn_dim
        nd_z = z.view(shape)
        recon = self.decoder(nd_z)

        # make the recon exactly the same size!
        recon = crop_or_pad_fun(recon, x.shape[2:])
        assert recon.shape == x.shape, f'recon ({recon.shape}) and x ({x.shape}) must have the same shape; ' \
                                       f'problem with the decoder output!'
        return recon, mu, logvar
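
The snippets do not show reparameterize; the standard VAE reparameterization trick, matching the reparameterize(training, mu, logvar) signature used above, would look roughly like this (sketch):

import torch

def reparameterize(training: bool, mu: torch.Tensor, logvar: torch.Tensor) -> torch.Tensor:
    # at train time, sample z = mu + eps * sigma with eps ~ N(0, I) so that
    # gradients flow through mu and logvar; at eval time, return the mean
    if training:
        std = torch.exp(0.5 * logvar)
        eps = torch.randn_like(std)
        return mu + eps * std
    return mu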
Example #4
    def forward(self, batch):
        images = batch['images']
        recon, mu, logvar = self.autoencoder.forward(images)
        # draw unconditioned samples from the prior and match their size to the input images
        random_samples = self.autoencoder.sample(len(images))
        random_samples = crop_or_pad_fun(random_samples, images.shape[2:])

        loss = self.autoencoder.loss_function(recon,
                                              images,
                                              mu,
                                              logvar,
                                              kullback_leibler_weight=0.1)
        return {
            'loss': trw.train.OutputLoss(loss),
            'recon': trw.train.OutputEmbedding(recon),
            'random_samples': trw.train.OutputEmbedding(random_samples)
        }
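
The exact trw loss_function is not reproduced on this page; a VAE loss accepting a kullback_leibler_weight is typically the reconstruction error plus a scaled closed-form KL divergence against the unit Gaussian prior, along these lines (illustrative sketch):

import torch
import torch.nn.functional as F

def vae_loss_sketch(recon, x, mu, logvar, kullback_leibler_weight=0.1):
    # reconstruction term: how closely the decoder reproduces the input
    recon_loss = F.mse_loss(recon, x, reduction='sum')
    # KL(q(z|x) || N(0, I)) in closed form for a diagonal Gaussian posterior
    kl = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    return recon_loss + kullback_leibler_weight * kl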
Example #5
    model_fn=lambda options: Net(),
    optimizers_fn=lambda datasets, model: trw.train.create_adam_optimizers_scheduler_step_lr_fn(
        datasets=datasets,
        model=model,
        learning_rate=0.001,
        step_size=120,
        gamma=0.1))

model.eval()  # switch the model (and all submodules) to evaluation mode
nb_images = 40

device = trw.train.get_device(model)
latent = torch.randn([nb_images, model.latent_size], device=device)
# condition every generated sample on digit class 7 (one-hot over 10 classes)
y = one_hot(torch.ones([nb_images], dtype=torch.long, device=device) * 7, 10)
latent_y = torch.cat([latent, y], dim=1)
# reshape to (N, C, 1, 1) so the convolutional decoder can upsample it
latent_y = latent_y.view(latent_y.shape[0], latent_y.shape[1], 1, 1)
generated = model.autoencoder.decoder(latent_y)

fig, axes = plt.subplots(nrows=1,
                         ncols=nb_images,
                         figsize=(nb_images, 2.5),
                         sharey=True)
decoded_images = crop_or_pad_fun(generated, [28, 28])
image_width = decoded_images.shape[2]

for ax, img in zip(axes, decoded_images):
    curr_img = img.detach().to(torch.device('cpu'))
    # single-channel image: drop the channel dimension for imshow
    ax.imshow(curr_img.view((image_width, image_width)), cmap='binary')

plt.show()
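
Note that the bare one_hot(...) in Example #5 is not a Python builtin; it presumably comes from trw or a local helper. A float one-hot encoder compatible with the torch.cat([latent, y], dim=1) call above could be written as (sketch):

import torch

def one_hot(indices: torch.Tensor, num_classes: int) -> torch.Tensor:
    # torch.nn.functional.one_hot returns int64; cast to float so the result
    # can be concatenated with the float latent vector
    return torch.nn.functional.one_hot(indices, num_classes).float()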