Example #1
def test_VAE(self):
    model = models.VAE(
        in_channels=3,
        out_channels=3,
        latent_channels=1,
        strides=[2, 2],
        hidden_channels=1,
        residual_channels=1,
    )
    self._test_multiple_channels(model)
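This variant of models.VAE is parameterized convolutionally, with latent_channels and strides in place of a flat latent_dim (compare Example #2). The sketch below drives the same configuration outside the test harness; the input size and the (logits, KL) return pair are assumptions inferred from Example #4's loss_fn, not guaranteed by the test above.

import torch
from pytorch_generative import models

model = models.VAE(
    in_channels=3,
    out_channels=3,
    latent_channels=1,
    strides=[2, 2],
    hidden_channels=1,
    residual_channels=1,
)
x = torch.rand(2, 3, 8, 8)  # spatial dims divisible by prod(strides) = 4 (assumption)
logits, kl_div = model(x)   # assumption: forward returns (reconstruction logits, KL term)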
Example #2
def test_VAE(self):
    model = models.VAE(
        in_channels=3,
        out_channels=3,
        in_size=5,
        latent_dim=1,
        hidden_channels=2,
        n_residual_blocks=1,
        residual_channels=1,
    )
    self._smoke_test(model, in_channels=3, test_sampling=False)
    # TODO(eugenhotaj): Create a function to test VAE sampling.
    model.sample(n_images=2)
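Here the VAE is configured with an explicit in_size and latent_dim, the same interface Example #3 trains on MNIST. A minimal sketch of exercising this configuration directly, assuming (as Example #3's loss_fn does) that the forward pass returns a (logits, vae_loss) pair:

import torch
from pytorch_generative import models

model = models.VAE(
    in_channels=3,
    out_channels=3,
    in_size=5,
    latent_dim=1,
    hidden_channels=2,
    n_residual_blocks=1,
    residual_channels=1,
)
x = torch.rand(2, 3, 5, 5)     # batch of 2 images matching in_size=5
logits, vae_loss = model(x)    # assumption: (reconstruction logits, VAE loss) pair
samples = model.sample(n_images=2)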
Example #3
def reproduce(n_epochs=457,
              batch_size=128,
              log_dir="/tmp/run",
              device="cuda",
              debug_loader=None):
    """Training script with defaults to reproduce results.

    The code inside this function is self contained and can be used as a top level
    training script, e.g. by copy/pasting it into a Jupyter notebook.

    Args:
        n_epochs: Number of epochs to train for.
        batch_size: Batch size to use for training and evaluation.
        log_dir: Directory where to log trainer state and TensorBoard summaries.
        device: Device to train on (either 'cuda' or 'cpu').
        debug_loader: Debug DataLoader which replaces the default training and
            evaluation loaders if not 'None'. Do not use unless you're writing unit
            tests.
    """
    import torch
    from torch import optim
    from torch.nn import functional as F
    from torch.optim import lr_scheduler
    from torch.utils import data
    from torchvision import datasets
    from torchvision import transforms

    from pytorch_generative import models
    from pytorch_generative import trainer

    transform = transforms.ToTensor()
    train_loader = debug_loader or data.DataLoader(
        datasets.MNIST(
            "/tmp/data", train=True, download=True, transform=transform),
        batch_size=batch_size,
        shuffle=True,
        num_workers=8,
    )
    test_loader = debug_loader or data.DataLoader(
        datasets.MNIST(
            "/tmp/data", train=False, download=True, transform=transform),
        batch_size=batch_size,
        num_workers=8,
    )

    model = models.VAE(
        in_channels=1,
        out_channels=1,
        in_size=28,
        latent_dim=10,
        hidden_channels=32,
        n_residual_blocks=2,
        residual_channels=16,
    )
    optimizer = optim.Adam(model.parameters(), lr=1e-3)
    scheduler = lr_scheduler.MultiplicativeLR(optimizer,
                                              lr_lambda=lambda _: 0.999977)

    def loss_fn(x, _, preds):
        # The model's forward pass returns (reconstruction logits, VAE loss).
        preds, vae_loss = preds
        recon_loss = F.binary_cross_entropy_with_logits(preds, x)
        # Up-weight the reconstruction term relative to the VAE loss.
        loss = recon_loss * 100 + vae_loss
        return {
            "recon_loss": recon_loss,
            "vae_loss": vae_loss,
            "loss": loss,
        }

    def sample_fn(model):
        # Map the decoder's logits to per-pixel probabilities in [0, 1].
        return torch.sigmoid(model.sample(n_images=64))

    model_trainer = trainer.Trainer(
        model=model,
        loss_fn=loss_fn,
        optimizer=optimizer,
        train_loader=train_loader,
        eval_loader=test_loader,
        lr_scheduler=scheduler,
        sample_epochs=5,
        sample_fn=sample_fn,
        log_dir=log_dir,
        device=device,
    )
    model_trainer.interleaved_train_and_eval(n_epochs)
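The docstring's debug_loader hook makes it possible to smoke-test this script without a GPU or an MNIST download. The loader below is a hypothetical stand-in built from random MNIST-shaped tensors; a sketch, assuming the Trainer runs with device="cpu":

import torch
from torch.utils import data

# Hypothetical debug loader: 8 fake 1x28x28 "images" with dummy labels.
fake_images = torch.rand(8, 1, 28, 28)
fake_labels = torch.zeros(8, dtype=torch.long)
debug = data.DataLoader(data.TensorDataset(fake_images, fake_labels), batch_size=4)

reproduce(n_epochs=1, device="cpu", debug_loader=debug)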
Example #4
def reproduce(n_epochs=500,
              batch_size=128,
              log_dir="/tmp/run",
              device="cuda",
              debug_loader=None):
    """Training script with defaults to reproduce results.

    The code inside this function is self contained and can be used as a top level
    training script, e.g. by copy/pasting it into a Jupyter notebook.

    Args:
        n_epochs: Number of epochs to train for.
        batch_size: Batch size to use for training and evaluation.
        log_dir: Directory where to log trainer state and TensorBoard summaries.
        device: Device to train on (either 'cuda' or 'cpu').
        debug_loader: Debug DataLoader which replaces the default training and
            evaluation loaders if not 'None'. Do not use unless you're writing unit
            tests.
    """
    import torch
    from torch import optim
    from torch.nn import functional as F

    from pytorch_generative import datasets
    from pytorch_generative import models
    from pytorch_generative import trainer

    train_loader, test_loader = debug_loader, debug_loader
    if train_loader is None:
        train_loader, test_loader = datasets.get_mnist_loaders(
            batch_size, dynamically_binarize=True, resize_to_32=True)

    model = models.VAE(
        in_channels=1,
        out_channels=1,
        latent_channels=16,
        strides=[2, 2, 2, 2],
        hidden_channels=64,
        residual_channels=32,
    )
    optimizer = optim.Adam(model.parameters(), lr=5e-4)

    def loss_fn(x, _, preds):
        # The model's forward pass returns (reconstruction logits, per-image KL).
        preds, kl_div = preds
        recon_loss = F.binary_cross_entropy_with_logits(preds,
                                                        x,
                                                        reduction="none")
        # Sum the per-pixel loss over channels/height/width for a per-image total.
        recon_loss = recon_loss.sum(dim=(1, 2, 3))
        # Per-image negative ELBO: minimizing it maximizes the evidence lower bound.
        elbo = recon_loss + kl_div

        return {
            "recon_loss": recon_loss.mean(),
            "kl_div": kl_div.mean(),
            "loss": elbo.mean(),
        }

    def sample_fn(model):
        sample = torch.sigmoid(model.sample(n_samples=16))
        # Binarize at 0.5 to match the dynamically binarized training data.
        return torch.where(sample < 0.5, torch.zeros_like(sample),
                           torch.ones_like(sample))

    model_trainer = trainer.Trainer(
        model=model,
        loss_fn=loss_fn,
        optimizer=optimizer,
        train_loader=train_loader,
        eval_loader=test_loader,
        sample_epochs=1,
        sample_fn=sample_fn,
        log_dir=log_dir,
        device=device,
    )
    model_trainer.interleaved_train_and_eval(n_epochs)
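To make the loss above concrete: it is the per-image negative ELBO, a summed per-pixel binary cross-entropy plus the KL divergence of q(z|x) from the prior. The standalone sketch below reproduces the same arithmetic on dummy tensors; the shapes are assumptions matching resize_to_32 and a batch of 4:

import torch
from torch.nn import functional as F

x = torch.randint(0, 2, (4, 1, 32, 32)).float()  # dynamically binarized images
logits = torch.randn(4, 1, 32, 32)               # decoder reconstruction logits
kl_div = torch.rand(4)                           # per-image KL(q(z|x) || p(z))

recon = F.binary_cross_entropy_with_logits(logits, x, reduction="none")
recon = recon.sum(dim=(1, 2, 3))    # per-image reconstruction loss
neg_elbo = (recon + kl_div).mean()  # the "loss" reported by loss_fn above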