Example #1
def reproduce(n_epochs=457,
              batch_size=256,
              log_dir="/tmp/run",
              device="cuda",
              debug_loader=None):
    """Training script with defaults to reproduce results.

    The code inside this function is self-contained and can be used as a top-level
    training script, e.g. by copy/pasting it into a Jupyter notebook.

    Args:
        n_epochs: Number of epochs to train for.
        batch_size: Batch size to use for training and evaluation.
        log_dir: Directory in which to log trainer state and TensorBoard summaries.
        device: Device to train on (either 'cuda' or 'cpu').
        debug_loader: Debug DataLoader which replaces the default training and
            evaluation loaders if not 'None'. Do not use unless you're writing unit
            tests.
    """
    from torch import optim
    from torch.nn import functional as F
    from torch.optim import lr_scheduler

    from pytorch_generative import datasets
    from pytorch_generative import models
    from pytorch_generative import trainer

    train_loader, test_loader = debug_loader, debug_loader
    if train_loader is None:
        train_loader, test_loader = datasets.get_mnist_loaders(
            batch_size, dynamically_binarize=True)

    model = models.PixelCNN(
        in_channels=1,
        out_channels=1,
        n_residual=15,
        residual_channels=16,
        head_channels=32,
    )
    optimizer = optim.Adam(model.parameters(), lr=1e-3)
    scheduler = lr_scheduler.MultiplicativeLR(optimizer,
                                              lr_lambda=lambda _: 0.999977)

    def loss_fn(x, _, preds):
        batch_size = x.shape[0]
        x, preds = x.view((batch_size, -1)), preds.view((batch_size, -1))
        loss = F.binary_cross_entropy_with_logits(preds, x, reduction="none")
        return loss.sum(dim=1).mean()

    model_trainer = trainer.Trainer(
        model=model,
        loss_fn=loss_fn,
        optimizer=optimizer,
        train_loader=train_loader,
        eval_loader=test_loader,
        lr_scheduler=scheduler,
        log_dir=log_dir,
        device=device,
    )
    model_trainer.interleaved_train_and_eval(n_epochs)
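
# A quick smoke test of the script above (settings here are illustrative, not the
# defaults used to reproduce results): run on CPU for one epoch to check the pipeline.
if __name__ == "__main__":
    reproduce(n_epochs=1, batch_size=64, log_dir="/tmp/pixelcnn_smoke", device="cpu")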
Example #2
def reproduce(
    n_epochs=50,
    batch_size=512,
    log_dir="/tmp/run",
    n_gpus=1,
    device_id=0,
    debug_loader=None,
):
    """Training script with defaults to reproduce results.

    The code inside this function is self-contained and can be used as a top-level
    training script, e.g. by copy/pasting it into a Jupyter notebook.

    Args:
        n_epochs: Number of epochs to train for.
        batch_size: Batch size to use for training and evaluation.
        log_dir: Directory in which to log trainer state and TensorBoard summaries.
        n_gpus: Number of GPUs to use for training the model. If 0, uses CPU.
        device_id: The device_id of the current GPU when training on multiple GPUs.
        debug_loader: Debug DataLoader which replaces the default training and
            evaluation loaders if not 'None'. Do not use unless you're writing unit
            tests.
    """
    from torch import optim
    from torch.nn import functional as F
    from torch.optim import lr_scheduler

    from pytorch_generative import datasets
    from pytorch_generative import models
    from pytorch_generative import trainer

    train_loader, test_loader = debug_loader, debug_loader
    if train_loader is None:
        train_loader, test_loader = datasets.get_mnist_loaders(
            batch_size, dynamically_binarize=True)

    model = models.NADE(input_dim=784, hidden_dim=500)
    optimizer = optim.Adam(model.parameters())

    def loss_fn(x, _, preds):
        batch_size = x.shape[0]
        x, preds = x.view((batch_size, -1)), preds.view((batch_size, -1))
        loss = F.binary_cross_entropy_with_logits(preds, x, reduction="none")
        return loss.sum(dim=1).mean()

    model_trainer = trainer.Trainer(
        model=model,
        loss_fn=loss_fn,
        optimizer=optimizer,
        train_loader=train_loader,
        eval_loader=test_loader,
        log_dir=log_dir,
        n_gpus=n_gpus,
        device_id=device_id,
    )
    model_trainer.interleaved_train_and_eval(n_epochs)
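
# Sketch of the unit-testing hook described in the docstring: a tiny in-memory
# DataLoader passed as debug_loader replaces both the train and eval loaders.
# The shapes below are assumptions chosen to match NADE's 784-dimensional input.
import torch
from torch.utils import data

def test_reproduce_runs():
    fake_images = torch.bernoulli(torch.full((8, 1, 28, 28), 0.5))
    fake_labels = torch.zeros(8, dtype=torch.long)
    tiny_loader = data.DataLoader(data.TensorDataset(fake_images, fake_labels), batch_size=4)
    reproduce(n_epochs=1, log_dir="/tmp/nade_test", n_gpus=0, debug_loader=tiny_loader)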
Example #3
def reproduce(
    n_epochs=500,
    batch_size=128,
    log_dir="/tmp/run",
    n_gpus=1,
    device_id=0,
    debug_loader=None,
):
    """Training script with defaults to reproduce results.

    The code inside this function is self-contained and can be used as a top-level
    training script, e.g. by copy/pasting it into a Jupyter notebook.

    Args:
        n_epochs: Number of epochs to train for.
        batch_size: Batch size to use for training and evaluation.
        log_dir: Directory in which to log trainer state and TensorBoard summaries.
        n_gpus: Number of GPUs to use for training the model. If 0, uses CPU.
        device_id: The device_id of the current GPU when training on multiple GPUs.
        debug_loader: Debug DataLoader which replaces the default training and
            evaluation loaders if not 'None'. Do not use unless you're writing unit
            tests.
    """
    import torch
    from torch import optim
    from torch.nn import functional as F

    from pytorch_generative import datasets
    from pytorch_generative import models
    from pytorch_generative import trainer

    train_loader, test_loader = debug_loader, debug_loader
    if train_loader is None:
        train_loader, test_loader = datasets.get_mnist_loaders(
            batch_size, dynamically_binarize=True, resize_to_32=True
        )

    # NOTE: StackConfig is defined alongside VeryDeepVAE in pytorch_generative's
    # models package; import it from there when running this snippet on its own.
    stack_configs = [
        StackConfig(n_encoder_blocks=3, n_decoder_blocks=5),
        StackConfig(n_encoder_blocks=3, n_decoder_blocks=5),
        StackConfig(n_encoder_blocks=2, n_decoder_blocks=4),
        StackConfig(n_encoder_blocks=2, n_decoder_blocks=3),
        StackConfig(n_encoder_blocks=2, n_decoder_blocks=2),
        StackConfig(n_encoder_blocks=1, n_decoder_blocks=1),
    ]

    model = models.VeryDeepVAE(
        in_channels=1,
        out_channels=1,
        input_resolution=32,
        stack_configs=stack_configs,
        latent_channels=16,
        hidden_channels=64,
        bottleneck_channels=32,
    )
    optimizer = optim.Adam(model.parameters(), lr=5e-4)

    def loss_fn(x, _, preds):
        preds, kl_div = preds
        recon_loss = F.binary_cross_entropy_with_logits(preds, x, reduction="none")
        recon_loss = recon_loss.sum(dim=(1, 2, 3))
        elbo = recon_loss + kl_div
        return {
            "recon_loss": recon_loss.mean(),
            "kl_div": kl_div.mean(),
            "loss": elbo.mean(),
        }

    def sample_fn(model):
        sample = torch.sigmoid(model.sample(n_samples=16))
        return torch.where(
            sample < 0.5, torch.zeros_like(sample), torch.ones_like(sample)
        )

    model_trainer = trainer.Trainer(
        model=model,
        loss_fn=loss_fn,
        optimizer=optimizer,
        train_loader=train_loader,
        eval_loader=test_loader,
        sample_epochs=1,
        sample_fn=sample_fn,
        log_dir=log_dir,
        n_gpus=n_gpus,
        device_id=device_id,
    )
    model_trainer.interleaved_train_and_eval(n_epochs)
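
# The "loss" entry above is the per-image negative ELBO in nats: the Bernoulli
# reconstruction term summed over all 32*32 pixels plus the KL term. A small helper
# (not part of pytorch_generative) for converting it to the commonly reported
# bits-per-dim scale:
import math

def nats_per_image_to_bits_per_dim(nll_nats, n_dims=32 * 32):
    return nll_nats / (n_dims * math.log(2))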
Example #4
def reproduce(
    n_epochs=50, batch_size=512, log_dir="/tmp/run", device="cuda", debug_loader=None
):
    """Training script with defaults to reproduce results.

    The code inside this function is self-contained and can be used as a top-level
    training script, e.g. by copy/pasting it into a Jupyter notebook.

    Args:
        n_epochs: Number of epochs to train for.
        batch_size: Batch size to use for training and evaluation.
        log_dir: Directory in which to log trainer state and TensorBoard summaries.
        device: Device to train on (either 'cuda' or 'cpu').
        debug_loader: Debug DataLoader which replaces the default training and
            evaluation loaders if not 'None'. Do not use unless you're writing unit
            tests.
    """
    from torch import optim
    from torch import distributions
    from torch.nn import functional as F
    from torch.optim import lr_scheduler
    from torch.utils import data
    from torchvision import datasets
    from torchvision import transforms

    from pytorch_generative import trainer
    from pytorch_generative import models

    transform = transforms.Compose(
        [transforms.ToTensor(), lambda x: distributions.Bernoulli(probs=x).sample()]
    )
    train_loader = debug_loader or data.DataLoader(
        datasets.MNIST("/tmp/data", train=True, download=True, transform=transform),
        batch_size=batch_size,
        shuffle=True,
        num_workers=8,
    )
    test_loader = debug_loader or data.DataLoader(
        datasets.MNIST("/tmp/data", train=False, download=True, transform=transform),
        batch_size=batch_size,
        num_workers=8,
    )

    model = models.NADE(input_dim=784, hidden_dim=500)
    optimizer = optim.Adam(model.parameters())

    def loss_fn(x, _, preds):
        batch_size = x.shape[0]
        x, preds = x.view((batch_size, -1)), preds.view((batch_size, -1))
        loss = F.binary_cross_entropy_with_logits(preds, x, reduction="none")
        return loss.sum(dim=1).mean()

    model_trainer = trainer.Trainer(
        model=model,
        loss_fn=loss_fn,
        optimizer=optimizer,
        train_loader=train_loader,
        eval_loader=test_loader,
        log_dir=log_dir,
        device=device,
    )
    model_trainer.interleaved_train_and_eval(n_epochs)
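
# The lambda inside the transform above performs dynamic binarization: each pixel
# intensity in [0, 1] is used as the parameter of a Bernoulli distribution and is
# resampled to {0, 1} every time the image is loaded. A tiny illustration:
import torch
from torch import distributions

probs = torch.tensor([[0.1, 0.9], [0.5, 0.0]])
binary = distributions.Bernoulli(probs=probs).sample()  # values in {0., 1.}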
Example #5
def reproduce(n_epochs=457,
              batch_size=128,
              log_dir="/tmp/run",
              device="cuda",
              debug_loader=None):
    """Training script with defaults to reproduce results.

    The code inside this function is self-contained and can be used as a top-level
    training script, e.g. by copy/pasting it into a Jupyter notebook.

    Args:
        n_epochs: Number of epochs to train for.
        batch_size: Batch size to use for training and evaluation.
        log_dir: Directory in which to log trainer state and TensorBoard summaries.
        device: Device to train on (either 'cuda' or 'cpu').
        debug_loader: Debug DataLoader which replaces the default training and
            evaluation loaders if not 'None'. Do not use unless you're writing unit
            tests.
    """
    from torch import optim
    from torch.nn import functional as F
    from torch.optim import lr_scheduler

    from pytorch_generative import datasets
    from pytorch_generative import models
    from pytorch_generative import trainer

    train_loader, test_loader = debug_loader, debug_loader
    if train_loader is None:
        train_loader, test_loader = datasets.get_cifar10_loaders(
            batch_size, normalize=True)

    model = models.VQVAE2(
        in_channels=3,
        out_channels=3,
        hidden_channels=128,
        residual_channels=64,
        n_residual_blocks=2,
        n_embeddings=512,
        embedding_dim=64,
    )
    optimizer = optim.Adam(model.parameters(), lr=2e-4)
    scheduler = lr_scheduler.MultiplicativeLR(optimizer,
                                              lr_lambda=lambda _: 0.999977)

    def loss_fn(x, _, preds):
        preds, vq_loss = preds
        recon_loss = F.mse_loss(preds, x)
        loss = recon_loss + 0.25 * vq_loss

        return {
            "vq_loss": vq_loss,
            "reconstruction_loss": recon_loss,
            "loss": loss,
        }

    model_trainer = trainer.Trainer(
        model=model,
        loss_fn=loss_fn,
        optimizer=optimizer,
        train_loader=train_loader,
        eval_loader=test_loader,
        lr_scheduler=scheduler,
        log_dir=log_dir,
        device=device,
    )
    model_trainer.interleaved_train_and_eval(n_epochs)
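
# Sketch of inspecting reconstructions after training; it assumes the same
# (reconstruction, vq_loss) output convention that loss_fn relies on above.
# `trained_model` and `loader` are placeholders, not pytorch_generative names.
import torch
from torch.nn import functional as F

def reconstruction_mse(trained_model, loader):
    trained_model.eval()
    with torch.no_grad():
        images, _ = next(iter(loader))
        recon, _ = trained_model(images)
        return F.mse_loss(recon, images).item()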
Example #6
def reproduce(
    n_epochs=500, batch_size=128, log_dir="/tmp/run", device="cuda", debug_loader=None
):
    """Training script with defaults to reproduce results.

    The code inside this function is self-contained and can be used as a top-level
    training script, e.g. by copy/pasting it into a Jupyter notebook.

    Args:
        n_epochs: Number of epochs to train for.
        batch_size: Batch size to use for training and evaluation.
        log_dir: Directory in which to log trainer state and TensorBoard summaries.
        device: Device to train on (either 'cuda' or 'cpu').
        debug_loader: Debug DataLoader which replaces the default training and
            evaluation loaders if not 'None'. Do not use unless you're writing unit
            tests.
    """
    import torch
    from torch import optim
    from torch.nn import functional as F

    from pytorch_generative import datasets
    from pytorch_generative import models
    from pytorch_generative import trainer

    train_loader, test_loader = debug_loader, debug_loader
    if train_loader is None:
        train_loader, test_loader = datasets.get_mnist_loaders(
            batch_size,
            resize_to_32=True,
        )

    model = models.VeryDeepVAE(
        in_channels=1,
        out_channels=1,
        input_resolution=32,
        latent_channels=16,
        hidden_channels=32,
        bottleneck_channels=8,
    )
    optimizer = optim.Adam(model.parameters(), lr=1e-3)

    def loss_fn(x, _, preds):
        preds, kl_div = preds
        recon_loss = F.binary_cross_entropy_with_logits(preds, x, reduction="none")
        recon_loss = recon_loss.mean(dim=(1, 2, 3))
        loss = recon_loss + kl_div
        return {
            "recon_loss": recon_loss.mean(),
            "kl_div": kl_div.mean(),
            "loss": loss.mean(),
        }

    def sample_fn(model):
        return torch.sigmoid(model.sample(n_samples=16))

    model_trainer = trainer.Trainer(
        model=model,
        loss_fn=loss_fn,
        optimizer=optimizer,
        train_loader=train_loader,
        eval_loader=test_loader,
        sample_epochs=1,
        sample_fn=sample_fn,
        log_dir=log_dir,
        device=device,
    )
    model_trainer.interleaved_train_and_eval(n_epochs)
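
# Note: unlike Example #3, recon_loss here is averaged over pixels rather than
# summed, which rescales the reconstruction term by 1 / (32 * 32) relative to kl_div.
# A hypothetical helper to move between the two conventions:
def per_pixel_to_per_image(per_pixel_loss, resolution=32, channels=1):
    return per_pixel_loss * resolution * resolution * channels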
Example #7
def reproduce(n_epochs=457,
              batch_size=128,
              log_dir="/tmp/run",
              device="cuda",
              debug_loader=None):
    """Training script with defaults to reproduce results.

    The code inside this function is self-contained and can be used as a top-level
    training script, e.g. by copy/pasting it into a Jupyter notebook.

    Args:
        n_epochs: Number of epochs to train for.
        batch_size: Batch size to use for training and evaluation.
        log_dir: Directory in which to log trainer state and TensorBoard summaries.
        device: Device to train on (either 'cuda' or 'cpu').
        debug_loader: Debug DataLoader which replaces the default training and
            evaluation loaders if not 'None'. Do not use unless you're writing unit
            tests.
    """
    from torch import optim
    from torch.nn import functional as F
    from torch.optim import lr_scheduler
    from torch.utils import data
    from torchvision import datasets
    from torchvision import transforms

    from pytorch_generative import trainer
    from pytorch_generative import models

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])
    train_loader = debug_loader or data.DataLoader(
        datasets.CIFAR10("/tmp/data",
                         train=True,
                         download=True,
                         transform=transform),
        batch_size=batch_size,
        shuffle=True,
        pin_memory=True,
        num_workers=2,
    )
    test_loader = debug_loader or data.DataLoader(
        datasets.CIFAR10("/tmp/data",
                         train=False,
                         download=True,
                         transform=transform),
        batch_size=batch_size,
        pin_memory=True,
        num_workers=2,
    )

    model = models.VQVAE(
        in_channels=3,
        out_channels=3,
        hidden_channels=128,
        residual_channels=32,
        n_residual_blocks=2,
        n_embeddings=512,
        embedding_dim=64,
    )
    optimizer = optim.Adam(model.parameters(), lr=2e-4)
    scheduler = lr_scheduler.MultiplicativeLR(optimizer,
                                              lr_lambda=lambda _: 0.999977)

    def loss_fn(x, _, preds):
        preds, vq_loss = preds
        recon_loss = F.mse_loss(preds, x)
        loss = recon_loss + vq_loss

        return {
            "vq_loss": vq_loss,
            "reconstruction_loss": recon_loss,
            "loss": loss,
        }

    model_trainer = trainer.Trainer(
        model=model,
        loss_fn=loss_fn,
        optimizer=optimizer,
        train_loader=train_loader,
        eval_loader=test_loader,
        lr_scheduler=scheduler,
        log_dir=log_dir,
        device=device,
    )
    model_trainer.interleaved_train_and_eval(n_epochs)
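
# The CIFAR-10 normalization above has to be undone before visualizing inputs or
# reconstructions. A small helper (not part of pytorch_generative) inverting it:
import torch

CIFAR10_MEAN = torch.tensor([0.4914, 0.4822, 0.4465]).view(1, 3, 1, 1)
CIFAR10_STD = torch.tensor([0.2023, 0.1994, 0.2010]).view(1, 3, 1, 1)

def denormalize(x):
    return (x * CIFAR10_STD + CIFAR10_MEAN).clamp(0.0, 1.0)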
Example #8
def reproduce(n_epochs=457,
              batch_size=128,
              log_dir="/tmp/run",
              device="cuda",
              debug_loader=None):
    """Training script with defaults to reproduce results.

    The code inside this function is self-contained and can be used as a top-level
    training script, e.g. by copy/pasting it into a Jupyter notebook.

    Args:
        n_epochs: Number of epochs to train for.
        batch_size: Batch size to use for training and evaluation.
        log_dir: Directory in which to log trainer state and TensorBoard summaries.
        device: Device to train on (either 'cuda' or 'cpu').
        debug_loader: Debug DataLoader which replaces the default training and
            evaluation loaders if not 'None'. Do not use unless you're writing unit
            tests.
    """
    import torch
    from torch import optim
    from torch.nn import functional as F
    from torch.optim import lr_scheduler
    from torch.utils import data
    from torchvision import datasets
    from torchvision import transforms

    from pytorch_generative import trainer
    from pytorch_generative import models

    transform = transforms.ToTensor()
    train_loader = debug_loader or data.DataLoader(
        datasets.MNIST(
            "/tmp/data", train=True, download=True, transform=transform),
        batch_size=batch_size,
        shuffle=True,
        num_workers=8,
    )
    test_loader = debug_loader or data.DataLoader(
        datasets.MNIST(
            "/tmp/data", train=False, download=True, transform=transform),
        batch_size=batch_size,
        num_workers=8,
    )

    model = models.VAE(
        in_channels=1,
        out_channels=1,
        in_size=28,
        latent_dim=10,
        hidden_channels=32,
        n_residual_blocks=2,
        residual_channels=16,
    )
    optimizer = optim.Adam(model.parameters(), lr=1e-3)
    scheduler = lr_scheduler.MultiplicativeLR(optimizer,
                                              lr_lambda=lambda _: 0.999977)

    def loss_fn(x, _, preds):
        preds, vae_loss = preds
        recon_loss = F.binary_cross_entropy_with_logits(preds, x)
        loss = recon_loss * 100 + vae_loss
        return {
            "recon_loss": recon_loss,
            "vae_loss": vae_loss,
            "loss": loss,
        }

    def sample_fn(model):
        return torch.sigmoid(model.sample(n_images=64))

    model_trainer = trainer.Trainer(
        model=model,
        loss_fn=loss_fn,
        optimizer=optimizer,
        train_loader=train_loader,
        eval_loader=test_loader,
        lr_scheduler=scheduler,
        sample_epochs=5,
        sample_fn=sample_fn,
        log_dir=log_dir,
        device=device,
    )
    model_trainer.interleaved_train_and_eval(n_epochs)
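
# sample_fn above returns a batch of sigmoid probabilities. A sketch for saving them
# as an image grid with torchvision utilities (the call to model.sample mirrors the
# one in sample_fn; the output path is arbitrary):
import torch
from torchvision import utils

def save_samples(trained_model, path="/tmp/vae_samples.png"):
    with torch.no_grad():
        samples = torch.sigmoid(trained_model.sample(n_images=64))
    utils.save_image(utils.make_grid(samples, nrow=8), path)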
Example #9
def reproduce(n_epochs=457,
              batch_size=128,
              log_dir="/tmp/run",
              device="cuda",
              n_channels=1,
              n_pixel_snail_blocks=1,
              n_residual_blocks=1,
              attention_value_channels=1,
              attention_key_channels=1,
              evalFlag=False,
              evaldir="/tmp/run",
              sampling_part=1):
    """Training script with defaults to reproduce results.

    The code inside this function is self-contained and can be used as a top-level
    training script, e.g. by copy/pasting it into a Jupyter notebook.

    Args:
        n_epochs: Number of epochs to train for.
        batch_size: Batch size to use for training and evaluation.
        log_dir: Directory in which to log trainer state and TensorBoard summaries.
        device: Device to train on (either 'cuda' or 'cpu').
        n_channels: Number of channels used inside the PixelSNAIL model.
        n_pixel_snail_blocks: Number of PixelSNAIL blocks in the model.
        n_residual_blocks: Number of residual blocks in the model.
        attention_value_channels: Number of attention value channels in the model.
        attention_key_channels: Number of attention key channels in the model.
        evalFlag: Evaluation flag forwarded to the (customized) Trainer.
        evaldir: Evaluation directory forwarded to the (customized) Trainer.
        sampling_part: Sampling option forwarded to the (customized) Trainer.
    """
    import torch
    from torch import nn
    from torch import optim
    from torch.nn import functional as F
    from torch.optim import lr_scheduler
    from torch.utils import data
    from torchvision import datasets
    from torchvision import transforms

    from pytorch_generative import trainer
    from pytorch_generative import models

    ####################################################################################################################
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~EB~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # Load ImageGPT Data :

    import gmpm

    train = gmpm.train
    test = gmpm.test

    train_loader = data.DataLoader(
        data.TensorDataset(torch.Tensor(train), torch.rand(len(train))),
        batch_size=batch_size,
        shuffle=True,
        num_workers=8,
    )
    test_loader = data.DataLoader(
        data.TensorDataset(torch.Tensor(test), torch.rand(len(test))),
        batch_size=batch_size,
        num_workers=8,
    )

    model = models.PixelSNAIL(
        ####################################################################################################################
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~EB~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Change Input / Output size :

        # 3 input channels: the image after the cluster-mapping function is fed to
        # the network.
        in_channels=3,

        # 512 output channels: each pixel gets a probability for each value in 0..511.
        out_channels=512,

        ####################################################################################################################
        n_channels=n_channels,
        n_pixel_snail_blocks=n_pixel_snail_blocks,
        n_residual_blocks=n_residual_blocks,
        attention_value_channels=attention_value_channels,
        attention_key_channels=attention_key_channels,
    )
    optimizer = optim.Adam(model.parameters(), lr=1e-4)
    scheduler = lr_scheduler.MultiplicativeLR(optimizer,
                                              lr_lambda=lambda _: 0.999977)

    def loss_fn(x, _, preds):

        ####################################################################################################################
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~EB~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Per-pixel classification loss via nn.NLLLoss over the 512 output classes.
        # (NLLLoss expects log-probabilities; raw logits would need log_softmax first,
        # or CrossEntropyLoss instead.)

        x = x.long()
        criterion = nn.NLLLoss()
        B, C, D = preds.size()
        preds_2d = preds.view(B, C, D, -1)
        x_2d = x.view(B, D, -1)
        loss = criterion(preds_2d, x_2d.long())

        ####################################################################################################################

        return loss

    model = model.to(device)
    model_trainer = trainer.Trainer(
        model=model,
        loss_fn=loss_fn,
        optimizer=optimizer,
        train_loader=train_loader,
        eval_loader=test_loader,
        lr_scheduler=scheduler,
        log_dir=log_dir,
        device=device,
        sample_epochs=5,
        sample_fn=None,
        n_channels=n_channels,
        n_pixel_snail_blocks=n_pixel_snail_blocks,
        n_residual_blocks=n_residual_blocks,
        attention_value_channels=attention_value_channels,
        attention_key_channels=attention_key_channels,
        evalFlag=evalFlag,
        evaldir=evaldir,
        sampling_part=sampling_part)

    model_trainer.interleaved_train_and_eval(n_epochs)
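
# Self-contained illustration of the per-pixel classification loss used above:
# nn.NLLLoss expects log-probabilities of shape (batch, classes, ...) and integer
# targets of shape (batch, ...). The tensors here are random stand-ins, not real
# ImageGPT cluster indices.
import torch
from torch import nn
from torch.nn import functional as F

logits = torch.randn(4, 512, 1024)           # (batch, classes, pixels)
targets = torch.randint(0, 512, (4, 1024))    # (batch, pixels)
loss = nn.NLLLoss()(F.log_softmax(logits, dim=1), targets)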