Example #1
def run(args):
    tag = 'bamos.smooth-topk.seed={}.{}'.format(args.seed, args.dataset)
    if args.dataset == 'cifar100':
        tag += '.noise={}'.format(args.noise_labels)
    elif args.dataset == 'imagenet':
        tag += '-{}'.format(args.train_size)
    setproctitle(tag)

    set_seed(args.seed)
    xp = create_experiment(args)
    train_loader, val_loader, test_loader = get_loaders(args)
    loss = get_loss(xp, args)

    model = get_model(args)
    if args.load_model:
        load_model(model, args.load_model)

    if args.cuda:
        if args.parallel_gpu:
            model = torch.nn.DataParallel(model).cuda()
        else:
            torch.cuda.set_device(args.device)
            model.cuda()
        loss.cuda()

    optimizer = get_optimizer(model, args.mu, args.lr_0, xp)
    if args.load_optimizer:
        load_optimizer(optimizer, args.load_optimizer, args.lr_0)

    with logger.stdout_to("{}_log.txt".format(args.out_name)):
        clock = -time.time()
        for _ in range(args.epochs):

            xp.Epoch.update(1).log()
            optimizer = update_optimizer(args.lr_schedule, optimizer,
                                         model, loss, xp)

            xp.Learning_Rate.update().log()
            xp.Mu.update().log()
            xp.Temperature.update().log()

            train(model, loss, optimizer, train_loader, xp, args)
            test(model, loss, val_loader, xp, args)

        test(model, loss, test_loader, xp, args)
        clock += time.time()

        print("\nEvaluation time:  \t {0:.2g} min".format(clock * 1. / 60))
Example #2
def run(args):

    set_seed(args.seed)
    xp = create_experiment(args)
    train_loader, val_loader, test_loader = get_loaders(args)
    loss = get_loss(xp, args)

    model = get_model(args)
    if args.load_model:
        load_model(model, args.load_model)

    if args.cuda:
        if args.parallel_gpu:
            model = torch.nn.DataParallel(model).cuda()
        else:
            torch.cuda.set_device(args.device)
            model.cuda()
        loss.cuda()

    optimizer = get_optimizer(model, args.mu, args.lr_0, xp)
    if args.load_optimizer:
        load_optimizer(optimizer, args.load_optimizer, args.lr_0)

    with logger.stdout_to("{}_log.txt".format(args.out_name)):
        clock = -time.time()
        for _ in range(args.epochs):

            xp.Epoch.update(1).log()
            optimizer = update_optimizer(args.lr_schedule, optimizer, model,
                                         loss, xp)

            xp.Learning_Rate.update().log()
            xp.Mu.update().log()
            xp.Temperature.update().log()

            train(model, loss, optimizer, train_loader, xp, args)
            test(model, loss, val_loader, xp, args)

        test(model, loss, test_loader, xp, args)
        clock += time.time()

        print("\nEvaluation time:  \t {0:.2g} min".format(clock * 1. / 60))
Example #3
"""
This runs a 4-layer fully connected Generative Adversarial Network
on the MNIST dataset.
"""

import torch
import torchvision
from torch import nn, optim
from torchvision import datasets

import utils
import models
import gantraining

if __name__ == '__main__':
    # Setup a new experiment, keeps results in one folder
    e = utils.create_experiment("experiments/densegan4_mnist")

    # Hyperparameters
    e.params["shuffle"] = True  # Shuffle the contents in the dataset
    e.params["num_workers"] = 4  # Number of worker threads for dataloader
    e.params["batch_size"] = 128  # Size of one batch during training
    e.params["nc"] = 1  # Number of channels in the training images (color RGB uses 3 channels)
    e.params["nz"] = 100  # Size of z latent vector (i.e. size of generator input)
    e.params["ndf"] = 1024  # Number of features in the discriminator network.
    e.params["ngf"] = 256  # Number of features in the generator network.
    e.params["im_size"] = 28  # Size of the images discriminated and generated.
    e.params["num_epochs"] = e.input_int("number of epochs",
                                         5)  # Number of epochs
    e.params["lr"] = 0.0002  # Learning rate for optimizer
Example #4
"""
This runs a 4-layer deep convolutional Generative Adversarial Network
on the CelebA dataset.
"""

import torch
import torchvision
from torch import nn, optim
from torchvision import datasets

import utils
import models
import gantraining


if __name__ == '__main__':
    # Setup a new experiment, keeps results in one folder
    e = utils.create_experiment("experiments/dcgan4_celeba")

    # Hyperparameters
    e.params["shuffle"]     = True # Shuffle the contents in the dataset
    e.params["num_workers"] = 4    # Number of worker threads for dataloader
    e.params["batch_size"]  = 128  # Size of one batch during training
    e.params["nc"]          = 3    # Number of channels in the training images (color RGB uses 3 channels)
    e.params["nz"]          = 100  # Size of z latent vector (i.e. size of generator input)
    e.params["im_size"]     = 64   # Size of the images discriminated and generated.
    e.params["num_epochs"]  = e.input_int("number of epochs", 5) # Number of epochs 
    e.params["lr"]          = 0.0002       # Learning rate for optimizer
    e.params["betas"]       = (0.5, 0.999) # Betas hyperparameter for Adam optimizers
    e.params["patience"]    = 7 # Number of epochs to wait before early stopping

    # Setup the CelebA dataset
    transform = utils.image_transform(e.params["im_size"])
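utils.image_transform is a project helper that only receives the target image size. A reasonable sketch of what such a helper does for DCGAN-style training, assuming the conventional resize-crop-normalize pipeline (the actual helper may differ):

from torchvision import transforms

def image_transform(im_size):
    # Resize the shorter side, center-crop to a square, and map pixel
    # values into [-1, 1], the usual input range for DCGAN training.
    return transforms.Compose([
        transforms.Resize(im_size),
        transforms.CenterCrop(im_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])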
Example #5

import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import torch
import torchvision.utils as vutils

import utils
import models
import gantraining


def plot_images(image_tensor, filename):
    image = vutils.make_grid(image_tensor, padding=2, nrow=10, normalize=True).detach().cpu()
    plt.figure(figsize=(10, 10))
    plt.axis("off")
    plt.imshow(np.transpose(image, (1, 2, 0)))
    plt.savefig(e.fname(filename), bbox_inches="tight", pad_inches=0.2)

    
if __name__ == '__main__':
    matplotlib.rcParams["image.interpolation"] = 'nearest'
    # Setup a new experiment, keeps results in one folder
    e = utils.create_experiment("experiments/dcgan3_cifar10")

    # Hyperparameters
    e.params["shuffle"]     = True # Shuffle the contents in the dataset
    e.params["num_workers"] = 4    # Number of worker threads for dataloader
    e.params["batch_size"]  = 128  # Size of one batch during training
    e.params["nc"]          = 3    # Number of channels in the training images (color RGB uses 3 channels)
    e.params["nz"]          = 100  # Size of z latent vector (i.e. size of generator input)
    e.params["im_size"]     = 32   # Size of the images discriminated and generated.
    e.params["num_epochs"]  = 0 # Number of epochs 
    e.params["lr"]          = 0.0002       # Learning rate for optimizer
    e.params["betas"]       = (0.5, 0.999) # Betas hyperparameter for Adam optimizers
    e.params["patience"]    = 7 # Number of epochs to wait before early stopping

    # Setup the two models
    e.generator = models.dcgan3_generator(e)
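With the generator in place, plot_images above can render a grid of samples. A minimal usage sketch, assuming the generator takes (N, nz, 1, 1)-shaped noise as standard DCGAN generators do:

noise = torch.randn(100, e.params["nz"], 1, 1)  # 100 latent vectors
with torch.no_grad():
    fake = e.generator(noise)
plot_images(fake, "generated_samples.png")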
Example #6
"""
This runs a 3-convolutional-layer Generative Adversarial Network
on the CIFAR10 dataset.
"""

import torch
import torchvision
from torch import nn, optim
from torchvision import datasets

import utils
import models
import gantraining

if __name__ == '__main__':
    # Setup a new experiment, keeps results in one folder
    e = utils.create_experiment("experiments/vae_cifar10")

    # Hyperparameters
    e.params["shuffle"] = True  # Shuffle the contents in the dataset
    e.params["num_workers"] = 4  # Number of worker threads for dataloader
    e.params["batch_size"] = 128  # Size of one batch during training
    e.params["nc"] = 3  # Number of channels in the training images (color RGB uses 3 channels)
    e.params["nz"] = 100  # Size of z latent vector (i.e. size of generator input)
    e.params["nef"] = 64  # Number of features in the discriminator network.
    e.params["ndf"] = 64  # Number of features in the generator network.
    e.params["mean"] = 0.0  # Mean value of
    e.params["stdev"] = 1.0  #
    e.params["im_size"] = 32  # Size of the images discriminated and generated.
    e.params["num_epochs"] = e.input_int("number of epochs",
Example #7
"""
This runs a 4-layer deep convolutional Generative Adversarial Network
on the STL-10 dataset.
"""

import torch
import torchvision
from torch import nn, optim
from torchvision import datasets

import utils
import models
import gantraining


if __name__ == '__main__':
    # Setup a new experiment, keeps results in one folder
    e = utils.create_experiment("experiments/dcgan4_stl-10")

    # Hyperparameters
    e.params["shuffle"]     = True # Shuffle the contents in the dataset
    e.params["num_workers"] = 4    # Number of worker threads for dataloader
    e.params["batch_size"]  = 128  # Size of one batch during training
    e.params["nc"]          = 3    # Number of channels in the training images (color RGB uses 3 channels)
    e.params["nz"]          = 100  # Size of z latent vector (i.e. size of generator input)
    e.params["im_size"]     = 96   # Size of the images discriminated and generated.
    e.params["num_epochs"]  = e.input_int("number of epochs", 5) # Number of epochs 
    e.params["lr"]          = 0.0002       # Learning rate for optimizer
    e.params["betas"]       = (0.5, 0.999) # Betas hyperparameter for Adam optimizers
    e.params["patience"]    = 7 # Number of epochs to wait before early stopping

    # Setup the STL-10 dataset
    transform = utils.image_transform(e.params["im_size"])
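The experiment name and im_size = 96 point at STL-10, whose images are natively 96x96. The truncated dataset setup plausibly continues along these lines (the root path is illustrative):

from torch.utils.data import DataLoader
from torchvision import datasets

dataset = datasets.STL10("data/stl10", split="train", download=True,
                         transform=transform)
loader = DataLoader(dataset, batch_size=128, shuffle=True, num_workers=4)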
Example #8
"""
This runs a 2-layer deep convolutional Generative Adversarial Network
on the MNIST dataset.
"""

import torch
import torchvision
from torch import nn, optim
from torchvision import datasets

import utils
import models
import gantraining


if __name__ == '__main__':
    # Setup a new experiment, keeps results in one folder
    e = utils.create_experiment("experiments/dcgan2_mnist")

    # Optionally load a pretrained model
    pretrained = False
    pretrained_path = "models/best_model.pth.tar"

    # Hyperparameters
    e.params["shuffle"]     = True # Shuffle the contents in the dataset
    e.params["num_workers"] = 4    # Number of worker threads for dataloader
    e.params["batch_size"]  = 128  # Size of one batch during training
    e.params["nc"]          = 1    # Number of channels in the training images (color RGB uses 3 channels)
    e.params["nz"]          = 100  # Size of z latent vector (i.e. size of generator input)
    e.params["im_size"]     = 28   # Size of the images discriminated and generated.
    e.params["num_epochs"]  = e.input_int("number of epochs", 5) # Number of epochs 
    e.params["lr"]          = 0.0002       # Learning rate for optimizer
    e.params["betas"]       = (0.5, 0.999) # Betas hyperparameter for Adam optimizers