Example #1
0
 def test_trainer_acgan(self):
     """One-epoch CPU smoke test of an ACGAN generator/discriminator pair on MNIST."""

     def adam_spec():
         # Return a fresh optimizer config each time so the two networks
         # never share a dict object.
         return {"name": Adam, "args": {"lr": 0.0002, "betas": (0.5, 0.999)}}

     config = {
         "generator": {
             "name": ACGANGenerator,
             "args": {"num_classes": 10, "out_channels": 1, "step_channels": 4},
             "optimizer": adam_spec(),
         },
         "discriminator": {
             "name": ACGANDiscriminator,
             "args": {"num_classes": 10, "in_channels": 1, "step_channels": 4},
             "optimizer": adam_spec(),
         },
     }
     # Minimax objectives combined with the auxiliary-classifier terms.
     criteria = [
         MinimaxGeneratorLoss(),
         MinimaxDiscriminatorLoss(),
         AuxiliaryClassifierGeneratorLoss(),
         AuxiliaryClassifierDiscriminatorLoss(),
     ]
     trainer = Trainer(
         config, criteria, sample_size=1, epochs=1, device=torch.device("cpu")
     )
     trainer(mnist_dataloader())
Example #2
0
 def test_trainer_dcgan(self):
     """One-epoch CPU smoke test of the DCGAN pipeline with minimax losses."""
     config = {
         "generator": {
             "name": DCGANGenerator,
             "args": {"out_channels": 1, "step_channels": 4},
             "optimizer": {"name": Adam, "args": {"lr": 0.0002, "betas": (0.5, 0.999)}},
         },
         "discriminator": {
             "name": DCGANDiscriminator,
             "args": {"in_channels": 1, "step_channels": 4},
             "optimizer": {"name": Adam, "args": {"lr": 0.0002, "betas": (0.5, 0.999)}},
         },
     }
     criteria = [MinimaxGeneratorLoss(), MinimaxDiscriminatorLoss()]
     trainer = Trainer(
         config, criteria, sample_size=1, epochs=1, device=torch.device("cpu")
     )
     trainer(mnist_dataloader())
Example #3
0
 def test_trainer_infogan(self):
     """One-epoch CPU smoke test of InfoGAN driven by the mutual-information penalty."""

     def adam_spec():
         # Fresh optimizer config per network; avoids sharing a dict object.
         return {"name": Adam, "args": {"lr": 0.0002, "betas": (0.5, 0.999)}}

     config = {
         "generator": {
             "name": InfoGANGenerator,
             "args": {
                 "out_channels": 1,
                 "step_channels": 4,
                 "dim_dis": 128,
                 "dim_cont": 128,
             },
             "optimizer": adam_spec(),
         },
         "discriminator": {
             "name": InfoGANDiscriminator,
             "args": {
                 "in_channels": 1,
                 "step_channels": 4,
                 "dim_dis": 128,
                 "dim_cont": 128,
             },
             "optimizer": adam_spec(),
         },
     }
     # Discrete latent code: every sample one-hot on the first category.
     discrete_code = torch.zeros(128, 128)
     discrete_code[:, 0] = 1
     trainer = Trainer(
         config,
         [MutualInformationPenalty()],
         sample_size=1,
         epochs=1,
         device=torch.device("cpu"),
         dis_code=discrete_code,
         cont_code=torch.rand(128, 128),
     )
     trainer(mnist_dataloader())
Example #4
0
 def test_trainer_cgan(self):
     """One-epoch CPU smoke test of a conditional GAN with minimax and Wasserstein losses."""

     def adam_spec():
         # Fresh optimizer config per network; avoids sharing a dict object.
         return {"name": Adam, "args": {"lr": 0.0002, "betas": (0.5, 0.999)}}

     config = {
         "generator": {
             "name": ConditionalGANGenerator,
             "args": {"num_classes": 10, "out_channels": 1, "step_channels": 4},
             "optimizer": adam_spec(),
         },
         "discriminator": {
             "name": ConditionalGANDiscriminator,
             "args": {"num_classes": 10, "in_channels": 1, "step_channels": 4},
             "optimizer": adam_spec(),
         },
     }
     # Mix of minimax and Wasserstein objectives plus the gradient penalty.
     criteria = [
         MinimaxGeneratorLoss(),
         MinimaxDiscriminatorLoss(),
         WassersteinGeneratorLoss(),
         WassersteinDiscriminatorLoss(),
         WassersteinGradientPenalty(),
     ]
     trainer = Trainer(
         config, criteria, sample_size=1, epochs=1, device=torch.device("cpu")
     )
     trainer(mnist_dataloader())
Example #5
0
 def test_trainer_dcgan(self):
     """One-epoch CPU smoke test of DCGAN under BEGAN/historical-average/feature-matching losses."""

     def adam_spec():
         # Fresh optimizer config per network; avoids sharing a dict object.
         return {"name": Adam, "args": {"lr": 0.0002, "betas": (0.5, 0.999)}}

     config = {
         "generator": {
             "name": DCGANGenerator,
             "args": {"out_channels": 1, "step_channels": 4},
             "optimizer": adam_spec(),
         },
         "discriminator": {
             "name": DCGANDiscriminator,
             "args": {"in_channels": 1, "step_channels": 4},
             "optimizer": adam_spec(),
         },
     }
     criteria = [
         BoundaryEquilibriumGeneratorLoss(),
         HistoricalAverageGeneratorLoss(),
         FeatureMatchingGeneratorLoss(),
         BoundaryEquilibriumDiscriminatorLoss(),
         HistoricalAverageDiscriminatorLoss(),
     ]
     trainer = Trainer(
         config, criteria, sample_size=1, epochs=1, device=torch.device("cpu")
     )
     trainer(mnist_dataloader())
Example #6
0
 def test_trainer_autoencoding_models(self):
     """One-epoch CPU smoke test of autoencoding GAN models with BEGAN + DRAGAN losses."""

     def adam_spec():
         # Fresh optimizer config per network; avoids sharing a dict object.
         return {"name": Adam, "args": {"lr": 0.0002, "betas": (0.5, 0.999)}}

     config = {
         "generator": {
             "name": AutoEncodingGenerator,
             "args": {"out_channels": 1, "step_channels": 4},
             "optimizer": adam_spec(),
         },
         "discriminator": {
             "name": AutoEncodingDiscriminator,
             # energy=False: discriminator is not run in energy-based mode here.
             "args": {"in_channels": 1, "step_channels": 4, "energy": False},
             "optimizer": adam_spec(),
         },
     }
     criteria = [
         BoundaryEquilibriumGeneratorLoss(),
         BoundaryEquilibriumDiscriminatorLoss(),
         DraganGradientPenalty(),
     ]
     trainer = Trainer(
         config, criteria, sample_size=1, epochs=1, device=torch.device("cpu")
     )
     trainer(mnist_dataloader())
Example #7
0
 def test_trainer_autoencoding_eb_models(self):
     """One-epoch CPU smoke test of energy-based autoencoding GAN models."""

     def adam_spec():
         # Fresh optimizer config per network; avoids sharing a dict object.
         return {"name": Adam, "args": {"lr": 0.0002, "betas": (0.5, 0.999)}}

     config = {
         "generator": {
             "name": AutoEncodingGenerator,
             "args": {"out_channels": 1, "step_channels": 4},
             "optimizer": adam_spec(),
         },
         "discriminator": {
             "name": AutoEncodingDiscriminator,
             # embeddings=True so the pulling-away term can use latent embeddings.
             "args": {"in_channels": 1, "step_channels": 4, "embeddings": True},
             "optimizer": adam_spec(),
         },
     }
     criteria = [
         EnergyBasedGeneratorLoss(),
         EnergyBasedPullingAwayTerm(),
         EnergyBasedDiscriminatorLoss(),
     ]
     trainer = Trainer(
         config, criteria, sample_size=1, epochs=1, device=torch.device("cpu")
     )
     trainer(mnist_dataloader())
    batch_size = 32
    if torch.cuda.is_available():
        device = torch.device('cuda')
        epochs = 2000

    else:
        device = torch.device("cpu")
        epochs = 5

    print('Device:', device)
    print('Epochs:', epochs)
    print('Dataset Total Size:', dataset_size)
    print('Dataset For Validation Remaining:', split)
    print('Batch Size:', batch_size)

    dataloader = data.DataLoader(dataset,
                                 batch_size=batch_size,
                                 sampler=train_sampler)

    lsgan_losses = [
        LeastSquaresGeneratorLoss(),
        LeastSquaresDiscriminatorLoss()
    ]
    trainer = Trainer(dcgan_network,
                      lsgan_losses,
                      sample_size=64,
                      epochs=epochs,
                      device=device)
    trainer(dataloader)
Example #9
0
            epochs=args.epochs,
            sample_size=args.sample_size,
            checkpoints=args.checkpoint,
            retain_checkpoints=1,
            recon=args.reconstructions,
        )
    else:
        if args.cpu == 1:
            device = torch.device("cpu")
        else:
            device = torch.device("cuda:0")
        trainer = Trainer(
            network_config,
            losses_list,
            device=device,
            epochs=args.epochs,
            sample_size=args.sample_size,
            checkpoints=args.checkpoint,
            retain_checkpoints=1,
            recon=args.reconstructions,
        )

    # Transforms to get Binarized MNIST
    dataset = dsets.MNIST(root=args.data_dir,
                          train=True,
                          transform=transforms.Compose([
                              transforms.Resize((32, 32)),
                              transforms.Lambda(lambda x: x.convert('1')),
                              transforms.ToTensor()
                          ]),
                          download=True)
Example #10
0
    epochs = 20
else:
    device = torch.device("cpu")
    epochs = 20

print("Device: {}".format(device))
print("Epochs: {}".format(epochs))


losses_list = [
    WassersteinGeneratorLoss(),
    WassersteinDiscriminatorLoss(clip=(-0.01, 0.01)),
]

trainer = Trainer(
    network_params, losses_list, sample_size=64, epochs=epochs, device=device
)

trainer(dataloader)

trainer.complete()


# Grab a batch of real images from the dataloader
real_batch = next(iter(dataloader))

# Plot the real images
plt.figure(figsize=(10, 10))
plt.subplot(1, 2, 1)
plt.axis("off")
plt.title("Real Images")
Example #11
0
if torch.cuda.is_available():
    device = torch.device("cuda:0")
    # Use deterministic cudnn algorithms
    torch.backends.cudnn.deterministic = True
    epochs = 10
else:
    device = torch.device("cpu")
    epochs = 10

print("Device: {}".format(device))
print("Epochs: {}".format(epochs))

trainer = Trainer(network,
                  losses,
                  sample_size=64,
                  epochs=epochs,
                  device=device)
trainer(loader)

# Grab a batch of real images from the dataloader
real_batch = next(iter(loader))

# Plot the real images
plt.figure(figsize=(10, 10))
plt.subplot(1, 2, 1)
plt.axis("off")
plt.title("Real Images")
plt.imshow(
    np.transpose(
        vutils.make_grid(real_batch[0].to(device)[:64],
        },
        "optimizer": {"name": Adam, "args": {"lr": 0.0002, "betas": (0.5, 0.999)}},
    },
    "discriminator": {
        "name": DCGANDiscriminator,
        "args": {"in_channels": 3, "step_channels": 16},
        "optimizer": {
            "name": Adam,
            "args": {
                "lr": 0.0002,
                "betas": (0.5, 0.999),
            },
        },
    },
}

loss = [MinimaxGeneratorLoss(), MinimaxDiscriminatorLoss()]

trainer = Trainer(
    dcgan,
    loss,
    sample_size=64,
    epochs=100,
    checkpoints=_C.OUTPUT.CHECKPOINT_DIR,
    recon=_C.OUTPUT.PREDICTION_DIR,
    log_dir=_C.OUTPUT.LOG_DIR,
    device=device,
)

trainer(cifar10())
	print('Checkpoint Prefix:', checkpoint_prefix)

	images_dir = Path('./sample_images')
	if not images_dir.is_dir():
		images_dir.mkdir()
	print('Samples image dir:', images_dir)

	log_dir = Path('./log/ACGAN')
	if not log_dir.is_dir():
		log_dir.mkdir()
	print('Log dir:', log_dir)

	dataloader = data.DataLoader(dataset, batch_size=batch_size, sampler=train_sampler)

	lsgan_losses = [AuxiliaryClassifierGeneratorLoss(), AuxiliaryClassifierDiscriminatorLoss()]
	trainer = Trainer(
		dcgan_network,
		lsgan_losses,
		sample_size=64,
		epochs=epochs,
		device=device, 
		checkpoints=checkpoint_prefix,
		recon=images_dir,
		log_dir=log_dir,
	)

	print('Continueing from epoch 2000')
	trainer.load_model('model/ACGAN_goodfiles4.model')
	trainer(dataloader)