Example #1
def main():

    # Load the data (DataLoader object)
    path_monnet = 'C:/Users/remys/GAN-Art-Monet/img'
    path_pictures = 'C:/Users/remys/GAN-Art-Monet/photo'
    batch_size = 1
    n_epochs = 10
    device = 'cpu'
    dataset = get_data_loader(path_monnet, path_pictures, batch_size)

    # Create generators and discriminators and move them to the chosen device
    generator_AB = Generator().to(device)
    generator_BA = Generator().to(device)

    discriminator_A = Discriminator().to(device)
    discriminator_B = Discriminator().to(device)

    # Set optimizers
    G_AB_optimizer = torch.optim.Adam(generator_AB.parameters(), lr=2e-4)
    G_BA_optimizer = torch.optim.Adam(generator_BA.parameters(), lr=2e-4)

    D_A_optimizer = torch.optim.Adam(discriminator_A.parameters(), lr=2e-4)
    D_B_optimizer = torch.optim.Adam(discriminator_B.parameters(), lr=2e-4)

    # Set trainer
    trainer = Trainer(
        generator_ab=generator_AB,
        generator_ba=generator_BA,
        discriminator_a=discriminator_A,
        discriminator_b=discriminator_B,
        generator_ab_optimizer=G_AB_optimizer,
        generator_ba_optimizer=G_BA_optimizer,
        discriminator_a_optimizer=D_A_optimizer,
        discriminator_b_optimizer=D_B_optimizer,
        n_epochs=n_epochs,
        dataloader=dataset,
        device=device,
    )

    # Launch Training
    trainer.train()
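
This example assumes a get_data_loader helper that pairs Monet paintings (domain A) with photos (domain B). Below is a minimal sketch of what such a loader could look like, assuming an unaligned two-folder dataset; the PairedImageDataset class, the 256x256 transforms and the shuffle setting are illustrative assumptions, not the original implementation.

import os

from PIL import Image
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms


class PairedImageDataset(Dataset):
    """Yields one (Monet, photo) pair per item from two unaligned folders (hypothetical helper)."""

    def __init__(self, dir_a, dir_b, transform):
        self.paths_a = sorted(os.path.join(dir_a, f) for f in os.listdir(dir_a))
        self.paths_b = sorted(os.path.join(dir_b, f) for f in os.listdir(dir_b))
        self.transform = transform

    def __len__(self):
        return max(len(self.paths_a), len(self.paths_b))

    def __getitem__(self, idx):
        # Wrap around the shorter folder so the two domains stay unaligned.
        img_a = Image.open(self.paths_a[idx % len(self.paths_a)]).convert('RGB')
        img_b = Image.open(self.paths_b[idx % len(self.paths_b)]).convert('RGB')
        return self.transform(img_a), self.transform(img_b)


def get_data_loader(path_a, path_b, batch_size):
    transform = transforms.Compose([transforms.Resize(256),
                                    transforms.CenterCrop(256),
                                    transforms.ToTensor()])
    return DataLoader(PairedImageDataset(path_a, path_b, transform),
                      batch_size=batch_size, shuffle=True)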
Example #2
    def build_model(self):
        """Build generator and discriminator."""
        self.generator = Generator(z_dim=self.z_dim,
                                   image_size=self.image_size,
                                   conv_dim=self.g_conv_dim).to(self.device)
        self.discriminator = Discriminator(image_size=self.image_size,
                                           conv_dim=self.d_conv_dim).to(self.device)
        self.g_optimizer = optim.Adam(self.generator.parameters(), self.lr,
                                      [self.beta1, self.beta2])
        self.d_optimizer = optim.Adam(self.discriminator.parameters(), self.lr,
                                      [self.beta1, self.beta2])

        if self.cuda:
            cudnn.benchmark = True
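
build_model relies on attributes that are set elsewhere in the class. Here is a hedged sketch of a constructor that could supply them; the class name Solver and all default values are assumptions for illustration, not the original code.

import torch
from torch import optim             # used by build_model() above
from torch.backends import cudnn    # used by build_model() above


class Solver:
    def __init__(self, z_dim=128, image_size=64, g_conv_dim=64, d_conv_dim=64,
                 lr=2e-4, beta1=0.5, beta2=0.999):
        self.z_dim = z_dim            # size of the latent noise vector
        self.image_size = image_size
        self.g_conv_dim = g_conv_dim  # base channel counts of G and D
        self.d_conv_dim = d_conv_dim
        self.lr = lr
        self.beta1 = beta1            # Adam momentum terms
        self.beta2 = beta2
        self.cuda = torch.cuda.is_available()
        self.device = torch.device('cuda' if self.cuda else 'cpu')
        # build_model() (shown above) would then be called to create the
        # networks and their optimizers.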
Example #3
from GAN.model import Generator, Discriminator
from GAN.Config import cfg
from GAN.dataset import load_data
import tensorflow as tf
import pandas as pd
import numpy as np
import os
import time

tf.keras.backend.set_floatx('float64')

input_tensor = tf.keras.layers.Input([100])
output_tensors = Generator(input_tensor)
model_generator = tf.keras.Model(input_tensor, output_tensors)

input_tensor = tf.keras.layers.Input([204])
output_tensors = Discriminator(input_tensor)
model_discriminator = tf.keras.Model(input_tensor, output_tensors)

cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=False)
generator_optimizer = tf.keras.optimizers.Adam(learning_rate=10)
discriminator_optimizer = tf.keras.optimizers.Adam(learning_rate=10)

checkpoint_dir = './training_checkpoints'
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
checkpoint = tf.train.Checkpoint(
    generator_optimizer=generator_optimizer,
    discriminator_optimizer=discriminator_optimizer,
    generator=model_generator,
    discriminator=model_discriminator)
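
Continuing the snippet above, the Checkpoint object is typically driven through a tf.train.CheckpointManager for periodic saving and restoring. A short, hedged illustration; max_to_keep and the print statement are not part of the original code.

manager = tf.train.CheckpointManager(checkpoint, checkpoint_dir, max_to_keep=3)

# Restore the most recent checkpoint, if any, before training.
checkpoint.restore(manager.latest_checkpoint)

# Inside the training loop, persist the current weights and optimizer state.
ckpt_path = manager.save()
print('Saved checkpoint to', ckpt_path)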
Example #4
# number of gpu's available
ngpu = 1
# input noise dimension
nz = 100
# number of generator filters
ngf = 64

weights_folder = "./EXP2/weights/"
epoch = 56

n_samples = 1000

# Initialization

model = Generator(ngpu).to(device)

path = weights_folder + 'netG_epoch_%i.pth' % epoch

print('==> Resuming from checkpoint..', path)

model.load_state_dict(torch.load(path))
model.eval()
# load(checkpoint)

def main():

    for i in range(1, n_samples // 128):
        noise = torch.randn(128, nz, 1, 1, device=device)
        fake = model(noise)
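
If the generated batches are meant to be inspected, they would normally be produced under torch.no_grad() and written to disk. A hedged sketch using torchvision's save_image follows; the save_samples helper, its output directory and file names are assumptions, and the loop bounds simply mirror main() above.

import os

import torch
import torchvision.utils as vutils


def save_samples(out_dir='./EXP2/samples'):
    os.makedirs(out_dir, exist_ok=True)
    with torch.no_grad():  # no gradients are needed for sampling
        for i in range(1, n_samples // 128):
            noise = torch.randn(128, nz, 1, 1, device=device)
            fake = model(noise)
            vutils.save_image(fake, os.path.join(out_dir, 'fake_%03d.png' % i),
                              normalize=True)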
Example #5
def main(args):

    # Load the data (DataLoader object)
    path_monnet = args.Monet_Path
    path_pictures = args.Pictures_Path
    save_path = args.Save_Path
    batch_size = args.batch_size
    n_epochs = args.epochs
    device = args.device
    dataset = get_data_loader(path_monnet, path_pictures, batch_size)

    # Create Generators and Discriminators and put them on GPU/TPU
    generator_AB = Generator().to(device)
    generator_BA = Generator().to(device)

    discriminator_A = Discriminator().to(device)
    discriminator_B = Discriminator().to(device)

    generator_AB.apply(weights_init_normal)
    generator_BA.apply(weights_init_normal)
    discriminator_A.apply(weights_init_normal)
    discriminator_B.apply(weights_init_normal)

    # Set optimizers
    G_optimizer = torch.optim.Adam(itertools.chain(generator_AB.parameters(),
                                                   generator_BA.parameters()),
                                   lr=2e-4)

    D_optimizer = torch.optim.Adam(itertools.chain(discriminator_A.parameters(),
                                                   discriminator_B.parameters()),
                                   lr=2e-4)

    # Set trainer
    trainer = Trainer(
        generator_ab=generator_AB,
        generator_ba=generator_BA,
        discriminator_a=discriminator_A,
        discriminator_b=discriminator_B,
        generator_optimizer=G_optimizer,
        discriminator_optimizer=D_optimizer,
        n_epochs=n_epochs,
        dataloader=dataset,
        device=device,
    )

    # Launch Training
    trainer.train()

    # Save the model and the loss during training
    # Save logs
    trainer.log.save(os.path.join(save_path, 'save_loss.txt'))
    # Save the model
    torch.save(generator_AB.state_dict(),
               os.path.join(save_path, 'generator_AB.pt'))
    torch.save(generator_BA.state_dict(),
               os.path.join(save_path, 'generator_BA.pt'))
    torch.save(discriminator_A.state_dict(),
               os.path.join(save_path, 'discriminator_A.pt'))
    torch.save(discriminator_B.state_dict(),
               os.path.join(save_path, 'discriminator_B.pt'))
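
main(args) expects a namespace carrying the attributes read at the top of the function. A hedged sketch of a matching command-line entry point; the defaults and the description string are illustrative, not the original values.

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='CycleGAN Monet <-> photo training')
    parser.add_argument('--Monet_Path', type=str, required=True)
    parser.add_argument('--Pictures_Path', type=str, required=True)
    parser.add_argument('--Save_Path', type=str, default='./checkpoints')
    parser.add_argument('--batch_size', type=int, default=1)
    parser.add_argument('--epochs', type=int, default=10)
    parser.add_argument('--device', type=str, default='cpu')
    main(parser.parse_args())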
Example #6
# The opening of this call is truncated in the source; these arguments are the
# tail of a torch.utils.data.DataLoader over the training set (names assumed).
dataloader = torch.utils.data.DataLoader(dataset,
                                         batch_size=128,
                                         shuffle=True,
                                         num_workers=2)


# custom weights initialization called on netG and netD
def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        m.weight.data.normal_(0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)


netG = Generator(ngpu).to(device)
netG.apply(weights_init)
# load weights to test the model
# netG.load_state_dict(torch.load('weights/netG_epoch_24.pth'))
print(netG)

netD = Discriminator(ngpu).to(device)
netD.apply(weights_init)
# load weights to test the model
# netD.load_state_dict(torch.load('weights/netD_epoch_24.pth'))
print(netD)

criterion = nn.BCELoss()

# setup optimizer
optimizerD = optim.Adam(netD.parameters(), lr=0.0001, betas=(0.5, 0.999))

# NOTE: from here the listing switches to a second snippet whose argparse setup
# is truncated; the flag name "--log_interval" below is assumed from its help text.
parser.add_argument("--log_interval",
                    type=int,
                    default=10,
                    help="interval between logging the G/D loss")
parser.add_argument("--save_interval",
                    type=int,
                    default=625,
                    help="interval to save the models")
opt = parser.parse_args()

img_shape = (opt.channels, opt.img_size, opt.img_size)
device = utils.selectDevice()

# Loss function
adversarial_loss = torch.nn.BCELoss().to(device)

# Initialize generator and discriminator
generator = Generator(img_shape).to(device)
discriminator = Discriminator(img_shape).to(device)
generator.apply(weights_init_normal)
discriminator.apply(weights_init_normal)

# Configure data loader
dataset = dataset.CelebA("./hw3_data/face/",
                         utils.faceFeatures[0],
                         transform=transforms.Compose([transforms.ToTensor()]))
dataloader = DataLoader(dataset,
                        batch_size=opt.batch_size,
                        shuffle=True,
                        num_workers=opt.n_cpu)

# Optimizers
optimizer_G = torch.optim.Adam(generator.parameters(),