Code Example #1
File: dcgan.py  Project: farzanashfaque/DCGAN
    def train(self, x, num_iter):

        for i in range(self.num_epochs):
            print("Epoch no :" + str(i + 1) + "/" + str(self.num_epochs))

            for j in tqdm(range(num_iter)):

                x1, y = self.gen_data(x, self.batch_size // 2)
                # train the discriminator
                self.discriminator.train_on_batch(x1, y)
                # Freeze the discriminator to train the GAN model
                utils.make_trainable(self.discriminator, False)
                # train the gan model
                inp = utils.gen_noise(self.batch_size // 2)
                labels = np.zeros((self.batch_size // 2, 1))
                self.gan_model.train_on_batch(inp, labels)

                # make the discriminator params back to trainable for the next iteration
                utils.make_trainable(self.discriminator, True)

            # save the weights and plot the results every 10 epochs
            if i % 10 == 0:
                self.gan_model.save_weights(self.save_path + str(i + 1) +
                                            ".h5")
                utils.plot(self.generator)
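
Examples #1 and #2 rely on a project-specific utils module that is not shown in this listing. A minimal sketch of what a make_trainable helper could look like in Keras follows; it is an assumption inferred from the call sites above, not the actual implementation in farzanashfaque/DCGAN.

# Sketch of an assumed helper (not the project's actual utils module):
# enable or disable weight updates for a Keras model and all of its layers.
def make_trainable(model, flag):
    model.trainable = flag
    for layer in model.layers:
        layer.trainable = flag

Note that Keras captures the trainable flag when a model is compiled, so the per-batch toggling in train() only affects models that are compiled after the flag is changed.
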
Code Example #2
File: dcgan.py  Project: farzanashfaque/DCGAN
    def gen_data(self, x, size):
        shuffle = np.random.randint(0, x.shape[0], size)
        realimg = x[shuffle]
        noise = utils.gen_noise(size)
        fakeimg = self.generator.predict(noise)
        images = np.concatenate((realimg, fakeimg))
        label1 = np.ones((size, 1))
        label2 = np.zeros((size, 1))
        labels = np.concatenate((label2, label1))

        return images, labels
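
utils.gen_noise is not shown either. For these two examples, a minimal NumPy sketch consistent with how it is called could be the following; the latent dimension and the choice of a standard normal distribution are assumptions, not the project's actual values.

import numpy as np

# Sketch of an assumed helper: sample `size` latent vectors from a standard
# normal distribution. The real utils.gen_noise may use a different
# latent_dim or a different distribution.
def gen_noise(size, latent_dim=100):
    return np.random.normal(0, 1, (size, latent_dim))

Note also that gen_data above labels the real images 0 and the generated images 1, which is why the GAN step in train() uses zeros as its target.
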
Code Example #3
import torchvision.datasets as dset
from torchvision.utils import save_image
from torchvision import datasets
from torch.utils.data import DataLoader

import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F

from architecture import Feature_extractor, Generator, weights_init
from utils import squared_distances, emd, gen_noise, sinkhorn_divergence, inception_score, IgnoreLabelDataset

img_size = 32
channels = 3
latent_dim = 100

img_shape = (channels, img_size, img_size)
PATH = 'results/umbSD_euclidean_reg_100.0_epoch975.pth'
print('loaded method : ', PATH)
netG = Generator(img_size, channels).cuda()
checkpoint = torch.load(PATH)

netG.load_state_dict(checkpoint['generator_state_dict'])

netG.eval()
z = Variable(gen_noise(20000, latent_dim, cuda=True))
gen_imgs = netG(z)
print ("Calculating Inception Score...")
print (inception_score(gen_imgs, cuda=True, batch_size=32, resize=True, splits=10))
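
In this script gen_noise comes from a different utils module, with an (n, latent_dim, cuda=...) signature. A plausible PyTorch sketch, assuming the generator expects the 4-D latent tensor used in the training code of Example #4, is shown below; it is not the actual helper.

import torch

# Sketch of an assumed helper: standard-normal latents shaped
# (n, latent_dim, 1, 1), matching the 4-D noise fed to netG elsewhere.
# The project's actual utils.gen_noise may differ.
def gen_noise(n, latent_dim, cuda=False):
    z = torch.randn(n, latent_dim, 1, 1)
    return z.cuda() if cuda else z

Generating all 20,000 samples in a single forward pass keeps the whole batch in GPU memory, and without torch.no_grad() the autograd graph as well; generating in smaller chunks is a common way to keep this inception-score computation within memory.
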
Code Example #4
def train(config):
    
    n_epochs = config["n_epochs"]
    batch_size = config["batch_size"]
    lr = config["lr"]
    latent_dim = config["latent_dim"]
    img_size = config["img_size"]
    channels = config["channels"]
    clip_value = config["clip_value"]
    sample_interval = config["sample_interval"]
    n_critic = config["n_critic"]
    k = config["k"]
    reg = config["reg"]
    print("n_epochs, batch_size, lr, latent_dim, img_size, channels, clip_value, sample_interval, n_critic, k, reg :\n", n_epochs, batch_size, lr, latent_dim, img_size, channels, clip_value, sample_interval, n_critic, k, reg)

    img_shape = (channels, img_size, img_size)

    cuda = True if torch.cuda.is_available() else False
    print("CUDA is avalaible :", cuda)
    
    
    # Initialize the generator and the feature extractor (critic)
    netG = Generator(img_size, channels)
    netD = Feature_extractor(img_size, channels)
    if cuda:
        netG.cuda()
        netD.cuda()

    Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
    TensorD = torch.cuda.DoubleTensor if cuda else torch.DoubleTensor

    one = torch.tensor(1, dtype=torch.float)
    if cuda:
        one = one.cuda()
    mone = one * -1

    netG.apply(weights_init)
    netD.apply(weights_init)

    # Configure data loader
    transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

    trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                            download=True, transform=transform)
    dataloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,
                                              shuffle=True, num_workers=2)

    # Optimizers
    optimizer_D = torch.optim.RMSprop(netD.parameters(), lr=lr)
    optimizer_G = torch.optim.RMSprop(netG.parameters(), lr=lr)


    # ----------
    #  Training
    # ----------

    batches_done = 0
    for epoch in range(n_epochs):

        data_iter = iter(dataloader)
        i = 0
        while (i < len(dataloader)):

            for j in range(n_critic):
                if i == len(dataloader):
                    break

                (imgs, _) = next(data_iter)
                if i == 0:
                    prec_imgs = imgs
                i += 1

                ########################
                # TRAIN Cost 
                ########################
                optimizer_D.zero_grad()
                # Configure input

                prec_real_imgs = Variable(prec_imgs.type(Tensor))
                real_imgs = Variable(imgs.type(Tensor))
                loss_G_item = 0
                netD.zero_grad()

                # ----- True Images -----
                batch_size = real_imgs.size(0)
                old_batch_size = prec_real_imgs.size(0)
                features_real_imgs = netD(real_imgs).view(batch_size, -1)
                features_prec_real_imgs = netD(prec_real_imgs).view(old_batch_size, -1)

                # ----- Generated Images -----
                noise = torch.FloatTensor(batch_size, latent_dim, 1, 1).type(Tensor).normal_(0, 1)
                G_imgs = Variable(netG(noise), requires_grad=False)  # Freeze G_imgs gradient

                noise2 = torch.FloatTensor(batch_size, latent_dim, 1, 1).type(Tensor).normal_(0, 1)
                G_imgs2 = Variable(netG(noise2), requires_grad=False)  # Freeze G_imgs gradient

                features_G_imgs = netD(G_imgs).view(batch_size, -1)
                features_G_imgs2 = netD(G_imgs2).view(batch_size, -1)

                # ----- Loss -----
                loss_D_ab = sinkhorn_divergence(features_real_imgs, features_G_imgs, reg=reg, cuda=cuda)  # U(a, b)
                loss_D_aa = sinkhorn_divergence(features_prec_real_imgs, features_real_imgs, reg=reg, cuda=cuda)  # U(a,a)
                loss_D_bb = sinkhorn_divergence(features_G_imgs, features_G_imgs2, reg=reg, cuda=cuda)  # U(b,b)

                loss_D = loss_D_ab - 1./2 * loss_D_aa - 1./2 * loss_D_bb


                loss_D.backward(mone)  # mone -> loss_D * -1
                optimizer_D.step()

                for p in netD.parameters():  # clamp the feature extractor's weights (WGAN-style weight clipping)
                    p.data.clamp_(-0.01, 0.01)

                prec_imgs = imgs    

            ########################
            # TRAIN GENERATOR
            ########################
            optimizer_G.zero_grad()
            for _ in range(k):

                # Sample noise as generator input
                z = Variable(gen_noise(batch_size, latent_dim, cuda=cuda), requires_grad=True)
                gen_imgs = netG(z)
                feature_gen_imgs = netD(gen_imgs).view(batch_size, -1)

                z2 = Variable(gen_noise(batch_size, latent_dim, cuda=cuda), requires_grad=True)
                feature_gen_imgs2 = netD(netG(z2)).view(batch_size, -1)

                loss_ab = sinkhorn_divergence(feature_gen_imgs, features_real_imgs.detach(), reg=reg, cuda=cuda)  # U(a,b)
                loss_aa = sinkhorn_divergence(feature_gen_imgs, feature_gen_imgs2, reg=reg, cuda=cuda)  # U(a,a)

                loss_G = (loss_ab - 1./2 * loss_aa)/k
                loss_G_item += loss_G.item()
                loss_G.backward(one)

            optimizer_G.step()
            print(
                "[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]"
                % (epoch, n_epochs, batches_done % (len(dataloader)/n_critic), (len(dataloader)/n_critic), loss_D.item(), loss_G.item())
            )

            batches_done += 1
        if epoch % sample_interval == 0:
            save_image(gen_imgs.data[:64], 
                       os.path.join(config["output_path_imgs"], "epoch_{}.png".format(epoch)), 
                       nrow=8, normalize=True)
            
    torch.save({
                'generator_state_dict': netG.state_dict(),
                'features_state_dict': netD.state_dict(),
                'optimizer_gen_state_dict': optimizer_G.state_dict(),
                'optimizer_features_state_dict': optimizer_D.state_dict(),
                }, os.path.join(config["output_path_results"], 
                                'umbSD_euclidean_reg_{}_final.pth'.format(reg)))
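
Example #4 imports sinkhorn_divergence from a utils module that is not shown. As it is used here, the helper returns the entropy-regularised optimal-transport cost U between two feature batches, and the debiased combination U(a,b) - 1/2 U(a,a) - 1/2 U(b,b) is formed outside of it. A minimal sketch of such a cost with plain Sinkhorn iterations and uniform weights follows; the squared-Euclidean cost, the iteration count and the numerical details are assumptions, and the project's version may well use a stabilised log-domain implementation instead.

import torch

# Sketch of an assumed helper (not the project's utils.sinkhorn_divergence):
# entropy-regularised optimal-transport cost between two feature batches,
# with uniform marginals, computed by plain Sinkhorn fixed-point iterations.
def sinkhorn_cost(x, y, reg=1.0, n_iters=100, eps=1e-8):
    cost = torch.cdist(x, y, p=2) ** 2               # pairwise squared distances
    n, m = cost.shape
    mu = torch.full((n,), 1.0 / n, device=x.device)  # uniform source weights
    nu = torch.full((m,), 1.0 / m, device=x.device)  # uniform target weights
    K = torch.exp(-cost / reg)                       # Gibbs kernel
    u = torch.ones_like(mu)
    for _ in range(n_iters):                         # Sinkhorn updates
        v = nu / (K.t() @ u + eps)
        u = mu / (K @ v + eps)
    plan = u.unsqueeze(1) * K * v.unsqueeze(0)       # transport plan
    return (plan * cost).sum()                       # <plan, cost>
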
Code Example #5
critic.apply(init_weights)
gen.apply(init_weights)

# configure loss and optimizers
criterion = nn.BCEWithLogitsLoss()
opt_gen = torch.optim.Adam(gen.parameters(), lr=LR, betas=(beta1, beta2))
opt_disc = torch.optim.Adam(critic.parameters(), lr=LR, betas=(beta1, beta2))

# configure tensorboard writer
repo = git.Repo(search_parent_directories=True)
sha = repo.head.object.hexsha[:6]
logdir = f"/home/bishwarup/GAN_experiments/dcgan/{sha}"
writer = SummaryWriter(log_dir=logdir)

# make a fixed noise to see the generator evolve over time on it
fixed_noise = gen_noise(32, NOISE_DIM, device=device)

# train loop
checkpointer = ModelCheckpoint(logdir, freq=CKPT_FREQ, keep_n=KEEP_LAST_N_CKPT)
best_fid = np.inf
for epoch in range(EPOCHS):
    torch.cuda.empty_cache()

    gen.train()
    critic.train()

    lossD = AverageMeter("LossD")
    lossG = AverageMeter("LossG")

    global_step = 0
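
Examples #5 and #6 use an AverageMeter helper that is not shown in the listing. A conventional sketch of such a running-average tracker is given below; only the constructor signature is inferred from the calls above, the rest is an assumption.

# Sketch of an assumed helper: keeps a running average of a scalar metric
# (for example a loss) under a printable name. Not the project's actual class.
class AverageMeter:
    def __init__(self, name):
        self.name = name
        self.sum = 0.0
        self.count = 0

    def update(self, value, n=1):
        self.sum += value * n
        self.count += n

    @property
    def avg(self):
        return self.sum / max(self.count, 1)
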
Code Example #6
gen = gen.apply(init_weights)
critic = critic.apply(init_weights)

# configure loss and optimizers
criterion = nn.BCEWithLogitsLoss()
opt_gen = torch.optim.Adam(gen.parameters(), lr=LR, betas=(beta1, beta2))
opt_disc = torch.optim.Adam(critic.parameters(), lr=LR, betas=(beta1, beta2))

# configure tensorboard writer
repo = git.Repo(search_parent_directories=True)
sha = repo.head.object.hexsha[:6]
writer = SummaryWriter(log_dir=f"/home/bishwarup/GAN_experiments/dcgan/{sha}")

# make a fixed noise to see the generator evolve over time on it
fixed_noise = gen_noise(32, NOISE_DIM, device=device)

# train loop
gen.train()
critic.train()

for epoch in range(EPOCHS):
    lossD = AverageMeter("LossD")
    lossG = AverageMeter("LossG")

    pbar = tqdm(enumerate(loader))
    for n_iter, (real, _) in pbar:

        real = real.to(device)
        cur_batch_size = real.size(0)
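
Example #6 is cut off just after the batch is moved to the device. The remainder of a typical DCGAN iteration, consistent with the BCEWithLogitsLoss criterion and the two Adam optimizers configured above, might look like the following sketch; it is not the project's actual loop, and the label convention and the use of gen_noise here are assumptions.

        # --- Sketch only, not the project's code: one DCGAN update step ---
        noise = gen_noise(cur_batch_size, NOISE_DIM, device=device)
        fake = gen(noise)

        # Discriminator step: push real towards 1 and fake towards 0.
        disc_real = critic(real).view(-1)
        disc_fake = critic(fake.detach()).view(-1)
        loss_disc = 0.5 * (
            criterion(disc_real, torch.ones_like(disc_real))
            + criterion(disc_fake, torch.zeros_like(disc_fake))
        )
        opt_disc.zero_grad()
        loss_disc.backward()
        opt_disc.step()

        # Generator step: make the discriminator output 1 on fakes.
        output = critic(fake).view(-1)
        loss_gen = criterion(output, torch.ones_like(output))
        opt_gen.zero_grad()
        loss_gen.backward()
        opt_gen.step()

        lossD.update(loss_disc.item(), cur_batch_size)
        lossG.update(loss_gen.item(), cur_batch_size)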