Example 1
    def download(self):
        if not os.path.exists('data/'):
            os.mkdir('data')
        if self.dataset_name == 'lsun':
            if not os.path.exists('data/lsun'):
                download_lsun('data')
                os.system('unzip data/lsun/church_outdoor_train_lmdb.zip')
                os.system(
                    'python3 wrapper/lsun/data.py export church_outdoor_train_lmdb --out_dir data/lsun'
                )
            self.image_tensor = glob('data/lsun/*.webp')
        elif self.dataset_name == 'mnist':
            if not os.path.exists('data/mnist'):
                download_mnist('data')
            self.image_tensor = self.load_mnist()

            # Resize the images before use
            print('Resizing MNIST images to', self.resize_length, '*',
                  self.resize_length)
            # NOTE: np.float was removed in NumPy 1.20; use an explicit dtype
            resize_result = np.empty(
                [len(self.image_tensor), self.resize_length,
                 self.resize_length, 1],
                dtype=np.float32)
            for i in range(len(self.image_tensor)):
                resized = cv2.resize(self.image_tensor[i],
                                     (self.resize_length, self.resize_length))
                resize_result[i] = np.expand_dims(resized, axis=-1)
            self.image_tensor = resize_result
        elif self.dataset_name == 'celeba':
            if not os.path.exists('data/celebA'):
                download_celeb_a('data')
            self.image_tensor = glob('data/celebA/*.jpg')
        else:
            raise ValueError('invalid dataset name: {}'.format(self.dataset_name))
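
The download() method above also leans on a load_mnist helper defined elsewhere in the class. For reference, a minimal stand-in that parses the standard MNIST IDX format might look like the sketch below; the file path and the gzip assumption are guesses, not part of the source.

import gzip
import numpy as np

def load_mnist(path='data/mnist/train-images-idx3-ubyte.gz'):
    # Hypothetical stand-in for self.load_mnist(): the IDX image file has a
    # 16-byte header (magic, count, rows, cols) followed by uint8 pixels.
    with gzip.open(path, 'rb') as f:
        buf = f.read()
    images = np.frombuffer(buf, dtype=np.uint8, offset=16)
    return images.reshape(-1, 28, 28).astype(np.float32)
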
Example 2
        plt.axis('off')
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_aspect('equal')
        img = sample.reshape(128, 128, 3)
        # scipy.misc.toimage was removed in SciPy 1.2; rescale min-max to
        # [0, 1] (toimage's default behavior) and pass the array to imshow
        img = (img - img.min()) / (img.max() - img.min() + 1e-8)
        plt.imshow(img, interpolation='nearest')

    return fig


# TF1.x-style graph API: tf.contrib and tf.placeholder were removed in TF 2.x
initializer = tf.contrib.layers.xavier_initializer()
rand_uniform = tf.random_uniform_initializer(-1, 1, seed=2)

X = tf.placeholder(tf.float32, shape=[None, 128, 128, 3])

download_celeb_a("../data")
data_files = glob(os.path.join("../data", "celebA", "*.jpg"))
len_x_train = len(data_files)
sample = [
    get_image(sample_file, 108, True, 128, is_grayscale=0)
    for sample_file in data_files
]
sample_images = np.array(sample).astype(np.float32)
x_train = sample_images

x_train = normalize(x_train)

theta_A = []
theta_G = []
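
The snippet calls a normalize helper it never defines. A plausible minimal version, assuming the usual GAN convention of scaling pixel values into [-1, 1] (the original may normalize differently):

import numpy as np

def normalize(x):
    # Assumed convention: map [0, 255] pixels to [-1, 1], matching the
    # tanh output range typical of DCGAN-style generators.
    return (x.astype(np.float32) / 127.5) - 1.0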

Example 3
def main(opt):
    writer = SummaryWriter(
        log_dir="logs/pagan/{}/lr={}_beta1={}_al={}_randomSeed={}/".format(
            opt.dataset, opt.lr, opt.beta1, opt.al, opt.manualSeed))

    if opt.dataset in ["imagenet", "folder", "lfw"]:
        # folder dataset
        dataset = dset.ImageFolder(
            root=opt.dataroot,
            transform=transforms.Compose([
                transforms.Resize(opt.imageSize),
                transforms.CenterCrop(opt.imageSize),
                transforms.ToTensor(),
                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
            ]),
        )
    elif opt.dataset == "lsun":
        dataset = dset.LSUN(
            root=opt.dataroot,
            classes=["bedroom_train"],
            transform=transforms.Compose([
                transforms.Resize(opt.imageSize),
                transforms.CenterCrop(opt.imageSize),
                transforms.ToTensor(),
                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
            ]),
        )
    elif opt.dataset == "cifar10":
        dataset = dset.CIFAR10(
            root=opt.dataroot,
            download=True,
            transform=transforms.Compose([
                transforms.Resize(opt.imageSize),
                transforms.ToTensor(),
                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
            ]),
        )
    elif opt.dataset == "mnist":
        dataset = dset.MNIST(
            root=opt.dataroot,
            download=True,
            transform=transforms.Compose([
                transforms.Resize(opt.imageSize),
                transforms.ToTensor(),
                transforms.Normalize((0.5, ), (0.5, )),
            ]),
        )
    elif opt.dataset == "fashionmnist":
        dataset = dset.FashionMNIST(
            root=opt.dataroot,
            download=True,
            transform=transforms.Compose([
                transforms.Resize(opt.imageSize),
                transforms.ToTensor(),
                transforms.Normalize((0.5, ), (0.5, )),
            ]),
        )
    elif opt.dataset == "celebA":
        download_celeb_a("data")
        dataset = dset.ImageFolder(
            root="data/celebA",
            transform=transforms.Compose([
                transforms.Resize(opt.imageSize),
                transforms.CenterCrop(opt.imageSize),
                transforms.ToTensor(),
                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
            ]),
        )
    elif opt.dataset == "fake":
        dataset = dset.FakeData(
            image_size=(3, opt.imageSize, opt.imageSize),
            transform=transforms.ToTensor(),
        )
    else:
        raise ValueError("unknown dataset: {}".format(opt.dataset))
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=opt.batch_size,
                                             shuffle=True,
                                             num_workers=int(opt.workers))

    device = torch.device("cuda:0" if opt.cuda else "cpu")
    ngpu = int(opt.ngpu)
    nz = int(opt.nz)
    ngf = int(opt.ngf)
    ndf = int(opt.ndf)
    nc = 1 if opt.dataset in {"mnist", "fashionmnist"} else 3

    netG = Generator(ngpu, nc, nz, ngf).to(device)
    netG.apply(weights_init)
    if opt.netG != "":
        netG.load_state_dict(torch.load(opt.netG))
    print(netG)

    netD = Discriminator(ngpu, nc, ndf).to(device)
    augmentation_level = opt.al
    netD.main.conv1 = spectral_norm(
        nn.Conv2d(nc + augmentation_level, ndf, 3, 1, 1,
                  bias=False)).to(device)
    netD.apply(weights_init)
    if opt.netD != "":
        netD.load_state_dict(torch.load(opt.netD))
    print(netD)

    criterion = nn.BCELoss()

    fixed_noise = torch.rand(opt.batch_size, nz, device=device) * 2 - 1  # uniform in [-1, 1]

    # setup optimizer
    optimizerD = optim.Adam(netD.parameters(),
                            lr=opt.lr,
                            betas=(opt.beta1, opt.beta2))
    optimizerG = optim.Adam(netG.parameters(),
                            lr=opt.lr,
                            betas=(opt.beta1, opt.beta2))

    global_step = 0
    last_augmentation_step = 0
    kid_score_history = []

    for epoch in range(opt.epochs):

        for i, data in enumerate(dataloader, start=0):

            if global_step % opt.augmentation_interval == 0:
                print("Global step: {}. Computing metrics...".format(
                    global_step))
                samples = random.sample(range(len(dataset)), opt.fid_batch)
                real_samples = [dataset[s][0] for s in samples]
                real_samples = torch.stack(real_samples, dim=0).to(device)
                fake_samples = []
                with torch.no_grad():
                    z = torch.rand(opt.fid_batch, nz, device=device) * 2 - 1
                    for k in tqdm(range(opt.fid_batch // opt.batch_size),
                                  desc="Generating fake images"):
                        z_ = z[k * opt.batch_size:(k + 1) * opt.batch_size]
                        fake_samples.append(netG(z_))
                    fake_samples = torch.cat(fake_samples, dim=0).to(device)
                print("Computing KID and FID...")
                kid, fid = compute_metrics(real_samples, fake_samples)
                print("FID: {:.4f}".format(fid))
                writer.add_scalar("metrics/fid", fid, global_step)
                print("KID: {:.4f}".format(kid))
                writer.add_scalar("metrics/kid", kid, global_step)
                # Trigger when KID improved by less than 5% relative to the
                # mean of the last two scores: 0.95 * (a + b) / 2 == (a + b) * 19 / 40
                if (len(kid_score_history) >= 2 and kid >=
                        (kid_score_history[-1] + kid_score_history[-2]) * 19 / 40):
                    # TODO decrease generator LR (paper is not clear)
                    augmentation_level += 1
                    last_augmentation_step = global_step
                    netD.main.conv1 = spectral_norm(
                        nn.Conv2d(nc + augmentation_level,
                                  ndf,
                                  3,
                                  1,
                                  1,
                                  bias=False)).to(device)
                    netD.main.conv1.apply(weights_init)
                    optimizerD = optim.Adam(netD.parameters(),
                                            lr=opt.lr,
                                            betas=(opt.beta1, opt.beta2))
                    print("Augmentation level increased to {}".format(
                        augmentation_level))
                    kid_score_history = []
                else:
                    kid_score_history.append(kid)

                writer.add_scalar("augmentation_level", augmentation_level,
                                  global_step)

            ############################
            # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
            ###########################
            # train with real
            netD.zero_grad()
            real = data[0].to(device)
            batch_size = real.size(0)
            if augmentation_level > 0:
                p = min(0.5 * (global_step - last_augmentation_step) / opt.tr,
                        0.5)
                if augmentation_level > 1:
                    augmentation_bits_old = np.random.randint(
                        0, 2, size=(batch_size, augmentation_level - 1))
                    augmentation_bits_new = np.where(
                        np.random.rand(batch_size, 1) < p,
                        np.ones((batch_size, 1)), np.zeros((batch_size, 1)))
                    augmentation_bits = np.concatenate(
                        (augmentation_bits_old, augmentation_bits_new), axis=1)
                else:
                    augmentation_bits = np.where(
                        np.random.rand(batch_size, 1) < p,
                        np.ones((batch_size, 1)), np.zeros((batch_size, 1)))
            else:
                augmentation_bits = None

            real_augmented, real_labels_augmented = add_channel(
                real, augmentation_bits, real=True)
            output = netD(real_augmented)
            errD_real = criterion(output, real_labels_augmented)
            errD_real.backward()
            D_x = output.mean().item()

            # train with fake
            noise = torch.rand(batch_size, nz, device=device) * 2 - 1
            fake = netG(noise)
            fake_augmented, fake_labels_augmented = add_channel(
                fake, augmentation_bits, real=False)

            output = netD(fake_augmented.detach())
            errD_fake = criterion(output, fake_labels_augmented)
            errD_fake.backward()
            D_G_z1 = output.mean().item()
            errD = errD_real + errD_fake
            optimizerD.step()

            ############################
            # (2) Update G network: maximize log(D(G(z)))
            ###########################
            netG.zero_grad()
            output = netD(fake_augmented)
            # fake labels count as real for the generator cost
            errG = criterion(output, 1 - fake_labels_augmented)
            errG.backward()
            D_G_z2 = output.mean().item()
            optimizerG.step()

            if i % opt.log_interval == 0:
                print(
                    "[{}/{}][{}/{}] Loss_D: {:.4f} Loss_G: {:.4f} D(x): {:.4f} D(G(z)): {:.4f} / {:.4f}"
                    .format(
                        epoch,
                        opt.epochs,
                        i,
                        len(dataloader),
                        errD.item(),
                        errG.item(),
                        D_x,
                        D_G_z1,
                        D_G_z2,
                    ))
                writer.add_scalar("discriminator/loss", errD.item(),
                                  global_step)
                writer.add_scalar("generator/loss", errG.item(), global_step)
                writer.add_scalar("discriminator/mean", D_x, global_step)
                writer.add_scalar("generator/mean1", D_G_z1, global_step)
                writer.add_scalar("generator/mean2", D_G_z2, global_step)

            if i % opt.save_interval == 0 or i == len(dataloader) - 1:
                if global_step == 0:
                    x = vutils.make_grid(real, normalize=True)
                    writer.add_image('Real images', x, global_step)
                x = vutils.make_grid(fake, normalize=True)
                writer.add_image('Generated images', x, global_step)
                vutils.save_image(real,
                                  "%s/real_%s.png" % (opt.outi, opt.dataset),
                                  normalize=True)
                fake = netG(fixed_noise)
                vutils.save_image(
                    fake.detach(),
                    "%s/fake_%s_epoch_%03d.png" %
                    (opt.outi, opt.dataset, epoch),
                    normalize=True,
                )

            global_step += 1
        # do checkpointing
        torch.save(
            netG.state_dict(),
            "%s/netG_%s_last.pth" % (opt.outc, opt.dataset),
        )
        torch.save(
            netD.state_dict(),
            "%s/netD_%s_last.pth" % (opt.outc, opt.dataset),
        )
        if epoch % 20 == 0:
            torch.save(
                netG.state_dict(),
                "%s/netG_%s_epoch_%d.pth" % (opt.outc, opt.dataset, epoch),
            )
            torch.save(
                netD.state_dict(),
                "%s/netD_%s_epoch_%d.pth" % (opt.outc, opt.dataset, epoch),
            )
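
The training loop relies on an add_channel helper that is not shown. Under the PA-GAN scheme it plausibly appends one constant-valued input channel per augmentation bit and derives the discriminator target as the checksum (parity) of the real/fake bit and the augmentation bits; the sketch below is an assumption-based reconstruction, not the authors' code.

import numpy as np
import torch

def add_channel(images, augmentation_bits, real):
    # Hypothetical reconstruction. images: (B, C, H, W) tensor;
    # augmentation_bits: (B, L) numpy array of 0/1 bits, or None.
    batch_size = images.size(0)
    device = images.device
    if augmentation_bits is None:
        labels = torch.full((batch_size,), float(real), device=device)
        return images, labels
    bits = torch.as_tensor(augmentation_bits, dtype=images.dtype, device=device)
    # Broadcast each bit to a full H x W plane and stack it onto the input,
    # matching the nc + augmentation_level channels that conv1 expects.
    planes = bits[:, :, None, None].expand(-1, -1, images.size(2), images.size(3))
    augmented = torch.cat([images, planes], dim=1)
    # PA-GAN checksum: target = XOR of the real/fake bit with all augmentation bits.
    labels = (float(real) + bits.sum(dim=1)) % 2
    return augmented, labels
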
Example 4
def main(opt):
    writer = SummaryWriter(
        log_dir="logs/baseline/{}/lr={}_beta1={}_randomSeed={}/".format(
            opt.dataset, opt.lr, opt.beta1, opt.manualSeed
        )
    )

    if opt.dataset in ["imagenet", "folder", "lfw"]:
        # folder dataset
        dataset = dset.ImageFolder(
            root=opt.dataroot,
            transform=transforms.Compose(
                [
                    transforms.Resize(opt.imageSize),
                    transforms.CenterCrop(opt.imageSize),
                    transforms.ToTensor(),
                    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                ]
            ),
        )
    elif opt.dataset == "lsun":
        dataset = dset.LSUN(
            root=opt.dataroot,
            classes=["bedroom_train"],
            transform=transforms.Compose(
                [
                    transforms.Resize(opt.imageSize),
                    transforms.CenterCrop(opt.imageSize),
                    transforms.ToTensor(),
                    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                ]
            ),
        )
    elif opt.dataset == "cifar10":
        dataset = dset.CIFAR10(
            root=opt.dataroot,
            download=True,
            transform=transforms.Compose(
                [
                    transforms.Resize(opt.imageSize),
                    transforms.ToTensor(),
                    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                ]
            ),
        )
    elif opt.dataset == "mnist":
        dataset = dset.MNIST(
            root=opt.dataroot,
            download=True,
            transform=transforms.Compose(
                [
                    transforms.Resize(opt.imageSize),
                    transforms.ToTensor(),
                    transforms.Normalize((0.5,), (0.5,)),
                ]
            ),
        )
    elif opt.dataset == "fashionmnist":
        dataset = dset.FashionMNIST(
            root=opt.dataroot,
            download=True,
            transform=transforms.Compose(
                [
                    transforms.Resize(opt.imageSize),
                    transforms.ToTensor(),
                    transforms.Normalize((0.5,), (0.5,)),
                ]
            ),
        )
    elif opt.dataset == "celebA":
        download_celeb_a("data")
        dataset = dset.ImageFolder(
            root="data/celebA",
            transform=transforms.Compose(
                [
                    transforms.Resize(opt.imageSize),
                    transforms.CenterCrop(opt.imageSize),
                    transforms.ToTensor(),
                    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                ]
            ),
        )
    elif opt.dataset == "fake":
        dataset = dset.FakeData(
            image_size=(3, opt.imageSize, opt.imageSize),
            transform=transforms.ToTensor(),
        )
    else:
        raise ValueError("unknown dataset: {}".format(opt.dataset))
    dataloader = torch.utils.data.DataLoader(
        dataset, batch_size=opt.batch_size, shuffle=True, num_workers=int(opt.workers)
    )

    device = torch.device("cuda:0" if opt.cuda else "cpu")
    ngpu = int(opt.ngpu)
    nz = int(opt.nz)
    ngf = int(opt.ngf)
    ndf = int(opt.ndf)
    nc = 1 if opt.dataset in {"mnist", "fashionmnist"} else 3

    netG = Generator(ngpu, nc, nz, ngf).to(device)
    netG.apply(weights_init)
    if opt.netG != "":
        netG.load_state_dict(torch.load(opt.netG))
    print(netG)

    netD = Discriminator(ngpu, nc, ndf).to(device)
    netD.apply(weights_init)
    if opt.netD != "":
        netD.load_state_dict(torch.load(opt.netD))
    print(netD)

    criterion = nn.BCELoss()

    fixed_noise = torch.rand(opt.batch_size, nz, device=device) * 2 - 1  # uniform in [-1, 1]
    # Float literals so torch.full() below yields a float tensor, as BCELoss expects
    real_label = 1.0
    fake_label = 0.0

    # setup optimizer
    optimizerD = optim.Adam(netD.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2))
    optimizerG = optim.Adam(netG.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2))

    global_step = 0

    for epoch in range(opt.epochs):

        for i, data in enumerate(dataloader, start=0):

            if global_step % opt.fid_interval == 0:
                print("Global step: {}. Computing metrics...".format(global_step))
                samples = random.sample(range(len(dataset)), opt.fid_batch)
                real_samples = [dataset[s][0] for s in samples]
                real_samples = torch.stack(real_samples, dim=0).to(device)
                fake_samples = []
                with torch.no_grad():
                    z = torch.rand(opt.fid_batch, nz, device=device) * 2 - 1
                    for k in tqdm(range(opt.fid_batch // opt.batch_size), desc="Generating fake images"):
                        z_ = z[k * opt.batch_size : (k + 1) * opt.batch_size]
                        fake_samples.append(netG(z_))
                    fake_samples = torch.cat(fake_samples, dim=0).to(device)
                print("Computing KID and FID...")                
                kid, fid = compute_metrics(real_samples, fake_samples)
                print("FID: {:.4f}".format(fid))
                writer.add_scalar("metrics/fid", fid, global_step)
                print("KID: {:.4f}".format(kid))
                writer.add_scalar("metrics/kid", kid, global_step)

            ############################
            # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
            ###########################
            # train with real
            netD.zero_grad()
            real_images = data[0].to(device)
            batch_size = real_images.size(0)
            label = torch.full((batch_size,), real_label, device=device)

            output = netD(real_images)
            errD_real = criterion(output, label)
            errD_real.backward()
            D_x = output.mean().item()

            # train with fake
            noise = torch.rand(batch_size, nz, device=device) * 2 - 1
            fake = netG(noise)
            label.fill_(fake_label)
            output = netD(fake.detach())
            errD_fake = criterion(output, label)
            errD_fake.backward()
            D_G_z1 = output.mean().item()
            errD = errD_real + errD_fake
            optimizerD.step()

            ############################
            # (2) Update G network: maximize log(D(G(z)))
            ###########################
            netG.zero_grad()
            label.fill_(real_label)  # fake labels are real for generator cost
            output = netD(fake)
            errG = criterion(output, label)
            errG.backward()
            D_G_z2 = output.mean().item()
            optimizerG.step()

            if i % opt.log_interval == 0:
                print(
                    "[{}/{}][{}/{}] Loss_D: {:.4f} Loss_G: {:.4f} D(x): {:.4f} D(G(z)): {:.4f} / {:.4f}".format(
                        epoch,
                        opt.epochs,
                        i,
                        len(dataloader),
                        errD.item(),
                        errG.item(),
                        D_x,
                        D_G_z1,
                        D_G_z2,
                    )
                )
                writer.add_scalar("discriminator/loss", errD.item(), global_step)
                writer.add_scalar("generator/loss", errG.item(), global_step)
                writer.add_scalar("discriminator/mean", D_x, global_step)
                writer.add_scalar("generator/mean1", D_G_z1, global_step)
                writer.add_scalar("generator/mean2", D_G_z2, global_step)

            if i % opt.save_interval == 0 or i == len(dataloader) - 1:
                if global_step == 0:
                    x = vutils.make_grid(real_images, normalize=True)
                    writer.add_image('Real images', x, global_step)
                x = vutils.make_grid(fake, normalize=True)
                writer.add_image('Generated images', x, global_step)
                vutils.save_image(
                    real_images, "%s/real_%s.png" % (opt.outi, opt.dataset), normalize=True
                )
                fake = netG(fixed_noise)
                vutils.save_image(
                    fake.detach(),
                    "%s/fake_%s_epoch_%03d.png" % (opt.outi, opt.dataset, epoch),
                    normalize=True,
                )
            global_step += 1

        # do checkpointing
        torch.save(
            netG.state_dict(),
            "%s/netG_%s_last.pth" % (opt.outc, opt.dataset),
        )
        torch.save(
            netD.state_dict(),
            "%s/netD_%s_last.pth" % (opt.outc, opt.dataset),
        )
        if epoch % 20 == 0:
            torch.save(
                netG.state_dict(),
                "%s/netG_%s_epoch_%d.pth" % (opt.outc, opt.dataset, epoch),
            )
            torch.save(
                netD.state_dict(),
                "%s/netD_%s_epoch_%d.pth" % (opt.outc, opt.dataset, epoch),
            )
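
Both main(opt) functions read the same option object. A minimal argparse sketch covering the opt fields the loops above reference; the defaults are illustrative guesses, not the authors' settings.

import argparse

def parse_opt():
    p = argparse.ArgumentParser()
    p.add_argument("--dataset", default="cifar10",
                   help="imagenet | folder | lfw | lsun | cifar10 | mnist | fashionmnist | celebA | fake")
    p.add_argument("--dataroot", default="data")
    p.add_argument("--workers", type=int, default=2)
    p.add_argument("--batch_size", type=int, default=64)
    p.add_argument("--imageSize", type=int, default=64)
    p.add_argument("--nz", type=int, default=100)
    p.add_argument("--ngf", type=int, default=64)
    p.add_argument("--ndf", type=int, default=64)
    p.add_argument("--epochs", type=int, default=25)
    p.add_argument("--lr", type=float, default=2e-4)
    p.add_argument("--beta1", type=float, default=0.5)
    p.add_argument("--beta2", type=float, default=0.999)
    p.add_argument("--cuda", action="store_true")
    p.add_argument("--ngpu", type=int, default=1)
    p.add_argument("--netG", default="", help="path to a netG checkpoint to resume from")
    p.add_argument("--netD", default="", help="path to a netD checkpoint to resume from")
    p.add_argument("--outi", default="images", help="where sample PNGs are written")
    p.add_argument("--outc", default="checkpoints", help="where .pth files are written")
    p.add_argument("--manualSeed", type=int, default=42)
    p.add_argument("--log_interval", type=int, default=100)
    p.add_argument("--save_interval", type=int, default=500)
    p.add_argument("--fid_interval", type=int, default=2000, help="baseline loop (Example 4)")
    p.add_argument("--fid_batch", type=int, default=1000)
    p.add_argument("--augmentation_interval", type=int, default=2000, help="PA-GAN loop (Example 3)")
    p.add_argument("--al", type=int, default=0, help="initial augmentation level (PA-GAN)")
    p.add_argument("--tr", type=int, default=1000, help="augmentation probability ramp steps (PA-GAN)")
    return p.parse_args()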