Example #1
def get_generator_model(opt):
    nc = 1 if opt.data.startswith("mnist") else 3
    if opt.model == "DCGAN":
        return DCGAN_G(100, nc, 64)
    elif opt.model == "WGAN":
        return WGAN_G(64, 100, nc, 64, 1)
    elif opt.model == "MDGAN":
        return MDGAN_G(nc)
    elif opt.model == "NNGAN":
        return DCGAN_G(100, nc, 64)
    elif opt.model == "MGGAN":
        return DCGAN_G(100, nc, 64)
    assert False, "unknown model: " + opt.model
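
A minimal usage sketch of this factory, assuming an argparse-style opt; the Namespace values below are illustrative, not from the original:

from argparse import Namespace

# Hypothetical opt: any object with .data and .model attributes works here.
opt = Namespace(data="mnist", model="DCGAN")
netG = get_generator_model(opt)  # DCGAN_G(100, 1, 64); mnist data is single-channel
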
def DCGAN_cluster_main(opt):
    g = Globals()

    N_CLUSTER = 200
    cluster = np.load('/scratch/ys646/gan/features/celeba/clus.npy')

    opt.batchSize = 64
    opt.imageSize = 64
    opt.nz = 100
    opt.ngf = 64
    opt.ndf = 64
    opt.niter = 50
    opt.lr = 0.0002
    opt.beta1 = 0.5
    opt.cuda = True
    opt.ngpu = 1
    opt.netG = ''
    opt.netD = ''
    opt.outf = g.default_model_dir + "DCGAN/"
    opt.manualSeed = None

    opt = addDataInfo(opt)
    opt.outf = '/scratch/ys646/gan/results/'
    opt.outf = opt.outf + opt.data + "/"
    print_prop(opt)

    try:
        os.makedirs(opt.outf)
    except OSError:
        pass
    if os.path.exists(opt.outf + "/mark"):
        print("Already generated before. Now exit.")
        return

    if opt.manualSeed is None:
        opt.manualSeed = random.randint(1, 10000)
    print("Random Seed: ", opt.manualSeed)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)
    if opt.cuda:
        torch.cuda.manual_seed_all(opt.manualSeed)

    cudnn.benchmark = True

    if torch.cuda.is_available() and not opt.cuda:
        print(
            "WARNING: You have a CUDA device, so you should probably run with --cuda"
        )

    # option 1: just don't shuffle, and use a counter for indices;
    # this is important to keep the ordering fixed
    opt.workers = 1
    dataset, dataloader = getDataSet(opt, needShuf=False)
    # option 2: shuffle, but use a modified dataloader that also outputs indices

    ngpu = int(opt.ngpu)
    nz = int(opt.nz)
    ngf = int(opt.ngf)
    ndf = int(opt.ndf)
    nc = 1 if opt.data.startswith("mnist") else 3

    # custom weights initialization called on netG and netD
    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv') != -1:
            m.weight.data.normal_(0.0, 0.02)
        elif classname.find('BatchNorm') != -1:
            m.weight.data.normal_(1.0, 0.02)
            m.bias.data.fill_(0)

    netG = DCGAN_G(nz + N_CLUSTER, nc, ngf)
    netG.apply(weights_init)
    if opt.netG != '':
        netG.load_state_dict(torch.load(opt.netG))
    print(netG)

    class _netD(nn.Module):
        def __init__(self, ngpu):
            super(_netD, self).__init__()
            self.ngpu = ngpu
            self.main = nn.Sequential(
                # input is (nc) x 64 x 64
                # nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
                # extra channel for input embedding
                nn.Conv2d(nc + 1, ndf, 4, 2, 1, bias=False),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf) x 32 x 32
                nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * 2),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*2) x 16 x 16
                nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * 4),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*4) x 8 x 8
                nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * 8),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*8) x 4 x 4
                nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
                nn.Sigmoid())
            self.emb = nn.Embedding(N_CLUSTER, opt.imageSize * opt.imageSize)

        def forward(self, input, clus_var):
            out_emb = self.emb.forward(clus_var)
            out_emb = out_emb.view(-1, 1, opt.imageSize, opt.imageSize)
            new_input = torch.cat([out_emb, input], 1)
            output = self.main.forward(new_input)
            return output.view(-1, 1)

    netD = _netD(ngpu)
    netD.apply(weights_init)
    if opt.netD != '':
        netD.load_state_dict(torch.load(opt.netD))
    print(netD)

    criterion = nn.BCELoss()

    input = torch.FloatTensor(opt.batchSize, nc, opt.imageSize, opt.imageSize)
    noise = torch.FloatTensor(opt.batchSize, nz, 1, 1)
    fixed_noise = torch.FloatTensor(opt.batchSize, nz, 1, 1).normal_(0, 1)
    label = torch.FloatTensor(opt.batchSize)
    real_label = 1
    fake_label = 0

    if opt.cuda:
        netD.cuda()
        netG.cuda()
        criterion.cuda()
        input, label = input.cuda(), label.cuda()
        noise, fixed_noise = noise.cuda(), fixed_noise.cuda()

    input = Variable(input)
    label = Variable(label)
    noise = Variable(noise)
    fixed_noise = Variable(fixed_noise)

    # setup optimizer
    optimizerD = optim.Adam(netD.parameters(),
                            lr=opt.lr,
                            betas=(opt.beta1, 0.999))
    optimizerG = optim.Adam(netG.parameters(),
                            lr=opt.lr,
                            betas=(opt.beta1, 0.999))

    for epoch in range(opt.niter):
        counter = 0
        for i, data in enumerate(dataloader, 0):
            ############################
            # (0) Get the corresponding clusters
            ###########################
            batch_size = data[1].size(0)
            clus_batch = cluster[counter:counter + batch_size]
            clus_batch = torch.from_numpy(clus_batch).long()
            counter = counter + batch_size

            clus_var = Variable(clus_batch.cuda(), requires_grad=False)
            oh = torch.FloatTensor(batch_size, N_CLUSTER)
            oh.zero_()
            oh.scatter_(1, clus_batch.view(-1, 1), 1)
            oh_var = Variable(oh.cuda(), requires_grad=False)

            ############################
            # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
            ###########################
            # train with real
            netD.zero_grad()
            real_cpu, _ = data
            input.data.resize_(real_cpu.size()).copy_(real_cpu)
            label.data.resize_(batch_size).fill_(real_label)

            output = netD(input, clus_var)
            errD_real = criterion(output, label)
            errD_real.backward()
            D_x = output.data.mean()

            # train with fake
            noise.data.resize_(batch_size, nz, 1, 1)
            noise.data.normal_(0, 1)
            # pad noise with one hot
            fake = netG(torch.cat([noise, oh_var], 1))

            label.data.fill_(fake_label)
            output = netD(fake.detach(), clus_var)
            errD_fake = criterion(output, label)
            errD_fake.backward()
            D_G_z1 = output.data.mean()
            errD = errD_real + errD_fake
            optimizerD.step()

            ############################
            # (2) Update G network: maximize log(D(G(z)))
            ###########################
            netG.zero_grad()
            # fake labels are real for generator cost
            label.data.fill_(real_label)
            output = netD(fake, clus_var)
            errG = criterion(output, label)
            errG.backward()
            D_G_z2 = output.data.mean()
            optimizerG.step()

            print(
                '[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'
                % (epoch, opt.niter, i, len(dataloader), errD.data[0],
                   errG.data[0], D_x, D_G_z1, D_G_z2))
            if i % 100 == 0:
                saveImage(real_cpu, '%s/real_samples.png' % opt.outf)
                fake = netG(torch.cat([fixed_noise, oh_var], 1))
                saveImage(fake.data,
                          '%s/fake_samples_epoch_%03d.png' % (opt.outf, epoch))

        # do checkpointing
        torch.save(netG.state_dict(),
                   '%s/netG_epoch_%d.pth' % (opt.outf, epoch))
        torch.save(netD.state_dict(),
                   '%s/netD_epoch_%d.pth' % (opt.outf, epoch))

    with open(opt.outf + "/mark", "w") as f:
        f.write("")
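
The conditioning above combines two tensor tricks: a one-hot cluster code appended to the generator noise, and a learned per-cluster embedding reshaped into an extra input channel for the discriminator. A standalone sketch of the one-hot construction, with small illustrative sizes:

import torch

N_CLUSTER, batch_size, nz = 200, 4, 100
clus_batch = torch.LongTensor([3, 7, 3, 199])    # one cluster id per sample

oh = torch.zeros(batch_size, N_CLUSTER)
oh.scatter_(1, clus_batch.view(-1, 1), 1)        # row i gets a 1 at column clus_batch[i]

noise = torch.randn(batch_size, nz, 1, 1)
# reshape the one-hot to 4-D so it concatenates with the noise along dim 1
cond = torch.cat([noise, oh.view(batch_size, N_CLUSTER, 1, 1)], 1)
print(cond.size())                               # torch.Size([4, 300, 1, 1])
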
Example #3
def MGGAN_main(opt):

    g = Globals()

    opt.workers = 2
    opt.batchSize = 64
    opt.imageSize = 64
    nc = 1 if opt.data.startswith("mnist") else 3
    opt.nz = 100
    opt.ngf = 64
    opt.ndf = 64
    opt.niter = 30
    opt.lr = 0.0002
    opt.beta1 = 0.5
    opt.cuda = True
    opt.ngpu = 1
    opt.netG = ''
    opt.netD = ''
    opt.outf = g.default_model_dir + "MGGAN/"
    opt.manualSeed = None

    opt = addDataInfo(opt)
    opt.outf = opt.outf + opt.data + "/"
    print_prop(opt)

    try:
        os.makedirs(opt.outf)
    except OSError:
        pass
    if os.path.exists(opt.outf + "/mark"):
        print("Already generated before. Now exit.")
        return

    if opt.manualSeed is None:
        opt.manualSeed = random.randint(1, 10000)
    print("Random Seed: ", opt.manualSeed)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)
    if opt.cuda:
        torch.cuda.manual_seed_all(opt.manualSeed)

    cudnn.benchmark = True

    dataset, dataloader = getDataSet(opt)

    ngpu = int(opt.ngpu)
    nz = int(opt.nz)
    ngf = int(opt.ngf)
    ndf = int(opt.ndf)

    # custom weights initialization called on netG and netD
    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv') != -1:
            m.weight.data.normal_(0.0, 0.02)
        elif classname.find('BatchNorm') != -1:
            m.weight.data.normal_(1.0, 0.02)
            m.bias.data.fill_(0)

    netG = DCGAN_G(nz, nc, ngf)
    netG.apply(weights_init)
    if opt.netG != '':
        netG.load_state_dict(torch.load(opt.netG))
    print(netG)

    nloss = 200

    class _netD(nn.Module):

        def __init__(self, ngpu):
            super(_netD, self).__init__()
            self.ngpu = ngpu
            self.main = nn.Sequential(
                # input is (nc) x 64 x 64
                nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf) x 32 x 32
                nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * 2),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*2) x 16 x 16
                nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * 4),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*4) x 8 x 8
                nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False)
            )
            self.main2 = nn.Sequential(
                # state size. (ndf*8) x 4 x 4
                nn.BatchNorm2d(ndf * 8),
                nn.LeakyReLU(0.2, inplace=True),
                nn.Conv2d(ndf * 8, nloss, 4, 1, 0, bias=False),
                # nn.Linear(ndf*8*4*4,nloss),
                nn.Sigmoid()
            )

        def forward(self, input):
            self.feature = self.main.forward(input)
            # output=self.main2.forward(self.feature.view(input.size(0),-1))
            output = self.main2.forward(self.feature)
            return output.view(-1, 1)

    netD = _netD(ngpu)
    netD.apply(weights_init)
    if opt.netD != '':
        netD.load_state_dict(torch.load(opt.netD))
    print(netD)

    criterion = nn.BCELoss()

    input = torch.FloatTensor(opt.batchSize, nc, opt.imageSize, opt.imageSize)
    noise = torch.FloatTensor(opt.batchSize, nz, 1, 1)
    fixed_noise = torch.FloatTensor(opt.batchSize, nz, 1, 1).normal_(0, 1)
    label = torch.FloatTensor(opt.batchSize, nloss)
    real_label = 1
    fake_label = 0

    if opt.cuda:
        netD.cuda()
        netG.cuda()
        criterion.cuda()
        input, label = input.cuda(), label.cuda()
        noise, fixed_noise = noise.cuda(), fixed_noise.cuda()

    input = Variable(input)
    label = Variable(label)
    noise = Variable(noise)
    fixed_noise = Variable(fixed_noise)

    # setup optimizer
    optimizerD = optim.Adam(netD.parameters(), lr=opt.lr,
                            betas=(opt.beta1, 0.999))
    optimizerG = optim.Adam(netG.parameters(), lr=opt.lr,
                            betas=(opt.beta1, 0.999))

    real_batch = 11
    grow_speed = 5
    for epoch in range(opt.niter):
        if epoch % grow_speed == 0:
            if real_batch > 1:
                real_batch -= 1

            real_inputs = torch.FloatTensor(
                real_batch * opt.batchSize, nc, opt.imageSize, opt.imageSize)
        pointer = 0
        for i, data in enumerate(dataloader, 0):
            real_cpu, _ = data
            batch_size = real_cpu.size(0)
            if batch_size < opt.batchSize:
                continue
            # fill slot `pointer` of the real pool; slots run 0 .. real_batch-1
            real_inputs[
                pointer * batch_size:(pointer + 1) * batch_size].copy_(real_cpu)
            pointer = (pointer + 1) % real_batch
            if pointer != 0:  # pool not full yet
                continue
            # Done collecting; compute feature vectors for the whole real pool
            input.data.resize_(real_inputs.size()).copy_(real_inputs)
            netD(input)
            true_features = netD.feature.view(
                real_inputs.size(0), -1)  # flatten each feature map to a vector

            noise.data.resize_(batch_size, nz, 1, 1)
            noise.data.normal_(0, 1)
            fake = netG(noise)
            label.data.fill_(fake_label)
            output = netD(fake.detach())
            fake_features = netD.feature.view(batch_size, -1)

            # Pair each fake feature with a true feature by solving an
            # assignment (LP) problem, then rebuild the real batch in that order
            match = solve(fake_features.data, true_features.data)
            input.data.resize_(real_cpu.size())
            for j in range(0, batch_size):
                input.data[j].copy_(real_inputs[match[j]])

            tot_mini_batch = 10
            for mini_batch in range(0, tot_mini_batch):
                label.data.fill_(real_label)
                netD.zero_grad()
                output = netD(input)
                errD_real = criterion(output, label)
                errD_real.backward()
                D_x = output.data.mean()

                fake = netG(noise)
                label.data.fill_(fake_label)
                output = netD(fake.detach())
                errD_fake = criterion(output, label)
                errD_fake.backward()
                D_G_z1 = output.data.mean()

                errD = errD_real + errD_fake
                optimizerD.step()

                ############################
                # (2) Update G network: maximize log(D(G(z)))
                ###########################
                netG.zero_grad()
                # fake labels are real for generator cost
                label.data.fill_(real_label)
                output = netD(fake)
                errG = criterion(output, label)
                errG.backward()
                D_G_z2 = output.data.mean()
                optimizerG.step()

                print('[%d/%d][%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'
                      % (epoch, opt.niter, i, len(dataloader), mini_batch, tot_mini_batch,
                         errD.data[0], errG.data[0], D_x, D_G_z1, D_G_z2))
            saveImage(real_cpu, '%s/real_samples.png' % opt.outf)
            fake = netG(fixed_noise)
            saveImage(fake.data, '%s/fake_samples_epoch_%03d.png' %
                      (opt.outf, epoch))

        # do checkpointing
        torch.save(netG.state_dict(), '%s/netG_epoch_%d.pth' %
                   (opt.outf, epoch))
        torch.save(netD.state_dict(), '%s/netD_epoch_%d.pth' %
                   (opt.outf, epoch))
    with open(opt.outf + "/mark", "w") as f:
        f.write("")
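
solve is not shown in this listing; from the call site it must map each fake feature vector to the index of a real one (the comment above calls it an LP). A hedged stand-in under that assumption, using a greedy nearest-neighbour assignment rather than a true linear program:

import torch

def solve(fake_features, true_features):
    # Greedy placeholder for the LP matcher assumed above: for each row of
    # fake_features (B, d), return the index of the nearest row of
    # true_features (N, d) under squared Euclidean distance. A real LP
    # would balance the assignment globally; this is only a sketch.
    d = (fake_features * fake_features).sum(1, keepdim=True) \
        - 2 * fake_features.mm(true_features.t()) \
        + (true_features * true_features).sum(1).unsqueeze(0)
    return d.min(1)[1]      # LongTensor of matched indices, one per fake sample
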
Example #4
def NNGAN_main(opt):
    g = Globals()

    opt.workers = 2
    opt.batchSize = 64
    opt.imageSize = 64
    opt.nz = 100
    opt.ngf = 64
    opt.ndf = 64
    opt.niter = 50
    opt.lr = 0.0002
    opt.beta1 = 0.5
    opt.cuda = True
    opt.ngpu = 1
    opt.netG = ''
    opt.netF = ''
    opt.netC = ''
    opt.outf = g.default_model_dir + "NNGAN/"
    opt.manualSeed = None

    opt = addDataInfo(opt)
    opt.outf = opt.outf + opt.data + "/"
    print_prop(opt)

    try:
        os.makedirs(opt.outf)
    except OSError:
        pass

    if opt.manualSeed is None:
        opt.manualSeed = random.randint(1, 10000)
    print("Random Seed: ", opt.manualSeed)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)
    if opt.cuda:
        torch.cuda.manual_seed_all(opt.manualSeed)

    if os.path.exists(opt.outf + "/mark"):
        print("Already generated before. Now exit.")
        return

    cudnn.benchmark = True

    dataset, dataloader = getDataSet(opt, needShuf=False)

    nz = int(opt.nz)
    ngf = int(opt.ngf)
    ndf = int(opt.ndf)
    nc = 1 if opt.data.startswith("mnist") else 3

    # custom weights initialization called on netG and netD
    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv') != -1:
            m.weight.data.normal_(0.0, 0.02)
        elif classname.find('BatchNorm') != -1:
            m.weight.data.normal_(1.0, 0.02)
            m.bias.data.fill_(0)

    netG = DCGAN_G(100, nc, 64)
    netG.apply(weights_init)
    if opt.netG != '':
        netG.load_state_dict(torch.load(opt.netG))
        print("Load netg")
    print(netG)

    class _netFeature(nn.Module):

        def __init__(self):
            super(_netFeature, self).__init__()
            self.main = nn.Sequential(
                # input is (nc) x 64 x 64
                nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf) x 32 x 32
                nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * 2),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*2) x 16 x 16
                nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * 4),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*4) x 8 x 8
                nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * 8),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*8) x 4 x 4
            )

        def forward(self, input):
            output = self.main.forward(input).view(input.size(0), -1)
            # outputN=torch.norm(output,2,1)
            # return output/(outputN.expand_as(output))
            return output

    class _netCv(nn.Module):

        def __init__(self):
            super(_netCv, self).__init__()
            self.main = nn.Sequential(
                nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
                nn.Sigmoid()
            )

        def forward(self, input):
            return self.main(input.view(input.size(0), 512, 4, 4)).view(-1, 1)

    netF = _netFeature()
    netF.apply(weights_init)
    print(netF)
    netC = _netCv()
    netC.apply(weights_init)
    print(netC)
    if opt.netF != '':
        netF.load_state_dict(torch.load(opt.netF))
        print("Load netf")
    if opt.netC != '':
        netC.load_state_dict(torch.load(opt.netC))
        print("Load netc")

    criterion = nn.BCELoss()

    core_batch = 64
    input = torch.FloatTensor(opt.batchSize, nc, opt.imageSize, opt.imageSize)
    noise = torch.FloatTensor(opt.batchSize, nz, 1, 1)
    fixed_noise = torch.FloatTensor(64, nz, 1, 1).normal_(0, 1)
    label = torch.FloatTensor(core_batch)
    real_label = 1
    fake_label = 0

    if opt.cuda:
        netF.cuda()
        netC.cuda()
        netG.cuda()
        criterion.cuda()
        input, label = input.cuda(), label.cuda()
        noise, fixed_noise = noise.cuda(), fixed_noise.cuda()

    input = Variable(input)
    label = Variable(label)
    noise = Variable(noise)
    fixed_noise = Variable(fixed_noise)

    # setup optimizer
    optimizerF = optim.Adam(netF.parameters(), lr=opt.lr,
                            betas=(opt.beta1, 0.999))
    optimizerC = optim.Adam(netC.parameters(), lr=opt.lr,
                            betas=(opt.beta1, 0.999))
    optimizerG = optim.Adam(netG.parameters(), lr=opt.lr,
                            betas=(opt.beta1, 0.999))

    core_input = Variable(torch.FloatTensor(
        core_batch, nc, opt.imageSize, opt.imageSize).cuda())

    for epoch in range(opt.niter):

        for i, data in enumerate(dataloader, 0):
            ############################
            # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
            ###########################
            netF.zero_grad()
            netC.zero_grad()

            noise.data.resize_(core_batch, nz, 1, 1)
            noise.data.normal_(0, 1)
            fake = netG(noise)
            label.data.resize_(core_batch).fill_(fake_label)
            fake_features = netF(fake.detach())
            output = netC(fake_features)
            errD_fake = criterion(output, label)
            errD_fake.backward()
            D_G_z1 = output.data.mean()

            real_cpu, _ = data
            # We only do full mini-batches, ignore the last mini-batch
            if (real_cpu.size(0) < opt.batchSize):
                print("Skip small mini batch!")
                continue
            input.data.resize_(real_cpu.size()).copy_(real_cpu)
            true_features = netF(input)
            M = distance(fake_features.data.view(fake_features.size(
                0), -1), true_features.data.view(real_cpu.size(0), -1), False)
            # get the specific neighbors of features in F_true
            _, fake_true_neighbors = torch.min(M, 1)
            unique_nn = np.unique(fake_true_neighbors.numpy()).size
            core_input.data.copy_(torch.index_select(
                real_cpu, 0, fake_true_neighbors.view(-1)))

            true_features = netF(core_input)
            output = netC(true_features)
            label.data.resize_(core_batch).fill_(real_label)
            errD_real = criterion(output, label)
            errD_real.backward()
            D_x = output.data.mean()

            errD = errD_real + errD_fake
            optimizerF.step()
            optimizerC.step()

            ############################
            # (2) Update G network: DCGAN
            ###########################

            netG.zero_grad()

            # fake labels are real for generator cost
            label.data.fill_(real_label)
            fake_features = netF(fake)
            output = netC(fake_features)
            errG = criterion(output, label)
            errG.backward()
            D_G_z2 = output.data.mean()
            optimizerG.step()

            print('[%d/%d][%d/%d] Loss_D: %.4f D(x): %.4f D(G(z)): %.4f, %.4f unique=%d'
                  % (epoch, opt.niter, i, len(dataloader),
                     errD.data[0], D_x, D_G_z1, D_G_z2, unique_nn))

            if i % 50 == 0:
                saveImage(real_cpu[0:64], '%s/real_samples.png' % opt.outf)
                fake = netG(fixed_noise)
                saveImage(fake.data, '%s/fake_samples_epoch_%03d.png' %
                          (opt.outf, epoch))

        # do checkpointing
        torch.save(netG.state_dict(), '%s/netG_epoch_%d.pth' %
                   (opt.outf, epoch))
        torch.save(netF.state_dict(), '%s/netF_epoch_%d.pth' %
                   (opt.outf, epoch))
        torch.save(netC.state_dict(), '%s/netC_epoch_%d.pth' %
                   (opt.outf, epoch))
    with open(opt.outf + "/mark", "w") as f:
        f.write("")
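
distance is likewise assumed rather than shown: it must return the (fake, real) matrix of pairwise distances that torch.min(M, 1) reduces, and the trailing False presumably toggles taking the square root. A minimal sketch under those assumptions:

import torch

def distance(A, B, sqrt=False):
    # Pairwise squared Euclidean distances between rows of A (m, d) and
    # rows of B (n, d); returns an (m, n) matrix. The `sqrt` flag is an
    # assumption about the original signature. The NNGAN call site calls
    # .numpy() on the result, so it expects a CPU tensor.
    M = (A * A).sum(1, keepdim=True) - 2 * A.mm(B.t()) \
        + (B * B).sum(1).unsqueeze(0)
    M = M.clamp(min=0)      # guard against tiny negatives from rounding
    M = M.sqrt() if sqrt else M
    return M.cpu()
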
Example #5
    Diters = int(opt.Diters)

    workers = int(opt.workers)
    lambda_ = int(opt.lambda_)
    cuda = opt.cuda

    #datapath='../../../../../'
    #f = h5py.File(opt.datapath+'fields_z='+opt.redshift+'.hdf5', 'r')
    #f = f['delta_HI']

    if opt.MLP:
        netG = MLP_G(s_sample, nz, nc, ngf, ngpu)
        netD = MLP_D(s_sample, nz, nc, ngf, ngpu)

    else:
        netG = DCGAN_G(s_sample, nz, nc, ngf, ngpu)
        netD = DCGAN_D(s_sample, nz, nc, ndf, ngpu)

    #experiments/ch128_lr0005_tanh/netD_epoch_47.pth
    epoch_load = opt.epoch_st - 1
    wass_loss = []

    if opt.load_weights:
        netG.load_state_dict(
            torch.load(opt.experiment + 'netG_epoch_' + str(epoch_load) +
                       '.pth'))
        netD.load_state_dict(
            torch.load(opt.experiment + 'netD_epoch_' + str(epoch_load) +
                       '.pth'))

        wass_loss = pd.read_csv(opt.experiment + 'loss.csv', header=None)
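
Note that the constructor signatures differ across examples: the fragment above calls DCGAN_G(s_sample, nz, nc, ngf, ngpu) with an image size and GPU count, while DCGAN_main below uses a three-argument DCGAN_G(nz, nc, ngf); the listing mixes two distinct DCGAN_G implementations, so the snippets are not interchangeable.
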
def DCGAN_main(opt):
    g = Globals()

    opt.workers = 2
    opt.batchSize = 128
    opt.imageSize = 64
    opt.nz = 100
    opt.ngf = 64
    opt.ndf = 64
    opt.niter = 50
    opt.lr = 0.0002
    opt.beta1 = 0.5
    opt.cuda = True
    opt.ngpu = 1
    opt.netG = ''
    opt.netD = ''
    opt.outf = g.default_model_dir + "DCGAN/"
    opt.manualSeed = None

    opt = addDataInfo(opt)
    opt.outf = opt.outf + opt.data + "/"
    print_prop(opt)

    try:
        os.makedirs(opt.outf)
    except OSError:
        pass
    if os.path.exists(opt.outf + "/mark"):
        print("Already generated before. Now exit.")
        return

    if opt.manualSeed is None:
        opt.manualSeed = random.randint(1, 10000)
    print("Random Seed: ", opt.manualSeed)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)
    if opt.cuda:
        torch.cuda.manual_seed_all(opt.manualSeed)

    cudnn.benchmark = True

    if torch.cuda.is_available() and not opt.cuda:
        print("WARNING: You have a CUDA device, so you should probably run with --cuda")

    dataset, dataloader = getDataSet(opt)

    ngpu = int(opt.ngpu)
    nz = int(opt.nz)
    ngf = int(opt.ngf)
    ndf = int(opt.ndf)
    nc = 1 if opt.data.startswith("mnist") else 3

    # custom weights initialization called on netG and netD
    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv') != -1:
            m.weight.data.normal_(0.0, 0.02)
        elif classname.find('BatchNorm') != -1:
            m.weight.data.normal_(1.0, 0.02)
            m.bias.data.fill_(0)

    netG = DCGAN_G(nz, nc, ngf)
    netG.apply(weights_init)
    if opt.netG != '':
        netG.load_state_dict(torch.load(opt.netG))
    print(netG)

    netD = DCGAN_D(ngpu)
    netD.apply(weights_init)
    if opt.netD != '':
        netD.load_state_dict(torch.load(opt.netD))
    print(netD)

    criterion = nn.BCELoss()

    input = torch.FloatTensor(opt.batchSize, nc, opt.imageSize, opt.imageSize)
    noise = torch.FloatTensor(opt.batchSize, nz, 1, 1)
    fixed_noise = torch.FloatTensor(opt.batchSize, nz, 1, 1).normal_(0, 1)
    label = torch.FloatTensor(opt.batchSize)
    real_label = 1
    fake_label = 0

    if opt.cuda:
        netD.cuda()
        netG.cuda()
        criterion.cuda()
        input, label = input.cuda(), label.cuda()
        noise, fixed_noise = noise.cuda(), fixed_noise.cuda()

    input = Variable(input)
    label = Variable(label)
    noise = Variable(noise)
    fixed_noise = Variable(fixed_noise)

    # setup optimizer
    optimizerD = optim.Adam(netD.parameters(), lr=opt.lr,
                            betas=(opt.beta1, 0.999))
    optimizerG = optim.Adam(netG.parameters(), lr=opt.lr,
                            betas=(opt.beta1, 0.999))

    for epoch in range(opt.niter):
        for i, data in enumerate(dataloader, 0):
            ############################
            # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
            ###########################
            # train with real
            netD.zero_grad()
            real_cpu, _ = data
            batch_size = real_cpu.size(0)
            input.data.resize_(real_cpu.size()).copy_(real_cpu)
            label.data.resize_(batch_size).fill_(real_label)

            output = netD(input)
            errD_real = criterion(output, label)
            errD_real.backward()
            D_x = output.data.mean()

            # train with fake
            noise.data.resize_(batch_size, nz, 1, 1)
            noise.data.normal_(0, 1)
            fake = netG(noise)
            label.data.fill_(fake_label)
            output = netD(fake.detach())
            errD_fake = criterion(output, label)
            errD_fake.backward()
            D_G_z1 = output.data.mean()
            errD = errD_real + errD_fake
            optimizerD.step()

            ############################
            # (2) Update G network: maximize log(D(G(z)))
            ###########################
            netG.zero_grad()
            # fake labels are real for generator cost
            label.data.fill_(real_label)
            output = netD(fake)
            errG = criterion(output, label)
            errG.backward()
            D_G_z2 = output.data.mean()
            optimizerG.step()

            print('[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'
                  % (epoch, opt.niter, i, len(dataloader),
                     errD.data[0], errG.data[0], D_x, D_G_z1, D_G_z2))
            if i % 100 == 0:
                saveImage(real_cpu, '%s/real_samples.png' % opt.outf)
                fake = netG(fixed_noise)
                saveImage(fake.data, '%s/fake_samples_epoch_%03d.png' %
                          (opt.outf, epoch))

        # do checkpointing
        torch.save(netG.state_dict(), '%s/netG_epoch_%d.pth' %
                   (opt.outf, epoch))
        torch.save(netD.state_dict(), '%s/netD_epoch_%d.pth' %
                   (opt.outf, epoch))
    with open(opt.outf + "/mark", "w") as f:
        f.write("")
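
Each epoch leaves a checkpoint behind; a minimal sketch of sampling from a saved generator after training, assuming a 3-channel dataset and the final epoch index 49 (niter = 50):

# Hypothetical post-training use of a checkpoint written by the loop above.
netG = DCGAN_G(100, 3, 64)      # same (nz, nc, ngf) as in DCGAN_main
netG.load_state_dict(torch.load('%s/netG_epoch_%d.pth' % (opt.outf, 49)))
netG.eval()
z = Variable(torch.FloatTensor(64, 100, 1, 1).normal_(0, 1))
saveImage(netG(z).data, '%s/samples_from_checkpoint.png' % opt.outf)
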
Example #7
def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        m.weight.data.normal_(0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)


from tqdm import tqdm
if model_name == 'DC':
    G = DCGAN_G(isize=img_size, nz=z_size, nc=image_chanel, ngf=hidden_size, ngpu=0)
    G.apply(weights_init)
    D = Sinkhorn_DCGAN_D(isize=img_size, nz=z_size, nc=image_chanel, ndf=hidden_size, ngpu=0, output_dimension=output_dimension)
    D.apply(weights_init)
elif model_name == 'MLP':
    G = MLP_G(isize=img_size, nz=z_size, nc=image_chanel, ngf=hidden_size, ngpu=0)
    D = Sinkhorn_MLP_D(isize=img_size, nz=z_size, nc=image_chanel, ndf=hidden_size, ngpu=0)
print(G)
print(D)
if use_cuda:
    G.cuda()
    D.cuda()
G_lr = D_lr = 5e-5
optimizers = {
    'D': torch.optim.RMSprop(D.parameters(), lr=D_lr),
    'G': torch.optim.RMSprop(G.parameters(), lr=G_lr)
}
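
The snippet stops at the optimizer dict. As a rough sketch of how such a dict is typically driven (the loss callables are placeholders for the notebook's actual Sinkhorn objectives, not part of the original):

def train_step(optimizers, d_loss_fn, g_loss_fn):
    # One alternating update. d_loss_fn / g_loss_fn are hypothetical
    # zero-argument callables returning scalar losses; they stand in for
    # the Sinkhorn critic and generator objectives computed elsewhere.
    optimizers['D'].zero_grad()
    d_loss_fn().backward()
    optimizers['D'].step()

    optimizers['G'].zero_grad()
    g_loss_fn().backward()
    optimizers['G'].step()
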
Example #9
def DCGAN_main(opt):
    g = Globals()

    opt.workers = 2
    opt.batchSize = 64
    opt.imageSize = 64
    opt.nz = 100
    opt.ngf = 64
    opt.ndf = 64
    opt.niter = 50
    opt.lr = 0.0002
    opt.beta1 = 0.5
    opt.cuda = True
    opt.ngpu = 1
    opt.netG = ''
    opt.netD = ''
    opt.outf = g.default_model_dir + "DCGAN/"
    opt.manualSeed = None

    opt = addDataInfo(opt)
    opt.outf = opt.outf + opt.data + "/"
    print_prop(opt)

    try:
        os.makedirs(opt.outf)
    except OSError:
        pass
    if os.path.exists(opt.outf + "/mark"):
        print("Already generated before. Now exit.")
        return

    if opt.manualSeed is None:
        opt.manualSeed = random.randint(1, 10000)
    print("Random Seed: ", opt.manualSeed)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)
    if opt.cuda:
        torch.cuda.manual_seed_all(opt.manualSeed)

    cudnn.benchmark = True

    if torch.cuda.is_available() and not opt.cuda:
        print("WARNING: You have a CUDA device, so you should probably run with --cuda")

    dataset, dataloader = getDataSet(opt)

    ngpu = int(opt.ngpu)
    nz = int(opt.nz)
    ngf = int(opt.ngf)
    ndf = int(opt.ndf)
    nc = 1 if opt.data.startswith("mnist") else 3

    # custom weights initialization called on netG and netD
    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv') != -1:
            m.weight.data.normal_(0.0, 0.02)
        elif classname.find('BatchNorm') != -1:
            m.weight.data.normal_(1.0, 0.02)
            m.bias.data.fill_(0)

    netG = DCGAN_G(nz, nc, ngf)
    netG.apply(weights_init)
    if opt.netG != '':
        netG.load_state_dict(torch.load(opt.netG))
    print(netG)

    netD = DCGAN_D(ngpu)
    netD.apply(weights_init)
    if opt.netD != '':
        netD.load_state_dict(torch.load(opt.netD))
    print(netD)

    criterion = nn.BCELoss()

    input = torch.FloatTensor(opt.batchSize, nc, opt.imageSize, opt.imageSize)
    noise = torch.FloatTensor(opt.batchSize, nz, 1, 1)
    fixed_noise = torch.FloatTensor(opt.batchSize, nz, 1, 1).normal_(0, 1)
    label = torch.FloatTensor(opt.batchSize)
    real_label = 1
    fake_label = 0

    if opt.cuda:
        netD.cuda()
        netG.cuda()
        criterion.cuda()
        input, label = input.cuda(), label.cuda()
        noise, fixed_noise = noise.cuda(), fixed_noise.cuda()

    input = Variable(input)
    label = Variable(label)
    noise = Variable(noise)
    fixed_noise = Variable(fixed_noise)

    # setup optimizer
    optimizerD = optim.Adam(netD.parameters(), lr=opt.lr,
                            betas=(opt.beta1, 0.999))
    optimizerG = optim.Adam(netG.parameters(), lr=opt.lr,
                            betas=(opt.beta1, 0.999))

    for epoch in range(opt.niter):
        for i, data in enumerate(dataloader, 0):
            ############################
            # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
            ###########################
            # train with real
            netD.zero_grad()
            real_cpu, _ = data
            batch_size = real_cpu.size(0)
            input.data.resize_(real_cpu.size()).copy_(real_cpu)
            label.data.resize_(batch_size).fill_(real_label)

            output = netD(input)
            errD_real = criterion(output, label)
            errD_real.backward()
            D_x = output.data.mean()

            # train with fake
            noise.data.resize_(batch_size, nz, 1, 1)
            noise.data.normal_(0, 1)
            fake = netG(noise)
            label.data.fill_(fake_label)
            output = netD(fake.detach())
            errD_fake = criterion(output, label)
            errD_fake.backward()
            D_G_z1 = output.data.mean()
            errD = errD_real + errD_fake
            optimizerD.step()

            ############################
            # (2) Update G network: maximize log(D(G(z)))
            ###########################
            netG.zero_grad()
            # fake labels are real for generator cost
            label.data.fill_(real_label)
            output = netD(fake)
            errG = criterion(output, label)
            errG.backward()
            D_G_z2 = output.data.mean()
            optimizerG.step()

            print('[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'
                  % (epoch, opt.niter, i, len(dataloader),
                     errD.data[0], errG.data[0], D_x, D_G_z1, D_G_z2))
            if i % 100 == 0:
                saveImage(real_cpu, '%s/real_samples.png' % opt.outf)
                fake = netG(fixed_noise)
                saveImage(fake.data, '%s/fake_samples_epoch_%03d.png' %
                          (opt.outf, epoch))

        # do checkpointing
        torch.save(netG.state_dict(), '%s/netG_epoch_%d.pth' %
                   (opt.outf, epoch))
        torch.save(netD.state_dict(), '%s/netD_epoch_%d.pth' %
                   (opt.outf, epoch))
    with open(opt.outf + "/mark", "w") as f:
        f.write("")
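
Every trainer in this listing shares the same run-once guard: exit early if outf/mark exists, and write that empty file only after the last epoch, so finished runs are skipped and interrupted ones restart. The pattern in isolation (run_once is a hypothetical wrapper, not from the original):

import os

def run_once(outf, train_fn):
    # Skip work that already completed; train_fn is any callable.
    try:
        os.makedirs(outf)
    except OSError:
        pass
    if os.path.exists(outf + "/mark"):
        print("Already generated before. Now exit.")
        return
    train_fn()
    with open(outf + "/mark", "w") as f:
        f.write("")            # empty file; its existence is the signal
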
Example #10
    nz = int(opt.nz)
    ngf = int(opt.ngf)
    ndf = int(opt.ndf)
    nc = 1  # by default our data has only one channel
    niter = int(opt.niter)
    Diters = int(opt.Diters)

    workers = int(opt.workers)
    lambda_ = int(opt.lambda_)
    cuda = opt.cuda

    #datapath='../../../../../'
    #f = h5py.File(opt.datapath+'fields_z='+opt.redshift+'.hdf5', 'r')
    #f = f['delta_HI']

    netG = DCGAN_G(s_sample, nz, nc, ngf, ngpu, n_extra_layers)
    netD = DCGAN_D(s_sample, nz, nc, ndf, ngpu, n_extra_layers)

    #experiments/ch128_lr0005_tanh/netD_epoch_47.pth
    epoch_load = opt.epoch_st - 1
    wass_loss = []

    if opt.load_weights:
        netG.load_state_dict(
            torch.load(opt.experiment + 'netG_epoch_' + str(epoch_load) +
                       '.pth'))
        netD.load_state_dict(
            torch.load(opt.experiment + 'netD_epoch_' + str(epoch_load) +
                       '.pth'))

        #wass_loss=pd.read_csv(opt.experiment +'loss.csv', header=None)