# Assumed imports for these examples (hypothetical header, added for completeness;
# repo-local helpers such as Globals, addDataInfo, getDataSet, get_generator_model,
# get_generator_loc, print_prop, mkdir, saveImage, saveFeature, peek, solve,
# distance, DCGAN_G, DCGAN_D, WGAN_G and WGAN_D come from the surrounding project,
# and `g` is assumed to be a module-level Globals() instance):
import os
import random

import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable


def generator_sampler(opt):
    opt.batchSize = 64
    opt.folderSize = 600
    opt.overWrite = False
    opt.outf = g.default_repo_dir

    opt = addDataInfo(opt)
    netG = get_generator_model(opt)
    netG.load_state_dict(torch.load(get_generator_loc(opt)))
    netG.eval()

    opt.name = opt.outf + "samples/" + \
        opt.data + "/" + opt.model + str(opt.epoch)
    print_prop(opt)

    mkdir(opt.outf + "samples")
    mkdir(opt.outf + "samples/" + opt.data)
    if (os.path.exists(opt.name)) and (not opt.overWrite):
        if (os.path.exists(opt.name + "/mark")):  # indeed finished
            print("Sampling already finished before. Now pass.")
            saveFeature(opt.name, opt, opt.feature_model)
            return
        else:
            print("Partially finished. Now rerun. ")

    mkdir(opt.name)
    netG.cuda()

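    # a batch of latent vectors; 100 matches the generator's noise dimensionality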
    noise = Variable(torch.FloatTensor(opt.batchSize, 100, 1, 1).cuda())

    def giveName(iter):  # 7 digit name.
        ans = str(iter)
        return '0' * (7 - len(ans)) + ans

    iter = 0
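    # write samples into subfolders of at most folderSize images, until sampleSize is reached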
    for subfolder in range(0, 1 + opt.sampleSize // opt.folderSize):
        mkdir(opt.name + "/" + str(subfolder))
        for i in range(0, 1 + opt.folderSize // opt.batchSize):
            noise.data.normal_(0, 1)
            fake = netG(noise)
            for j in range(0, len(fake.data)):
                saveImage(
                    fake.data[j], opt.name + "/" + str(subfolder) + "/" +
                    giveName(iter) + ".png")
                iter += 1
                if iter % opt.folderSize == 0 or iter >= opt.sampleSize:
                    break
            if iter % opt.folderSize == 0 or iter >= opt.sampleSize:
                break
        if iter >= opt.sampleSize:
            break

    if opt.dataset == 'mnist_s':
        print("Warning: subclass experiment.. Not saving features..")
    else:
        saveFeature(opt.name, opt, opt.feature_model)
    peek(opt.data, opt.model + str(opt.epoch))

    with open(opt.name + "/mark", "w") as f:
        f.write("")
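# Hypothetical usage sketch (not part of the original repo): besides the fields it
# sets itself, generator_sampler reads opt.data, opt.model, opt.epoch,
# opt.sampleSize and opt.feature_model, and assumes addDataInfo fills in the rest;
# a minimal call could look like:
#
#     from argparse import Namespace
#     opt = Namespace(data='mnist', model='DCGAN', epoch=24,
#                     sampleSize=1200, feature_model=None)  # values are guesses
#     generator_sampler(opt)  # writes PNGs under <outf>/samples/mnist/DCGAN24/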
def subclass_sampler(opt):
    assert (opt.data == 'mnist')
    opt.workers = 2
    opt.imageSize = 64
    opt.batchSize = 600
    opt.outf = g.default_repo_dir

    opt = addDataInfo(opt)
    print_prop(opt)

    saved = []
    for i in range(0, 10):
        saved.append([])
    opt.outTrue9 = opt.outf + "samples/" + opt.data + "9/true"
    if (os.path.exists(opt.outTrue9)):
        if (os.path.exists(opt.outTrue9 + "/mark")):  # indeed finished
            print("Already generated before. Now exit.")
            return
        else:
            print("Partially finished. Now rerun. ")

    dataset, dataloader = getDataSet(opt)

    for batch_idx, (data, target) in enumerate(dataloader):
        for d, t in zip(data, target):
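            # undo MNIST normalization (mean 0.1307, std 0.3081) to recover [0, 1] pixels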
            saved[t].append(d * 0.3081 + 0.1307)

    opt.data_pre = opt.data
    for i in range(0, 10):

        mkdir(opt.outf + "samples")
        mkdir(opt.outf + "samples/" + opt.data_pre + str(i))
        curFolder = opt.outf + "samples/" + opt.data_pre + str(i) + "/true/"
        mkdir(curFolder)

        def giveName(iter):  # 7 digit name.
            ans = str(iter)
            return '0' * (7 - len(ans)) + ans

        subfolder = -1
        for s in range(0, len(saved[i])):
            if s % 600 == 0:
                subfolder += 1
                mkdir(curFolder + str(subfolder))
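            # map [0, 1] pixels back to [-1, 1], the range generator outputs use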
            saveImage(saved[i][s] * 2 - 1,
                      curFolder + str(subfolder) + "/" + giveName(s) + ".png")

        peek(opt.data, 'true', True)
        torch.save(saved[i], curFolder + "dat.pth")

        with open(curFolder + "/mark", "w") as f:
            f.write("")
def noise_sampler(opt):

    opt.batchSize = 64
    opt.folderSize = 600
    opt.overWrite = False
    opt.outf = g.default_repo_dir

    opt = addDataInfo(opt)

    opt.name = opt.outf + "samples/noise/true"
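    # the "true" data here is pure Gaussian noise, written out like real samples as a baseline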
    print_prop(opt)

    mkdir(opt.outf + "samples")
    mkdir(opt.outf + "samples/" + opt.data)
    if (os.path.exists(opt.name)) and (not opt.overWrite):
        if (os.path.exists(opt.name + "/mark")):  # indeed finished
            print("Already generated before. Now exit.")
            return
        else:
            print("Partially finished. Now rerun. ")
    mkdir(opt.name)

    noise = Variable(torch.FloatTensor(opt.batchSize, 3, 64, 64).cuda())

    def giveName(iter):  # 7 digit name.
        ans = str(iter)
        return '0' * (7 - len(ans)) + ans

    iter = 0
    for subfolder in range(0, 1 + opt.sampleSize // opt.folderSize):
        mkdir(opt.name + "/" + str(subfolder))
        for i in range(0, 1 + opt.folderSize // opt.batchSize):
            noise.data.normal_(0, 1)
            for j in range(0, noise.data.size(0)):
                saveImage(
                    noise.data[j], opt.name + "/" + str(subfolder) + "/" +
                    giveName(iter) + ".png")
                iter += 1
                if iter % opt.folderSize == 0:
                    break
            if iter % opt.folderSize == 0:
                break
        if iter >= opt.sampleSize:
            break
    saveFeature(opt.name, opt)
    peek(opt.data, opt.model)

    with open(opt.name + "/mark", "w") as f:
        f.write("")
def DCGAN_cluster_main(opt):
    g = Globals()

    N_CLUSTER = 200
    cluster = np.load('/scratch/ys646/gan/features/celeba/clus.npy')

    opt.batchSize = 64
    opt.imageSize = 64
    opt.nz = 100
    opt.ngf = 64
    opt.ndf = 64
    opt.niter = 50
    opt.lr = 0.0002
    opt.beta1 = 0.5
    opt.cuda = True
    opt.ngpu = 1
    opt.netG = ''
    opt.netD = ''
    opt.outf = g.default_model_dir + "DCGAN/"
    opt.manualSeed = None

    opt = addDataInfo(opt)
    opt.outf = '/scratch/ys646/gan/results/'
    opt.outf = opt.outf + opt.data + "/"
    print_prop(opt)

    try:
        os.makedirs(opt.outf)
    except OSError:
        pass
    if os.path.exists(opt.outf + "/mark"):
        print("Already generated before. Now exit.")
        return

    if opt.manualSeed is None:
        opt.manualSeed = random.randint(1, 10000)
    print("Random Seed: ", opt.manualSeed)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)
    if opt.cuda:
        torch.cuda.manual_seed_all(opt.manualSeed)

    cudnn.benchmark = True

    if torch.cuda.is_available() and not opt.cuda:
        print(
            "WARNING: You have a CUDA device, so you should probably run with --cuda"
        )

    # option 1: just don't shuffle, use a counter for indices
    # this is important to keep the ordering fixed
    opt.workers = 1
    dataset, dataloader = getDataSet(opt, needShuf=False)
    # option 2: shuffle, but use a modified dataloader that also outputs indices

    ngpu = int(opt.ngpu)
    nz = int(opt.nz)
    ngf = int(opt.ngf)
    ndf = int(opt.ndf)
    nc = 1 if opt.data.startswith("mnist") else 3

    # custom weights initialization called on netG and netD
    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv') != -1:
            m.weight.data.normal_(0.0, 0.02)
        elif classname.find('BatchNorm') != -1:
            m.weight.data.normal_(1.0, 0.02)
            m.bias.data.fill_(0)

    netG = DCGAN_G(nz + N_CLUSTER, nc, ngf)
    netG.apply(weights_init)
    if opt.netG != '':
        netG.load_state_dict(torch.load(opt.netG))
    print(netG)

    class _netD(nn.Module):
        def __init__(self, ngpu):
            super(_netD, self).__init__()
            self.ngpu = ngpu
            self.main = nn.Sequential(
                # input is (nc) x 64 x 64
                # nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
                # extra channel for input embedding
                nn.Conv2d(nc + 1, ndf, 4, 2, 1, bias=False),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf) x 32 x 32
                nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * 2),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*2) x 16 x 16
                nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * 4),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*4) x 8 x 8
                nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * 8),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*8) x 4 x 4
                nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
                nn.Sigmoid())
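            # one learned 64x64 map per cluster id, attached to the image as an extra input channel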
            self.emb = nn.Embedding(N_CLUSTER, opt.imageSize * opt.imageSize)

        def forward(self, input, clus_var):
            out_emb = self.emb.forward(clus_var)
            out_emb = out_emb.view(-1, 1, 64, 64)
            new_input = torch.cat([out_emb, input], 1)
            output = self.main.forward(new_input)
            return output.view(-1, 1)

    netD = _netD(ngpu)
    netD.apply(weights_init)
    if opt.netD != '':
        netD.load_state_dict(torch.load(opt.netD))
    print(netD)

    criterion = nn.BCELoss()

    input = torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize)
    noise = torch.FloatTensor(opt.batchSize, nz, 1, 1)
    fixed_noise = torch.FloatTensor(opt.batchSize, nz, 1, 1).normal_(0, 1)
    label = torch.FloatTensor(opt.batchSize)
    real_label = 1
    fake_label = 0

    if opt.cuda:
        netD.cuda()
        netG.cuda()
        criterion.cuda()
        input, label = input.cuda(), label.cuda()
        noise, fixed_noise = noise.cuda(), fixed_noise.cuda()

    input = Variable(input)
    label = Variable(label)
    noise = Variable(noise)
    fixed_noise = Variable(fixed_noise)

    # setup optimizer
    optimizerD = optim.Adam(netD.parameters(),
                            lr=opt.lr,
                            betas=(opt.beta1, 0.999))
    optimizerG = optim.Adam(netG.parameters(),
                            lr=opt.lr,
                            betas=(opt.beta1, 0.999))

    for epoch in range(opt.niter):
        counter = 0
        for i, data in enumerate(dataloader, 0):
            ############################
            # (0) Get the corresponding clusters
            ###########################
            batch_size = data[1].size(0)
            clus_batch = cluster[counter:counter + batch_size]
            clus_batch = torch.from_numpy(clus_batch).long()
            counter = counter + batch_size

            clus_var = Variable(clus_batch.cuda(), requires_grad=False)
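            # one-hot encode the cluster ids to condition the generator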
            oh = torch.FloatTensor(batch_size, N_CLUSTER)
            oh.zero_()
            oh.scatter_(1, clus_batch.view(-1, 1), 1)
            oh_var = Variable(oh.cuda(), requires_grad=False)

            ############################
            # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
            ###########################
            # train with real
            netD.zero_grad()
            real_cpu, _ = data
            input.data.resize_(real_cpu.size()).copy_(real_cpu)
            label.data.resize_(batch_size).fill_(real_label)

            output = netD(input, clus_var)
            errD_real = criterion(output, label)
            errD_real.backward()
            D_x = output.data.mean()

            # train with fake
            noise.data.resize_(batch_size, nz, 1, 1)
            noise.data.normal_(0, 1)
            # pad noise with one hot
            fake = netG(torch.cat([noise, oh_var], 1))

            label.data.fill_(fake_label)
            output = netD(fake.detach(), clus_var)
            errD_fake = criterion(output, label)
            errD_fake.backward()
            D_G_z1 = output.data.mean()
            errD = errD_real + errD_fake
            optimizerD.step()

            ############################
            # (2) Update G network: maximize log(D(G(z)))
            ###########################
            netG.zero_grad()
            # fake labels are real for generator cost
            label.data.fill_(real_label)
            output = netD(fake, clus_var)
            errG = criterion(output, label)
            errG.backward()
            D_G_z2 = output.data.mean()
            optimizerG.step()

            print(
                '[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'
                % (epoch, opt.niter, i, len(dataloader), errD.data[0],
                   errG.data[0], D_x, D_G_z1, D_G_z2))
            if i % 100 == 0:
                saveImage(real_cpu, '%s/real_samples.png' % opt.outf)
                fake = netG(torch.cat([fixed_noise, oh_var], 1))
                saveImage(fake.data,
                          '%s/fake_samples_epoch_%03d.png' % (opt.outf, epoch))

        # do checkpointing
        torch.save(netG.state_dict(),
                   '%s/netG_epoch_%d.pth' % (opt.outf, epoch))
        torch.save(netD.state_dict(),
                   '%s/netD_epoch_%d.pth' % (opt.outf, epoch))

    with open(opt.outf + "/mark", "w") as f:
        f.write("")
def MGGAN_main(opt):

    g = Globals()

    opt.workers = 2
    opt.batchSize = 64
    opt.imageSize = 64
    nc = 1 if opt.data.startswith("mnist") else 3
    opt.nz = 100
    opt.ngf = 64
    opt.ndf = 64
    opt.niter = 30
    opt.lr = 0.0002
    opt.beta1 = 0.5
    opt.cuda = True
    opt.ngpu = 1
    opt.netG = ''
    opt.netD = ''
    opt.outf = g.default_model_dir + "MGGAN/"
    opt.manualSeed = None

    opt = addDataInfo(opt)
    opt.outf = opt.outf + opt.data + "/"
    print_prop(opt)

    try:
        os.makedirs(opt.outf)
    except OSError:
        pass
    if os.path.exists(opt.outf + "/mark"):
        print("Already generated before. Now exit.")
        return

    if opt.manualSeed is None:
        opt.manualSeed = random.randint(1, 10000)
    print("Random Seed: ", opt.manualSeed)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)
    if opt.cuda:
        torch.cuda.manual_seed_all(opt.manualSeed)

    cudnn.benchmark = True

    dataset, dataloader = getDataSet(opt)

    ngpu = int(opt.ngpu)
    nz = int(opt.nz)
    ngf = int(opt.ngf)
    ndf = int(opt.ndf)

    # custom weights initialization called on netG and netD
    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv') != -1:
            m.weight.data.normal_(0.0, 0.02)
        elif classname.find('BatchNorm') != -1:
            m.weight.data.normal_(1.0, 0.02)
            m.bias.data.fill_(0)

    netG = DCGAN_G(nz, nc, ngf)
    netG.apply(weights_init)
    if opt.netG != '':
        netG.load_state_dict(torch.load(opt.netG))
    print(netG)

    nloss = 200

    class _netD(nn.Module):

        def __init__(self, ngpu):
            super(_netD, self).__init__()
            self.ngpu = ngpu
            self.main = nn.Sequential(
                # input is (nc) x 64 x 64
                nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf) x 32 x 32
                nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * 2),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*2) x 16 x 16
                nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * 4),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*4) x 8 x 8
                nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False)
            )
            self.main2 = nn.Sequential(
                # state size. (ndf*8) x 4 x 4
                nn.BatchNorm2d(ndf * 8),
                nn.LeakyReLU(0.2, inplace=True),
                nn.Conv2d(ndf * 8, nloss, 4, 1, 0, bias=False),
                # nn.Linear(ndf*8*4*4,nloss),
                nn.Sigmoid()
            )

        def forward(self, input):
            self.feature = self.main.forward(input)
            # output=self.main2.forward(self.feature.view(input.size(0),-1))
            output = self.main2.forward(self.feature)
            return output.view(-1, 1)

    netD = _netD(ngpu)
    netD.apply(weights_init)
    if opt.netD != '':
        netD.load_state_dict(torch.load(opt.netD))
    print(netD)

    criterion = nn.BCELoss()

    input = torch.FloatTensor(opt.batchSize, nc, opt.imageSize, opt.imageSize)
    noise = torch.FloatTensor(opt.batchSize, nz, 1, 1)
    fixed_noise = torch.FloatTensor(opt.batchSize, nz, 1, 1).normal_(0, 1)
    label = torch.FloatTensor(opt.batchSize, nloss)
    real_label = 1
    fake_label = 0

    if opt.cuda:
        netD.cuda()
        netG.cuda()
        criterion.cuda()
        input, label = input.cuda(), label.cuda()
        noise, fixed_noise = noise.cuda(), fixed_noise.cuda()

    input = Variable(input)
    label = Variable(label)
    noise = Variable(noise)
    fixed_noise = Variable(fixed_noise)

    # setup optimizer
    optimizerD = optim.Adam(netD.parameters(), lr=opt.lr,
                            betas=(opt.beta1, 0.999))
    optimizerG = optim.Adam(netG.parameters(), lr=opt.lr,
                            betas=(opt.beta1, 0.999))

    real_batch = 11
    grow_speed = 5
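    # every grow_speed epochs, shrink the pool of real images that fakes are
    # matched against (from 11 batches down to 1)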
    for epoch in range(opt.niter):
        if epoch % grow_speed == 0:
            if real_batch > 1:
                real_batch -= 1

            real_inputs = torch.FloatTensor(
                real_batch * opt.batchSize, nc, opt.imageSize, opt.imageSize)
        pointer = 0
        for i, data in enumerate(dataloader, 0):
            real_cpu, _ = data
            batch_size = real_cpu.size(0)
            if batch_size < opt.batchSize:
                continue
            pointer = pointer % real_batch + 1

            if pointer < real_batch:  # still need to fill the batch
                # copy data
                real_inputs[
                    pointer * batch_size:(pointer + 1) * batch_size].copy_(real_cpu)
                continue
            # the pool is full; compute feature vectors for all collected real images
            input.data.resize_(real_inputs.size()).copy_(real_inputs)
            netD(input)
            true_features = netD.feature.view(
                real_inputs.size(0), -1)  # make feature a vector

            noise.data.resize_(batch_size, nz, 1, 1)
            noise.data.normal_(0, 1)
            fake = netG(noise)
            label.data.fill_(fake_label)
            output = netD(fake.detach())
            fake_features = netD.feature.view(batch_size, -1)

            # match each fake feature to a real feature by solving an assignment
            # problem (a small LP)
            map = solve(fake_features.data, true_features.data)
            input.data.resize_(real_cpu.size())
            for j in range(0, batch_size):
                input.data[j].copy_(real_inputs[map[j]])

            tot_mini_batch = 10
            for mini_batch in range(0, tot_mini_batch):
                label.data.fill_(real_label)
                netD.zero_grad()
                output = netD(input)
                errD_real = criterion(output, label)
                errD_real.backward()
                D_x = output.data.mean()

                fake = netG(noise)
                label.data.fill_(fake_label)
                output = netD(fake.detach())
                errD_fake = criterion(output, label)
                errD_fake.backward()
                D_G_z1 = output.data.mean()

                errD = errD_real + errD_fake
                optimizerD.step()

                ############################
                # (2) Update G network: maximize log(D(G(z)))
                ###########################
                netG.zero_grad()
                # fake labels are real for generator cost
                label.data.fill_(real_label)
                output = netD(fake)
                errG = criterion(output, label)
                errG.backward()
                D_G_z2 = output.data.mean()
                optimizerG.step()

                print('[%d/%d][%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'
                      % (epoch, opt.niter, i, len(dataloader), mini_batch, tot_mini_batch,
                         errD.data[0], errG.data[0], D_x, D_G_z1, D_G_z2))
            saveImage(real_cpu, '%s/real_samples.png' % opt.outf)
            fake = netG(fixed_noise)
            saveImage(fake.data, '%s/fake_samples_epoch_%03d.png' %
                      (opt.outf, epoch))

        # do checkpointing
        torch.save(netG.state_dict(), '%s/netG_epoch_%d.pth' %
                   (opt.outf, epoch))
        torch.save(netD.state_dict(), '%s/netD_epoch_%d.pth' %
                   (opt.outf, epoch))
    with open(opt.outf + "/mark", "w") as f:
        f.write("")
def WGAN_main(opt):
    g = Globals()

    opt.workers = 2
    opt.batchSize = 64
    opt.imageSize = 64
    opt.nz = 100
    opt.ngf = 64
    opt.ndf = 64
    opt.niter = 50
    opt.lrD = 0.00005
    opt.lrG = 0.00005
    opt.beta1 = 0.5
    opt.cuda = True
    opt.ngpu = 1
    opt.netG = ''
    opt.netD = ''
    opt.clamp_lower = -0.01
    opt.clamp_upper = 0.01
    opt.Diters = 5
    opt.n_extra_layers = 0
    opt.outf = g.default_model_dir + "WGAN/"
    opt.adam = False

    opt = addDataInfo(opt)
    opt.outf = opt.outf + opt.data + "/"
    print_prop(opt)

    if os.path.exists(opt.outf + "/mark"):
        print("Already generated before. Now exit.")
        return

    if opt.outf is None:
        opt.outf = 'samples'
    try:
        os.makedirs(opt.outf)
    except OSError:
        pass

    opt.manualSeed = random.randint(1, 10000)  # fix seed
    print("Random Seed: ", opt.manualSeed)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)

    cudnn.benchmark = True

    if torch.cuda.is_available() and not opt.cuda:
        print("WARNING: You have a CUDA device, so you should probably run with --cuda")

    dataset, dataloader = getDataSet(opt)

    ngpu = int(opt.ngpu)
    nz = int(opt.nz)
    ngf = int(opt.ngf)
    ndf = int(opt.ndf)
    nc = 1 if opt.data.startswith("mnist") else 3
    n_extra_layers = int(opt.n_extra_layers)

    # custom weights initialization called on netG and netD
    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv') != -1:
            m.weight.data.normal_(0.0, 0.02)
        elif classname.find('BatchNorm') != -1:
            m.weight.data.normal_(1.0, 0.02)
            m.bias.data.fill_(0)

    netG = WGAN_G(opt.imageSize, nz, nc, ngf, ngpu, n_extra_layers)

    netG.apply(weights_init)
    if opt.netG != '':  # load checkpoint if needed
        netG.load_state_dict(torch.load(opt.netG))
    print(netG)

    netD = WGAN_D(opt.imageSize, nz, nc, ndf, ngpu, n_extra_layers)
    netD.apply(weights_init)

    if opt.netD != '':
        netD.load_state_dict(torch.load(opt.netD))
    print(netD)

    input = torch.FloatTensor(opt.batchSize, nc, opt.imageSize, opt.imageSize)
    noise = torch.FloatTensor(opt.batchSize, nz, 1, 1)
    fixed_noise = torch.FloatTensor(opt.batchSize, nz, 1, 1).normal_(0, 1)
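    # gradient seeds: backward(one) / backward(mone) pick the sign of each critic term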
    one = torch.FloatTensor([1])
    mone = one * -1

    if opt.cuda:
        netD.cuda()
        netG.cuda()
        input = input.cuda()
        one, mone = one.cuda(), mone.cuda()
        noise, fixed_noise = noise.cuda(), fixed_noise.cuda()

    # setup optimizer
    if opt.adam:
        optimizerD = optim.Adam(
            netD.parameters(), lr=opt.lrD, betas=(opt.beta1, 0.999))
        optimizerG = optim.Adam(
            netG.parameters(), lr=opt.lrG, betas=(opt.beta1, 0.999))
    else:
        optimizerD = optim.RMSprop(netD.parameters(), lr=opt.lrD)
        optimizerG = optim.RMSprop(netG.parameters(), lr=opt.lrG)

    gen_iterations = 0
    for epoch in range(opt.niter):
        data_iter = iter(dataloader)
        i = 0
        while i < len(dataloader):
            ############################
            # (1) Update D network
            ###########################
            for p in netD.parameters():  # reset requires_grad
                p.requires_grad = True  # they are set to False below in netG update

            # train the discriminator Diters times
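            # train the critic extra hard early on and every 500 generator steps,
            # keeping its Wasserstein estimate tight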
            if gen_iterations < 25 or gen_iterations % 500 == 0:
                Diters = 100
            else:
                Diters = opt.Diters
            j = 0
            while j < Diters and i < len(dataloader):
                j += 1

                # clamp parameters to a cube
                for p in netD.parameters():
                    p.data.clamp_(opt.clamp_lower, opt.clamp_upper)

                data = next(data_iter)
                i += 1

                # train with real
                real_cpu, _ = data
                netD.zero_grad()
                batch_size = real_cpu.size(0)

                if opt.cuda:
                    real_cpu = real_cpu.cuda()
                input.resize_as_(real_cpu).copy_(real_cpu)
                inputv = Variable(input)

                errD_real = netD(inputv)
                errD_real.backward(one)

                # train with fake
                noise.resize_(opt.batchSize, nz, 1, 1).normal_(0, 1)
                noisev = Variable(noise, volatile=True)  # totally freeze netG
                fake = Variable(netG(noisev).data)
                inputv = fake
                errD_fake = netD(inputv)
                errD_fake.backward(mone)
                errD = errD_real - errD_fake
                optimizerD.step()

            ############################
            # (2) Update G network
            ###########################
            for p in netD.parameters():
                p.requires_grad = False  # to avoid computation
            netG.zero_grad()
            # in case our last batch was the tail batch of the dataloader,
            # make sure we feed a full batch of noise
            noise.resize_(opt.batchSize, nz, 1, 1).normal_(0, 1)
            noisev = Variable(noise)
            fake = netG(noisev)
            errG = netD(fake)
            errG.backward(one)
            optimizerG.step()
            gen_iterations += 1

            print('[%d/%d][%d/%d][%d] Loss_D: %f Loss_G: %f Loss_D_real: %f Loss_D_fake %f'
                  % (epoch, opt.niter, i, len(dataloader), gen_iterations,
                     errD.data[0], errG.data[0], errD_real.data[0], errD_fake.data[0]))
            if gen_iterations % 50 == 0:
                saveImage(real_cpu, '{0}/real_samples.png'.format(opt.outf))
                fake = netG(Variable(fixed_noise, volatile=True))
                saveImage(
                    fake.data, '{0}/fake_samples_{1}.png'.format(opt.outf, gen_iterations))

        # do checkpointing
        torch.save(netG.state_dict(),
                   '{0}/netG_epoch_{1}.pth'.format(opt.outf, epoch))
        torch.save(netD.state_dict(),
                   '{0}/netD_epoch_{1}.pth'.format(opt.outf, epoch))
    with open(opt.outf + "/mark", "w") as f:
        f.write("")
def DCGAN_main(opt):
    g = Globals()

    opt.workers = 2
    opt.batchSize = 128
    opt.imageSize = 64
    opt.nz = 100
    opt.ngf = 64
    opt.ndf = 64
    opt.niter = 50
    opt.lr = 0.0002
    opt.beta1 = 0.5
    opt.cuda = True
    opt.ngpu = 1
    opt.netG = ''
    opt.netD = ''
    opt.outf = g.default_model_dir + "DCGAN/"
    opt.manualSeed = None

    opt = addDataInfo(opt)
    opt.outf = opt.outf + opt.data + "/"
    print_prop(opt)

    try:
        os.makedirs(opt.outf)
    except OSError:
        pass
    if os.path.exists(opt.outf + "/mark"):
        print("Already generated before. Now exit.")
        return

    if opt.manualSeed is None:
        opt.manualSeed = random.randint(1, 10000)
    print("Random Seed: ", opt.manualSeed)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)
    if opt.cuda:
        torch.cuda.manual_seed_all(opt.manualSeed)

    cudnn.benchmark = True

    if torch.cuda.is_available() and not opt.cuda:
        print("WARNING: You have a CUDA device, so you should probably run with --cuda")

    dataset, dataloader = getDataSet(opt)

    ngpu = int(opt.ngpu)
    nz = int(opt.nz)
    ngf = int(opt.ngf)
    ndf = int(opt.ndf)
    nc = 1 if opt.data.startswith("mnist") else 3

    # custom weights initialization called on netG and netD
    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv') != -1:
            m.weight.data.normal_(0.0, 0.02)
        elif classname.find('BatchNorm') != -1:
            m.weight.data.normal_(1.0, 0.02)
            m.bias.data.fill_(0)

    netG = DCGAN_G(nz, nc, ngf)
    netG.apply(weights_init)
    if opt.netG != '':
        netG.load_state_dict(torch.load(opt.netG))
    print(netG)

    netD = DCGAN_D(ngpu)
    netD.apply(weights_init)
    if opt.netD != '':
        netD.load_state_dict(torch.load(opt.netD))
    print(netD)

    criterion = nn.BCELoss()

    input = torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize)
    noise = torch.FloatTensor(opt.batchSize, nz, 1, 1)
    fixed_noise = torch.FloatTensor(opt.batchSize, nz, 1, 1).normal_(0, 1)
    label = torch.FloatTensor(opt.batchSize)
    real_label = 1
    fake_label = 0

    if opt.cuda:
        netD.cuda()
        netG.cuda()
        criterion.cuda()
        input, label = input.cuda(), label.cuda()
        noise, fixed_noise = noise.cuda(), fixed_noise.cuda()

    input = Variable(input)
    label = Variable(label)
    noise = Variable(noise)
    fixed_noise = Variable(fixed_noise)

    # setup optimizer
    optimizerD = optim.Adam(netD.parameters(), lr=opt.lr,
                            betas=(opt.beta1, 0.999))
    optimizerG = optim.Adam(netG.parameters(), lr=opt.lr,
                            betas=(opt.beta1, 0.999))

    for epoch in range(opt.niter):
        for i, data in enumerate(dataloader, 0):
            ############################
            # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
            ###########################
            # train with real
            netD.zero_grad()
            real_cpu, _ = data
            batch_size = real_cpu.size(0)
            input.data.resize_(real_cpu.size()).copy_(real_cpu)
            label.data.resize_(batch_size).fill_(real_label)

            output = netD(input)
            errD_real = criterion(output, label)
            errD_real.backward()
            D_x = output.data.mean()

            # train with fake
            noise.data.resize_(batch_size, nz, 1, 1)
            noise.data.normal_(0, 1)
            fake = netG(noise)
            label.data.fill_(fake_label)
            output = netD(fake.detach())
            errD_fake = criterion(output, label)
            errD_fake.backward()
            D_G_z1 = output.data.mean()
            errD = errD_real + errD_fake
            optimizerD.step()

            ############################
            # (2) Update G network: maximize log(D(G(z)))
            ###########################
            netG.zero_grad()
            # fake labels are real for generator cost
            label.data.fill_(real_label)
            output = netD(fake)
            errG = criterion(output, label)
            errG.backward()
            D_G_z2 = output.data.mean()
            optimizerG.step()

            print('[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'
                  % (epoch, opt.niter, i, len(dataloader),
                     errD.data[0], errG.data[0], D_x, D_G_z1, D_G_z2))
            if i % 100 == 0:
                saveImage(real_cpu, '%s/real_samples.png' % opt.outf)
                fake = netG(fixed_noise)
                saveImage(fake.data, '%s/fake_samples_epoch_%03d.png' %
                          (opt.outf, epoch))

        # do checkpointing
        torch.save(netG.state_dict(), '%s/netG_epoch_%d.pth' %
                   (opt.outf, epoch))
        torch.save(netD.state_dict(), '%s/netD_epoch_%d.pth' %
                   (opt.outf, epoch))
    with open(opt.outf + "/mark", "w") as f:
        f.write("")
def NNGAN_main(opt):
    g = Globals()

    opt.workers = 2
    opt.batchSize = 64
    opt.imageSize = 64
    opt.nz = 100
    opt.ngf = 64
    opt.ndf = 64
    opt.niter = 50
    opt.lr = 0.0002
    opt.beta1 = 0.5
    opt.cuda = True
    opt.ngpu = 1
    opt.netG = ''
    opt.netF = ''
    opt.netC = ''
    opt.outf = g.default_model_dir + "NNGAN/"
    opt.manualSeed = None

    opt = addDataInfo(opt)
    opt.outf = opt.outf + opt.data + "/"
    print_prop(opt)

    try:
        os.makedirs(opt.outf)
    except OSError:
        pass

    if opt.manualSeed is None:
        opt.manualSeed = random.randint(1, 10000)
    print("Random Seed: ", opt.manualSeed)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)
    if opt.cuda:
        torch.cuda.manual_seed_all(opt.manualSeed)

    if os.path.exists(opt.outf + "/mark"):
        print("Already generated before. Now exit.")
        return

    cudnn.benchmark = True

    dataset, dataloader = getDataSet(opt, needShuf=False)

    nz = int(opt.nz)
    ngf = int(opt.ngf)
    ndf = int(opt.ndf)
    nc = 1 if opt.data.startswith("mnist") else 3

    # custom weights initialization called on netG and netD
    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv') != -1:
            m.weight.data.normal_(0.0, 0.02)
        elif classname.find('BatchNorm') != -1:
            m.weight.data.normal_(1.0, 0.02)
            m.bias.data.fill_(0)

    netG = DCGAN_G(100, nc, 64)
    netG.apply(weights_init)
    if opt.netG != '':
        netG.load_state_dict(torch.load(opt.netG))
        print("Load netg")
    print(netG)

    class _netFeature(nn.Module):

        def __init__(self):
            super(_netFeature, self).__init__()
            self.main = nn.Sequential(
                # input is (nc) x 64 x 64
                nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf) x 32 x 32
                nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * 2),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*2) x 16 x 16
                nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * 4),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*4) x 8 x 8
                nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * 8),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*8) x 4 x 4
            )

        def forward(self, input):
            output = self.main.forward(input).view(input.size(0), -1)
            # outputN=torch.norm(output,2,1)
            # return output/(outputN.expand_as(output))
            return output

    class _netCv(nn.Module):

        def __init__(self):
            super(_netCv, self).__init__()
            self.main = nn.Sequential(
                nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
                nn.Sigmoid()
            )

        def forward(self, input):
            return self.main(input.view(input.size(0), 512, 4, 4)).view(-1, 1)

    netF = _netFeature()
    netF.apply(weights_init)
    print(netF)
    netC = _netCv()
    netC.apply(weights_init)
    print(netC)
    if opt.netF != '':
        netF.load_state_dict(torch.load(opt.netF))
        print("Load netf")
    if opt.netC != '':
        netC.load_state_dict(torch.load(opt.netC))
        print("Load netc")

    criterion = nn.BCELoss()

    core_batch = 64
    input = torch.FloatTensor(opt.batchSize, nc, opt.imageSize, opt.imageSize)
    noise = torch.FloatTensor(opt.batchSize, nz, 1, 1)
    fixed_noise = torch.FloatTensor(64, nz, 1, 1).normal_(0, 1)
    label = torch.FloatTensor(core_batch)
    real_label = 1
    fake_label = 0

    if opt.cuda:
        netF.cuda()
        netC.cuda()
        netG.cuda()
        criterion.cuda()
        input, label = input.cuda(), label.cuda()
        noise, fixed_noise = noise.cuda(), fixed_noise.cuda()

    input = Variable(input)
    label = Variable(label)
    noise = Variable(noise)
    fixed_noise = Variable(fixed_noise)

    # setup optimizer
    optimizerF = optim.Adam(netF.parameters(), lr=opt.lr,
                            betas=(opt.beta1, 0.999))
    optimizerC = optim.Adam(netC.parameters(), lr=opt.lr,
                            betas=(opt.beta1, 0.999))
    optimizerG = optim.Adam(netG.parameters(), lr=opt.lr,
                            betas=(opt.beta1, 0.999))

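    # buffer for the nearest-neighbor real images matched to the current fake batch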
    core_input = Variable(torch.FloatTensor(
        core_batch, nc, opt.imageSize, opt.imageSize).cuda())

    for epoch in range(opt.niter):

        for i, data in enumerate(dataloader, 0):
            ############################
            # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
            ###########################
            netF.zero_grad()
            netC.zero_grad()

            noise.data.resize_(core_batch, nz, 1, 1)
            noise.data.normal_(0, 1)
            fake = netG(noise)
            label.data.resize_(core_batch).fill_(fake_label)
            fake_features = netF(fake.detach())
            output = netC(fake_features)
            errD_fake = criterion(output, label)
            errD_fake.backward()
            D_G_z1 = output.data.mean()

            real_cpu, _ = data
            # We only do full mini-batches, ignore the last mini-batch
            if (real_cpu.size(0) < opt.batchSize):
                print("Skip small mini batch!")
                continue
            input.data.resize_(real_cpu.size()).copy_(real_cpu)
            true_features = netF(input)
            M = distance(fake_features.data.view(fake_features.size(
                0), -1), true_features.data.view(real_cpu.size(0), -1), False)
            # get the specific neighbors of features in F_true
            _, fake_true_neighbors = torch.min(M, 1)
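            # count distinct real images chosen as neighbors (a rough diversity signal)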
            unique_nn = np.unique(fake_true_neighbors.numpy()).size
            core_input.data.copy_(torch.index_select(
                real_cpu, 0, fake_true_neighbors.view(-1)))

            true_features = netF(core_input)
            output = netC(true_features)
            label.data.resize_(core_batch).fill_(real_label)
            errD_real = criterion(output, label)
            errD_real.backward()
            D_x = output.data.mean()

            errD = errD_real + errD_fake
            optimizerF.step()
            optimizerC.step()

            ############################
            # (2) Update G network: DCGAN
            ###########################

            netG.zero_grad()

            # fake labels are real for generator cost
            label.data.fill_(real_label)
            fake_features = netF(fake)
            output = netC(fake_features)
            errG = criterion(output, label)
            errG.backward()
            D_G_z2 = output.data.mean()
            optimizerG.step()

            print('[%d/%d][%d/%d] Loss_D: %.4f D(x): %.4f D(G(z)): %.4f, %.4f unique=%d'
                  % (epoch, opt.niter, i, len(dataloader),
                     errD.data[0], D_x, D_G_z1, D_G_z2, unique_nn))

            if i % 50 == 0:
                saveImage(real_cpu[0:64], '%s/real_samples.png' % opt.outf)
                fake = netG(fixed_noise)
                saveImage(fake.data, '%s/fake_samples_epoch_%03d.png' %
                          (opt.outf, epoch))

        # do checkpointing
        torch.save(netG.state_dict(), '%s/netG_epoch_%d.pth' %
                   (opt.outf, epoch))
        torch.save(netF.state_dict(), '%s/netF_epoch_%d.pth' %
                   (opt.outf, epoch))
        torch.save(netC.state_dict(), '%s/netC_epoch_%d.pth' %
                   (opt.outf, epoch))
    with open(opt.outf + "/mark", "w") as f:
        f.write("")
示例#13
0
def MGGAN_main(opt):

    g = Globals()

    opt.workers = 2
    opt.batchSize = 64
    opt.imageSize = 64
    nc = 1 if opt.data.startswith("mnist") else 3
    opt.nz = 100
    opt.ngf = 64
    opt.ndf = 64
    opt.niter = 30
    opt.lr = 0.0002
    opt.beta1 = 0.5
    opt.cuda = True
    opt.ngpu = 1
    opt.netG = ''
    opt.netD = ''
    opt.outf = g.default_model_dir + "MGGAN/"
    opt.manualSeed = None

    opt = addDataInfo(opt)
    opt.outf = opt.outf + opt.data + "/"
    print_prop(opt)

    try:
        os.makedirs(opt.outf)
    except OSError:
        pass
    if os.path.exists(opt.outf + "/mark"):
        print("Already generated before. Now exit.")
        return

    if opt.manualSeed is None:
        opt.manualSeed = random.randint(1, 10000)
    print("Random Seed: ", opt.manualSeed)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)
    if opt.cuda:
        torch.cuda.manual_seed_all(opt.manualSeed)

    cudnn.benchmark = True

    dataset, dataloader = getDataSet(opt)

    ngpu = int(opt.ngpu)
    nz = int(opt.nz)
    ngf = int(opt.ngf)
    ndf = int(opt.ndf)

    # custom weights initialization called on netG and netD
    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv') != -1:
            m.weight.data.normal_(0.0, 0.02)
        elif classname.find('BatchNorm') != -1:
            m.weight.data.normal_(1.0, 0.02)
            m.bias.data.fill_(0)

    netG = DCGAN_G(nz, nc, ngf)
    netG.apply(weights_init)
    if opt.netG != '':
        netG.load_state_dict(torch.load(opt.netG))
    print(netG)

    nloss = 200

    class _netD(nn.Module):

        def __init__(self, ngpu):
            super(_netD, self).__init__()
            self.ngpu = ngpu
            self.main = nn.Sequential(
                # input is (nc) x 64 x 64
                nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf) x 32 x 32
                nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * 2),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*2) x 16 x 16
                nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * 4),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*4) x 8 x 8
                nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False)
            )
            self.main2 = nn.Sequential(
                # state size. (ndf*8) x 4 x 4
                nn.BatchNorm2d(ndf * 8),
                nn.LeakyReLU(0.2, inplace=True),
                nn.Conv2d(ndf * 8, nloss, 4, 1, 0, bias=False),
                # nn.Linear(ndf*8*4*4,nloss),
                nn.Sigmoid()
            )

        def forward(self, input):
            self.feature = self.main.forward(input)
            # output=self.main2.forward(self.feature.view(input.size(0),-1))
            output = self.main2.forward(self.feature)
            return output.view(-1, 1)

    netD = _netD(ngpu)
    netD.apply(weights_init)
    if opt.netD != '':
        netD.load_state_dict(torch.load(opt.netD))
    print(netD)

    criterion = nn.BCELoss()

    input = torch.FloatTensor(opt.batchSize, nc, opt.imageSize, opt.imageSize)
    noise = torch.FloatTensor(opt.batchSize, nz, 1, 1)
    fixed_noise = torch.FloatTensor(opt.batchSize, nz, 1, 1).normal_(0, 1)
    label = torch.FloatTensor(opt.batchSize, nloss)
    real_label = 1
    fake_label = 0

    if opt.cuda:
        netD.cuda()
        netG.cuda()
        criterion.cuda()
        input, label = input.cuda(), label.cuda()
        noise, fixed_noise = noise.cuda(), fixed_noise.cuda()

    input = Variable(input)
    label = Variable(label)
    noise = Variable(noise)
    fixed_noise = Variable(fixed_noise)

    # setup optimizer
    optimizerD = optim.Adam(netD.parameters(), lr=opt.lr,
                            betas=(opt.beta1, 0.999))
    optimizerG = optim.Adam(netG.parameters(), lr=opt.lr,
                            betas=(opt.beta1, 0.999))

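    # Curriculum over the real pool: each fake batch is matched against a pool
    # of real_batch (initially 11) batches of real images, and the pool shrinks
    # by one batch every grow_speed epochs, down to a single batch.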
    real_batch = 11
    grow_speed = 5
    for epoch in range(opt.niter):
        if epoch % grow_speed == 0:
            if real_batch > 1:
                real_batch -= 1

            real_inputs = torch.FloatTensor(
                real_batch * opt.batchSize, nc, opt.imageSize, opt.imageSize)
        pointer = 0
        for i, data in enumerate(dataloader, 0):
            real_cpu, _ = data
            batch_size = real_cpu.size(0)
            if batch_size < opt.batchSize:
                continue
            pointer = pointer % real_batch + 1

            if pointer < real_batch:  # still need to fill the pool
                # copy this batch into the next slot of the pool
                real_inputs[
                    pointer * batch_size:(pointer + 1) * batch_size].copy_(real_cpu)
                continue
            # The cycle above only fills slots 1..real_batch-1; copy the
            # current batch into slot 0 so it never holds the uninitialized
            # memory that torch.FloatTensor allocates.
            real_inputs[0:batch_size].copy_(real_cpu)
            # Done collecting; now extract feature vectors for the real pool.
            input.data.resize_(real_inputs.size()).copy_(real_inputs)
            netD(input)
            true_features = netD.feature.view(
                real_inputs.size(0), -1)  # make feature a vector

            noise.data.resize_(batch_size, nz, 1, 1)
            noise.data.normal_(0, 1)
            fake = netG(noise)
            label.data.fill_(fake_label)
            output = netD(fake.detach())
            fake_features = netD.feature.view(batch_size, -1)

            # Pair each fake sample with a real one by solving an assignment
            # (LP) problem over feature distances; match[j] is the index of
            # the real sample assigned to fake sample j.
            match = solve(fake_features.data, true_features.data)
            input.data.resize_(real_cpu.size())
            for j in range(0, batch_size):
                input.data[j].copy_(real_inputs[match[j]])

            tot_mini_batch = 10
            for mini_batch in range(0, tot_mini_batch):
                label.data.fill_(real_label)
                netD.zero_grad()
                output = netD(input)
                errD_real = criterion(output, label)
                errD_real.backward()
                D_x = output.data.mean()

                fake = netG(noise)
                label.data.fill_(fake_label)
                output = netD(fake.detach())
                errD_fake = criterion(output, label)
                errD_fake.backward()
                D_G_z1 = output.data.mean()

                errD = errD_real + errD_fake
                optimizerD.step()

                ############################
                # (2) Update G network: maximize log(D(G(z)))
                ###########################
                netG.zero_grad()
                # fake labels are real for generator cost
                label.data.fill_(real_label)
                output = netD(fake)
                errG = criterion(output, label)
                errG.backward()
                D_G_z2 = output.data.mean()
                optimizerG.step()

                print('[%d/%d][%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'
                      % (epoch, opt.niter, i, len(dataloader), mini_batch, tot_mini_batch,
                         errD.data[0], errG.data[0], D_x, D_G_z1, D_G_z2))
            saveImage(real_cpu, '%s/real_samples.png' % opt.outf)
            fake = netG(fixed_noise)
            saveImage(fake.data, '%s/fake_samples_epoch_%03d.png' %
                      (opt.outf, epoch))

        # do checkpointing
        torch.save(netG.state_dict(), '%s/netG_epoch_%d.pth' %
                   (opt.outf, epoch))
        torch.save(netD.state_dict(), '%s/netD_epoch_%d.pth' %
                   (opt.outf, epoch))
    with open(opt.outf + "/mark", "w") as f:
        f.write("")
def folder_sampler(opt):
    opt.workers = 2
    opt.imageSize = 64
    opt.batchSize = 600
    opt.outTrueA = 'true/'
    opt.outTrueB = 'true_test/'
    opt.outTrueC = 'true_test2/'
    opt.outf = g.default_repo_dir

    opt = addDataInfo(opt)
    assert (opt.batchSize % 3 == 0)

    print_prop(opt)
    opt.outTrueA = opt.outf + "samples/" + opt.data + "/" + opt.outTrueA
    opt.outTrueB = opt.outf + "samples/" + opt.data + "/" + opt.outTrueB
    opt.outTrueC = opt.outf + "samples/" + opt.data + "/" + opt.outTrueC
    folderList = [opt.outTrueA, opt.outTrueB, opt.outTrueC]

    if (os.path.exists(opt.outTrueC)):
        if (os.path.exists(opt.outTrueC + "/mark")):  # indeed finished
            print("Sampling already finished before. Now pass.")
            for f in folderList:
                saveFeature(f, opt, opt.feature_model)
            return
        else:
            print("Partially finished. Now rerun. ")

    mkdir(opt.outf + "samples")
    mkdir(opt.outf + "samples/" + opt.data)
    mkdir(opt.outTrueA)
    mkdir(opt.outTrueB)
    mkdir(opt.outTrueC)

    opt.manualSeed = random.randint(1, 10000)  # fix seed
    print("Random Seed: ", opt.manualSeed)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)

    dataset, dataloader = getDataSet(opt)

    assert (len(dataset) >= opt.sampleSize * 3)

    def giveName(iter):  # 7 digit name.
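        # equivalent to str(iter).zfill(7)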
        ans = str(iter)
        return '0' * (7 - len(ans)) + ans

    iter = 0
    subfolder = -1
    splits = len(folderList)

    for i, data in enumerate(dataloader, 0):
        img, _ = data
        if i % splits == 0:
            subfolder += 1
        for j in range(0, len(img)):
            curFolder = folderList[j % splits]
            mkdir(curFolder + str(subfolder))
            if iter >= splits * opt.sampleSize:
                break
            saveImage(
                img[j],
                curFolder + str(subfolder) + "/" + giveName(iter) + ".png")
            iter += 1
        if iter >= splits * opt.sampleSize:
            break

    for f in folderList:
        saveFeature(f, opt, opt.feature_model)
        peek(opt.data, os.path.relpath(f, opt.outf + "samples/" + opt.data))

    for folder in folderList:
        with open(folder + "/mark", "w") as f:
            f.write("")
def DCGAN_cluster_main(opt):
    g = Globals()

    N_CLUSTER = 200
    cluster = np.load('/scratch/ys646/gan/features/celeba/clus.npy')

    opt.batchSize = 64
    opt.imageSize = 64
    opt.nz = 100
    opt.ngf = 64
    opt.ndf = 64
    opt.niter = 50
    opt.lr = 0.0002
    opt.beta1 = 0.5
    opt.cuda = True
    opt.ngpu = 1
    opt.netG = ''
    opt.netD = ''
    opt.outf = g.default_model_dir + "DCGAN/"
    opt.manualSeed = None

    opt = addDataInfo(opt)
    opt.outf = '/scratch/ys646/gan/results/'
    opt.outf = opt.outf + opt.data + "/"
    print_prop(opt)

    try:
        os.makedirs(opt.outf)
    except OSError:
        pass
    if os.path.exists(opt.outf + "/mark"):
        print("Already generated before. Now exit.")
        return

    if opt.manualSeed is None:
        opt.manualSeed = random.randint(1, 10000)
    print("Random Seed: ", opt.manualSeed)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)
    if opt.cuda:
        torch.cuda.manual_seed_all(opt.manualSeed)

    cudnn.benchmark = True

    if torch.cuda.is_available() and not opt.cuda:
        print("WARNING: You have a CUDA device, so you should probably run with --cuda")

    # option 1 (used here): don't shuffle and keep a running counter for
    # indices; this keeps the image order aligned with the cluster file
    opt.workers = 1
    dataset, dataloader = getDataSet(opt, needShuf=False)
    # option 2 (not used): shuffle, but with a modified dataloader that also
    # outputs the indices of each batch

    ngpu = int(opt.ngpu)
    nz = int(opt.nz)
    ngf = int(opt.ngf)
    ndf = int(opt.ndf)
    nc = 1 if opt.data.startswith("mnist") else 3

    # custom weights initialization called on netG and netD
    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv') != -1:
            m.weight.data.normal_(0.0, 0.02)
        elif classname.find('BatchNorm') != -1:
            m.weight.data.normal_(1.0, 0.02)
            m.bias.data.fill_(0)

    netG = DCGAN_G(nz + N_CLUSTER, nc, ngf)
    netG.apply(weights_init)
    if opt.netG != '':
        netG.load_state_dict(torch.load(opt.netG))
    print(netG)

    class _netD(nn.Module):

        def __init__(self, ngpu):
            super(_netD, self).__init__()
            self.ngpu = ngpu
            self.main = nn.Sequential(
                # input is (nc) x 64 x 64
                # nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
                # extra channel for input embedding
                nn.Conv2d(nc + 1, ndf, 4, 2, 1, bias=False),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf) x 32 x 32
                nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * 2),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*2) x 16 x 16
                nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * 4),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*4) x 8 x 8
                nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * 8),
                nn.LeakyReLU(0.2, inplace=True),
                # state size. (ndf*8) x 4 x 4
                nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
                nn.Sigmoid()
            )
            self.emb = nn.Embedding(N_CLUSTER, opt.imageSize * opt.imageSize)

        def forward(self, input, clus_var):
            out_emb = self.emb.forward(clus_var)
            out_emb = out_emb.view(-1, 1, 64, 64)
            new_input = torch.cat([out_emb, input], 1)
            output = self.main.forward(new_input)
            return output.view(-1, 1)

    netD = _netD(ngpu)
    netD.apply(weights_init)
    if opt.netD != '':
        netD.load_state_dict(torch.load(opt.netD))
    print(netD)

    criterion = nn.BCELoss()

    input = torch.FloatTensor(opt.batchSize, nc, opt.imageSize, opt.imageSize)
    noise = torch.FloatTensor(opt.batchSize, nz, 1, 1)
    fixed_noise = torch.FloatTensor(opt.batchSize, nz, 1, 1).normal_(0, 1)
    label = torch.FloatTensor(opt.batchSize)
    real_label = 1
    fake_label = 0

    if opt.cuda:
        netD.cuda()
        netG.cuda()
        criterion.cuda()
        input, label = input.cuda(), label.cuda()
        noise, fixed_noise = noise.cuda(), fixed_noise.cuda()

    input = Variable(input)
    label = Variable(label)
    noise = Variable(noise)
    fixed_noise = Variable(fixed_noise)

    # setup optimizer
    optimizerD = optim.Adam(netD.parameters(), lr=opt.lr,
                            betas=(opt.beta1, 0.999))
    optimizerG = optim.Adam(netG.parameters(), lr=opt.lr,
                            betas=(opt.beta1, 0.999))

    for epoch in range(opt.niter):
        counter = 0
        for i, data in enumerate(dataloader, 0):
            ############################
            # (0) Get the corresponding clusters
            ###########################
            batch_size = data[1].size(0)
            clus_batch = cluster[counter:counter + batch_size]
            clus_batch = torch.from_numpy(clus_batch).long()
            counter = counter + batch_size

            clus_var = Variable(clus_batch.cuda(), requires_grad=False)
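            # Build a one-hot encoding of the cluster ids: the generator is
            # conditioned by concatenating this one-hot vector to the noise,
            # while the discriminator consumes the raw ids through its
            # embedding layer (as an extra input channel).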
            oh = torch.FloatTensor(batch_size, N_CLUSTER)
            oh.zero_()
            oh.scatter_(1, clus_batch.view(-1, 1), 1)
            oh_var = Variable(oh.cuda(), requires_grad=False)

            ############################
            # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
            ###########################
            # train with real
            netD.zero_grad()
            real_cpu, _ = data
            input.data.resize_(real_cpu.size()).copy_(real_cpu)
            label.data.resize_(batch_size).fill_(real_label)

            output = netD(input, clus_var)
            errD_real = criterion(output, label)
            errD_real.backward()
            D_x = output.data.mean()

            # train with fake
            noise.data.resize_(batch_size, nz, 1, 1)
            noise.data.normal_(0, 1)
            # pad noise with one hot
            fake = netG(torch.cat([noise, oh_var], 1))

            label.data.fill_(fake_label)
            output = netD(fake.detach(), clus_var)
            errD_fake = criterion(output, label)
            errD_fake.backward()
            D_G_z1 = output.data.mean()
            errD = errD_real + errD_fake
            optimizerD.step()

            ############################
            # (2) Update G network: maximize log(D(G(z)))
            ###########################
            netG.zero_grad()
            # fake labels are real for generator cost
            label.data.fill_(real_label)
            output = netD(fake, clus_var)
            errG = criterion(output, label)
            errG.backward()
            D_G_z2 = output.data.mean()
            optimizerG.step()

            print('[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'
                  % (epoch, opt.niter, i, len(dataloader),
                     errD.data[0], errG.data[0], D_x, D_G_z1, D_G_z2))
            if i % 100 == 0:
                saveImage(real_cpu, '%s/real_samples.png' % opt.outf)
                fake = netG(torch.cat([fixed_noise, oh_var], 1))
                saveImage(fake.data, '%s/fake_samples_epoch_%03d.png' %
                          (opt.outf, epoch))

        # do checkpointing
        torch.save(netG.state_dict(), '%s/netG_epoch_%d.pth' %
                   (opt.outf, epoch))
        torch.save(netD.state_dict(), '%s/netD_epoch_%d.pth' %
                   (opt.outf, epoch))

    with open(opt.outf + "/mark", "w") as f:
        f.write("")
def DCGAN_main(opt):
    g = Globals()

    opt.workers = 2
    opt.batchSize = 64
    opt.imageSize = 64
    opt.nz = 100
    opt.ngf = 64
    opt.ndf = 64
    opt.niter = 50
    opt.lr = 0.0002
    opt.beta1 = 0.5
    opt.cuda = True
    opt.ngpu = 1
    opt.netG = ''
    opt.netD = ''
    opt.outf = g.default_model_dir + "DCGAN/"
    opt.manualSeed = None

    opt = addDataInfo(opt)
    opt.outf = opt.outf + opt.data + "/"
    print_prop(opt)

    try:
        os.makedirs(opt.outf)
    except OSError:
        pass
    if os.path.exists(opt.outf + "/mark"):
        print("Already generated before. Now exit.")
        return

    if opt.manualSeed is None:
        opt.manualSeed = random.randint(1, 10000)
    print("Random Seed: ", opt.manualSeed)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)
    if opt.cuda:
        torch.cuda.manual_seed_all(opt.manualSeed)

    cudnn.benchmark = True

    if torch.cuda.is_available() and not opt.cuda:
        print("WARNING: You have a CUDA device, so you should probably run with --cuda")

    dataset, dataloader = getDataSet(opt)

    ngpu = int(opt.ngpu)
    nz = int(opt.nz)
    ngf = int(opt.ngf)
    ndf = int(opt.ndf)
    nc = 1 if opt.data.startswith("mnist") else 3

    # custom weights initialization called on netG and netD
    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv') != -1:
            m.weight.data.normal_(0.0, 0.02)
        elif classname.find('BatchNorm') != -1:
            m.weight.data.normal_(1.0, 0.02)
            m.bias.data.fill_(0)

    netG = DCGAN_G(nz, nc, ngf)
    netG.apply(weights_init)
    if opt.netG != '':
        netG.load_state_dict(torch.load(opt.netG))
    print(netG)

    netD = DCGAN_D(ngpu)
    netD.apply(weights_init)
    if opt.netD != '':
        netD.load_state_dict(torch.load(opt.netD))
    print(netD)

    criterion = nn.BCELoss()

    input = torch.FloatTensor(opt.batchSize, nc, opt.imageSize, opt.imageSize)
    noise = torch.FloatTensor(opt.batchSize, nz, 1, 1)
    fixed_noise = torch.FloatTensor(opt.batchSize, nz, 1, 1).normal_(0, 1)
    label = torch.FloatTensor(opt.batchSize)
    real_label = 1
    fake_label = 0

    if opt.cuda:
        netD.cuda()
        netG.cuda()
        criterion.cuda()
        input, label = input.cuda(), label.cuda()
        noise, fixed_noise = noise.cuda(), fixed_noise.cuda()

    input = Variable(input)
    label = Variable(label)
    noise = Variable(noise)
    fixed_noise = Variable(fixed_noise)

    # setup optimizer
    optimizerD = optim.Adam(netD.parameters(), lr=opt.lr,
                            betas=(opt.beta1, 0.999))
    optimizerG = optim.Adam(netG.parameters(), lr=opt.lr,
                            betas=(opt.beta1, 0.999))

    for epoch in range(opt.niter):
        for i, data in enumerate(dataloader, 0):
            ############################
            # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
            ###########################
            # train with real
            netD.zero_grad()
            real_cpu, _ = data
            batch_size = real_cpu.size(0)
            input.data.resize_(real_cpu.size()).copy_(real_cpu)
            label.data.resize_(batch_size).fill_(real_label)

            output = netD(input)
            errD_real = criterion(output, label)
            errD_real.backward()
            D_x = output.data.mean()

            # train with fake
            noise.data.resize_(batch_size, nz, 1, 1)
            noise.data.normal_(0, 1)
            fake = netG(noise)
            label.data.fill_(fake_label)
            output = netD(fake.detach())
            errD_fake = criterion(output, label)
            errD_fake.backward()
            D_G_z1 = output.data.mean()
            errD = errD_real + errD_fake
            optimizerD.step()

            ############################
            # (2) Update G network: maximize log(D(G(z)))
            ###########################
            netG.zero_grad()
            # fake labels are real for generator cost
            label.data.fill_(real_label)
            output = netD(fake)
            errG = criterion(output, label)
            errG.backward()
            D_G_z2 = output.data.mean()
            optimizerG.step()

            print('[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'
                  % (epoch, opt.niter, i, len(dataloader),
                     errD.data[0], errG.data[0], D_x, D_G_z1, D_G_z2))
            if i % 100 == 0:
                saveImage(real_cpu, '%s/real_samples.png' % opt.outf)
                fake = netG(fixed_noise)
                saveImage(fake.data, '%s/fake_samples_epoch_%03d.png' %
                          (opt.outf, epoch))

        # do checkpointing
        torch.save(netG.state_dict(), '%s/netG_epoch_%d.pth' %
                   (opt.outf, epoch))
        torch.save(netD.state_dict(), '%s/netD_epoch_%d.pth' %
                   (opt.outf, epoch))
    with open(opt.outf + "/mark", "w") as f:
        f.write("")