Example #1
    opts.imSize = 64
    opts.numSamples = 20
    opts.labels = ['Male', 'Smiling']
    # opts.batchSize = 100

    # Create a new subfolder for saving results and training params
    exDir = join(
        opts.exDir,
        '' + opts.labels[0] + '_' + opts.labels[1] + '_face_sum_experiments')
    try:
        os.mkdir(exDir)
    except OSError:
        print('already exists')

    print('Outputs will be saved to:', exDir)
    save_input_args(exDir, opts)

    # Load data (Male and Smiling labels)
    IM_SIZE = opts.imSize
    print('Prepare data loader...')
    transform = transforms.Compose([
        transforms.ToPILImage(),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))  # map to [-1, 1]
    ])

    testDataset = CELEBA(
        root=opts.root,
        train=False,
        labels=opts.labels,
        transform=transform,
    )
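
The listing is cut off at the dataset construction; presumably the test set is then wrapped in a DataLoader before use. A minimal sketch under that assumption (batch_size=100 echoes the commented-out opts.batchSize above but is illustrative, not taken from the source):

    from torch.utils.data import DataLoader

    # Hypothetical follow-up: batch the test set for evaluation.
    # batch_size=100 is an assumption echoing the commented-out
    # opts.batchSize line above, not a value from the source.
    testLoader = DataLoader(testDataset,
                            batch_size=100,
                            shuffle=False,
                            num_workers=2)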
Example #2
# Imports needed by this snippet; opts and the helper functions
# make_new_folder, save_input_args, corrupt and plot_losses come from
# the surrounding project and are not shown here.
from os.path import join
from time import time

import numpy as np
import torch
import torch.nn.functional as F
from torch import optim
from torch.autograd import Variable
from torchvision.utils import save_image


def train_mode(gen,
               dis,
               trainLoader,
               useNoise=False,
               beta1=0.5,
               c=0.01,
               k=1,
               WGAN=False):
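    """Adversarially train gen against dis on batches from trainLoader.

    useNoise: corrupt real and fake images with annealed instance noise.
    beta1:    beta1 momentum coefficient for both Adam optimizers.
    c:        weight-clipping bound for the WGAN critic.
    k:        discriminator/critic updates per generator update.
    WGAN:     use Wasserstein losses and weight clipping instead of BCE.
    """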
    ####### Define optimizer #######
    genOptimizer = optim.Adam(gen.parameters(),
                              lr=opts.lr,
                              betas=(beta1, 0.999))
    disOptimizer = optim.Adam(dis.parameters(),
                              lr=opts.lr,
                              betas=(beta1, 0.999))

    if gen.useCUDA:
        torch.cuda.set_device(opts.gpuNo)
        gen.cuda()
        dis.cuda()

    ####### Create a new folder to save results and model info #######
    exDir = make_new_folder(opts.outDir)
    print('Outputs will be saved to:', exDir)
    save_input_args(exDir, opts)

    # Instance-noise schedule: anneal the corruption sigma from 0.5
    # down to 0.001 on a log scale over the course of training.
    noiseSigma = np.logspace(np.log2(0.5),
                             np.log2(0.001),
                             opts.maxEpochs,
                             base=2)

    ####### Start Training #######
    losses = {'gen': [], 'dis': []}
    for e in range(opts.maxEpochs):
        dis.train()
        gen.train()

        epochLoss_gen = 0
        epochLoss_dis = 0

        noiseLevel = float(noiseSigma[e])

        T = time()
        for i, data in enumerate(trainLoader, 0):

            for _ in range(k):  # k discriminator updates per generator step
                xReal = Variable(data[0])
                if gen.useCUDA:
                    xReal = xReal.cuda()

                if useNoise:
                    xReal = corrupt(xReal, noiseLevel)  #add a little noise

                ####### Calculate discriminator loss #######
                noSamples = xReal.size(0)

                xFake = gen.sample_x(noSamples)
                if useNoise:
                    xFake = corrupt(xFake, noiseLevel)  #add a little noise
                pReal_D = dis.forward(xReal)
                pFake_D = dis.forward(xFake.detach())

                real = dis.ones(xReal.size(0))
                fake = dis.zeros(xFake.size(0))

                if WGAN:
                    # WGAN critic loss: minimize E[D(fake)] - E[D(real)];
                    # the critic outputs unbounded scores, no sigmoid/BCE.
                    disLoss = pFake_D.mean() - pReal_D.mean()
                else:
                    # Standard GAN: BCE against real/fake targets, with a
                    # prior weighting opts.pi between the two terms.
                    disLoss = opts.pi * F.binary_cross_entropy(pReal_D, real) + \
                        (1 - opts.pi) * F.binary_cross_entropy(pFake_D, fake)

                ####### Do DIS updates #######
                disOptimizer.zero_grad()
                disLoss.backward()
                disOptimizer.step()

                #### Clip critic weights to [-c, c] (WGAN Lipschitz trick) ####
                if WGAN:
                    for p in dis.parameters():
                        p.data.clamp_(-c, c)

                losses['dis'].append(disLoss.item())

            ####### Calculate generator loss #######
            xFake_ = gen.sample_x(noSamples)
            if useNoise:
                xFake_ = corrupt(xFake_, noiseLevel)  #add a little noise
            pFake_G = dis.forward(xFake_)

            if WGAN:
                # WGAN generator loss: maximize the critic score on fakes.
                genLoss = -pFake_G.mean()
            else:
                # Non-saturating GAN loss: label the fakes as real.
                genLoss = F.binary_cross_entropy(pFake_G, real)

            ####### Do GEN updates #######
            genOptimizer.zero_grad()
            genLoss.backward()
            genOptimizer.step()

            losses['gen'].append(genLoss.item())

            ####### Print info #######
            if i % 100 == 1:
                print('[%d, %d] gen: %.5f, dis: %.5f, time: %.2f'
                      % (e, i, genLoss.item(), disLoss.item(), time() - T))

        ####### Tests #######
        gen.eval()
        print('Outputs will be saved to:', exDir)
        #save some samples
        samples = gen.sample_x(49)
        save_image(samples.data,
                   join(exDir, 'epoch' + str(e) + '.png'),
                   normalize=True)

        # Plot the running losses
        plot_losses(losses, exDir, epochs=e + 1)

        ####### Save params #######
        gen.save_params(exDir)
        dis.save_params(exDir)

    return gen, dis
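
A minimal sketch of how train_mode might be invoked. Generator, Discriminator, and the loader construction are hypothetical stand-ins for the project's actual model and data code; only the train_mode signature above comes from the snippet:

    # Hypothetical driver -- the model classes and loader are assumptions.
    gen = Generator()      # project's generator, with a useCUDA attribute
    dis = Discriminator()  # project's discriminator/critic
    trainLoader = ...      # e.g. a DataLoader over the CELEBA train split

    # Standard GAN training with annealed instance noise:
    gen, dis = train_mode(gen, dis, trainLoader, useNoise=True)

    # WGAN variant: clip critic weights to [-0.01, 0.01] and run five
    # critic updates per generator update, as in the original WGAN recipe:
    gen, dis = train_mode(gen, dis, trainLoader, WGAN=True, c=0.01, k=5)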