Example #1 (0 votes)
File: main.py — Project: cash2one/animeGAN-1
def train(Generator, Discriminator, dataloader, criterion):
    """Run the adversarial (DCGAN-style) training loop.

    Args:
        Generator: generator network mapping (batch_size, feature_dim, 1, 1)
            noise tensors to images.
        Discriminator: discriminator network producing a per-sample
            real/fake score.
        dataloader: iterable yielding dicts with an 'img' batch tensor.
        criterion: loss applied to discriminator outputs vs. target labels
            (e.g. nn.BCELoss).

    Relies on module-level globals: learning_rate, batch_size, feature_dim,
    smooth_label, num_epochs, args, writer, base_dir.
    Side effects: logs scalars/images to `writer`, saves sample image grids
    under `base_dir`, and checkpoints both models after every epoch.
    """
    # setup optimizers (0.5/0.999 are the standard DCGAN betas)
    g_optim = torch.optim.Adam(Generator.parameters(),
                               lr=learning_rate,
                               betas=(0.5, 0.999))
    d_optim = torch.optim.Adam(Discriminator.parameters(),
                               lr=learning_rate,
                               betas=(0.5, 0.999))

    # Training noise is resampled every iteration (noise.normal_() below);
    # test_noise stays fixed so the periodically-saved sample grids are
    # comparable across training.
    noise = torch.randn(batch_size, feature_dim, 1, 1)
    test_noise = torch.randn(batch_size, feature_dim, 1, 1)
    # One-sided-style label smoothing: targets for D are pulled off the
    # hard 0/1 values by smooth_label, while G is trained toward hard 1s.
    true_label = torch.FloatTensor(batch_size).fill_(1 - smooth_label)
    true_label_g = torch.FloatTensor(batch_size).fill_(1)
    fake_label = torch.FloatTensor(batch_size).fill_(0 + smooth_label)

    if args.cuda:
        noise = noise.cuda()
        test_noise = test_noise.cuda()
        true_label = true_label.cuda()
        true_label_g = true_label_g.cuda()
        fake_label = fake_label.cuda()

    # train!
    Generator.train()
    Discriminator.train()
    for epoch in range(num_epochs):
        for i, data in enumerate(dataloader):
            img = data['img']
            # label = data['label']

            if args.cuda:
                img = img.cuda()
                # label = label.cuda()

            img_real = Variable(img)
            # label = Variable(label)

            #------------------------#
            # Train  Discriminator
            #------------------------#
            Discriminator.zero_grad()
            # learn real data as real
            true_label_var = Variable(true_label)
            out_real = Discriminator(img_real)
            loss_d_real = criterion(out_real, true_label_var)

            # FIX: resample the latent batch every iteration.  The original
            # sampled `noise` once before the loop, so the generator only
            # ever saw a single fixed noise batch for the whole run.
            noise.normal_()
            noise_var = Variable(noise)
            # generate fake data with generator
            img_fake = Generator(noise_var)
            fake_label_var = Variable(fake_label)
            # detach: gradients from the D loss must not flow into G
            out_fake = Discriminator(img_fake.detach())
            loss_d_fake = criterion(out_fake, fake_label_var)

            loss_d = loss_d_real + loss_d_fake
            loss_d.backward()
            d_optim.step()

            #-------------------#
            # Train Generator
            #-------------------#
            Generator.zero_grad()
            # fool discriminator to learn as real with fake data
            true_label_g_var = Variable(true_label_g)
            out_real_but_fake = Discriminator(img_fake)
            loss_g = criterion(out_real_but_fake, true_label_g_var)
            loss_g.backward()
            g_optim.step()

            # FIX: `.data[0]` raises IndexError on 0-dim loss tensors in
            # PyTorch >= 0.4; `.item()` is the supported scalar accessor.
            # D(x)/D(z)/g now report the discriminator's mean outputs, as
            # their labels claim, instead of the per-term loss values.
            d_x = out_real.data.mean()
            d_z = out_fake.data.mean()
            d_g = out_real_but_fake.data.mean()
            print(
                "Epoch [%d/%d] Iter [%d/%d] Loss D : %.4f, Loss G : %.4f, D(x) : %.4f, D(z) : %.4f, g : %.4f"
                % (epoch + 1, num_epochs, i + 1, len(dataloader),
                   loss_d.item(), loss_g.item(), d_x, d_z, d_g))

            niter = epoch * len(dataloader) + i + 1
            writer.add_scalar('Loss/D', loss_d.item(), niter)
            writer.add_scalar('Loss/G', loss_g.item(), niter)
            writer.add_scalar('D/D(x)', d_x, niter)
            writer.add_scalar('D/D(z)', d_z, niter)
            writer.add_scalar('D/g', d_g, niter)

            # generate on the way: save a sample grid from the fixed
            # test noise every 100 iterations
            if (i + 1) % 100 == 0:
                test_noise_var = Variable(test_noise)
                test_img = Generator(test_noise_var)
                vutils.save_image(test_img.data,
                                  base_dir + 'fake_img_epoch_%d_iter_%d.png' %
                                  (epoch + 1, i + 1),
                                  normalize=True)
                writer.add_image(
                    'fake_images',
                    vutils.make_grid(test_img.data, normalize=True), niter)

        # save model checkpoints after every epoch (overwritten each time)
        torch.save(Generator.state_dict(), 'Generator.pth')
        torch.save(Discriminator.state_dict(), 'Discriminator.pth')

    writer.close()
Example #2 (0 votes)
    torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
# Image dataset rooted at opt.data_path; `transforms` (the Compose built
# above, ending in a (0.5, 0.5, 0.5) Normalize) maps images to [-1, 1].
dataset = torchvision.datasets.ImageFolder(opt.data_path, transform=transforms)

# drop_last=True keeps every batch exactly opt.batchSize, which matches
# the fixed-size `label` tensor allocated below.
dataloader = torch.utils.data.DataLoader(
    dataset=dataset,
    batch_size=opt.batchSize,
    shuffle=True,
    drop_last=True,
)

# NOTE(review): these assignments rebind the Generator/Discriminator
# *class* names to model instances, so the classes are no longer
# constructible afterwards.  Renaming would break the (unseen) training
# loop below, so the shadowing is left as-is.
Generator = Generator(opt.ngf, opt.nz).to(device)
Discriminator = Discriminator(opt.ndf).to(device)

# Binary cross-entropy on the discriminator's real/fake scores; both
# optimizers use the same lr and (beta1, 0.999) Adam betas.
criterion = nn.BCELoss()
optimizerG = torch.optim.Adam(Generator.parameters(),
                              lr=opt.lr,
                              betas=(opt.beta1, 0.999))
optimizerD = torch.optim.Adam(Discriminator.parameters(),
                              lr=opt.lr,
                              betas=(opt.beta1, 0.999))

# Reusable per-batch target tensor (size is safe because drop_last=True
# guarantees full batches).
label = torch.FloatTensor(opt.batchSize)

# Hard BCE targets: 1 = real, 0 = fake.
real_label = 1
fake_label = 0
for epoch in range(1, opt.epoch + 1):
    for i, (imgs, _) in enumerate(dataloader):
        optimizerD.zero_grad()

        imgs = imgs.to(device)