def single_item_recon(batch):
    # Reconstruct every item of the given batch from its learnable latent code;
    # the real/reconstructed pair of the last item is saved below.
    for item_num in range(opt.batch_size):
        # x = train_loader[batch][item_num]
        x = data_folder[opt.batch_size * batch + item_num]
        x = x.view(1, 3, 64, 64)
        z = learnable_z[batch][item_num].view(1, 256, 1, 1)

        x = to_variable(x)
        z = to_variable(z)
        x_hat = generator(z)

    print("saving recon images in ", sample_path)

    torchvision.utils.save_image(
        denorm(x.data),
        os.path.join(sample_path,
                     'real_samples-%d-%d.png' % (batch, item_num)),
        nrow=8)
    # save the generated images
    torchvision.utils.save_image(
        denorm(x_hat.data),
        os.path.join(sample_path, 'recon_test-%d-%d.png' % (batch, item_num)),
        nrow=8)
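
These snippets rely on small helpers such as denorm and to_variable that are defined elsewhere in their repositories. A minimal sketch of what they typically look like is given below as an assumption; the exact originals may differ.

import torch
from torch.autograd import Variable


def denorm(x):
    # Map images normalized to [-1, 1] back to [0, 1] for saving/visualization.
    return ((x + 1) / 2).clamp(0, 1)


def to_variable(x):
    # Legacy (pre-0.4 PyTorch) wrapper: move to GPU when available and wrap in a Variable.
    if torch.cuda.is_available():
        x = x.cuda()
    return Variable(x)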
Example #2
    def test(self):
        load_checkpoint(self.netG_A2B,
                        './results/cyclegan/checkpoints/netG_A2B_final.pth',
                        self.device)
        load_checkpoint(self.netG_B2A,
                        './results/cyclegan/checkpoints/netG_B2A_final.pth',
                        self.device)

        os.makedirs('./results/cyclegan/evaluation/', exist_ok=True)

        self.netG_A2B.eval()
        self.netG_B2A.eval()

        print("=====Test Start======")

        with torch.no_grad():
            for iter, (real_A, real_B) in enumerate(self.testloader):
                fake_A = self.netG_B2A(real_B.to(self.device))
                save_image(
                    denorm(fake_A),
                    os.path.join('./results/cyclegan/evaluation',
                                 'fake_image-{:05d}.png'.format(iter + 1)))

        # Compute the Inception score
        dataset = FolderDataset(
            folder=os.path.join('./results/cyclegan/evaluation'))
        Inception = Inception_Score(dataset)
        score = Inception.compute_score(splits=1)

        print('Inception Score : ', score)
Example #3
    def test(self):
        load_checkpoint(
            self.netG,
            os.path.join('./results', self.type, 'checkpoints',
                         'netG_final.pth'), self.device)
        self.netG.eval()

        os.makedirs(os.path.join('./results/', self.type, 'evaluation'),
                    exist_ok=True)

        print("=====Test Start======")

        with torch.no_grad():
            for iter in range(1000):
                z = torch.randn(1, 256).to(self.device)
                fake_img = self.netG(z)
                save_image(
                    denorm(fake_img),
                    os.path.join('./results/', self.type, 'evaluation',
                                 'fake_image-{:05d}.png'.format(iter + 1)))

        # Compute the Inception score
        dataset = FolderDataset(
            folder=os.path.join('./results/', self.type, 'evaluation'))
        Inception = Inception_Score(dataset)
        score = Inception.compute_score(splits=1)

        print('Inception Score : ', score)
def recon_test(iter_num):
    for i, x in enumerate(train_loader):
        print(i)
        if i == iter_num:
            x = to_variable(x)
            z = to_variable(learnable_z[i])
            x_hat = generator(z)

            print("saving recon images in ", sample_path)

            torchvision.utils.save_image(denorm(x.data),
                                         os.path.join(
                                             sample_path,
                                             'real_samples-%d.png' % iter_num),
                                         nrow=8)
            # save the generated images
            torchvision.utils.save_image(denorm(x_hat.data),
                                         os.path.join(
                                             sample_path,
                                             'recon_test-%d.png' % iter_num),
                                         nrow=8)
            break
def generate_test(iter_num):
    for i in range(iter_num):

        z = torch.randn(opt.batch_size, opt.z_dim, 1, 1)
        z = to_variable(z)

        if opt.gpu:
            z = z.cuda()

        x_hat = generator(z)

        print("saving generated images in ", sample_path)

        # save the generated images
        torchvision.utils.save_image(denorm(x_hat.data),
                                     os.path.join(sample_path,
                                                  'generate_test-%d.png' % i),
                                     nrow=8)
        z_update_norm = z_update / norm[:, np.newaxis]

        if opt.gpu:
            learnable_z[i] = torch.from_numpy(z_update_norm).cuda()
        else:
            learnable_z[i] = torch.from_numpy(z_update_norm).cpu()

        if (i + 1) % log_step == 0:
            print('Epoch [%d/%d], Step[%d/%d], loss: %f, l1: %f, lap: %f'
                  % (epoch + 1, n_epochs, i + 1, total_step, loss.data[0], l1_loss.data[0],
                     lap_loss.data[0]
                     ))

        # save the real images
        if (i + 1) == sample_step:
            torchvision.utils.save_image(denorm(x.data),
                                         os.path.join(sample_path,
                                                      'real_samples-%d-%d.png' % (
                                                          epoch + 1, i + 1)), nrow=4)
        # save the generated images
        if (i + 1) % sample_step == 0:
            torchvision.utils.save_image(denorm(x_hat.data),
                                         os.path.join(sample_path,
                                                      'fake_samples-%d-%d.png' % (
                                                          epoch + 1, i + 1)), nrow=4)

    if (epoch + 1) % opt.ckpt_step == 0:
        print("saving checkpoint ..")
        checkpoint_path = os.path.join(model_path, 'checkpoint_%d.pth.tar' % (epoch + 1))

        save_checkpoint({
Example #7
    def train(self, epochs: int = 100):

        self.netG_A2B.to(self.device)
        self.netG_B2A.to(self.device)
        self.netD_A.to(self.device)
        self.netD_B.to(self.device)

        start_time = time.time()

        print("=====Train Start======")

        for epoch in range(epochs):
            for iter, (real_A, real_B) in enumerate(self.trainloader):
                self.netG_A2B.train()
                self.netG_B2A.train()
                self.netD_A.train()
                self.netD_B.train()

                real_A = real_A.to(self.device)
                real_B = real_B.to(self.device)

                batch_size = real_A.size(0)
                real_label = torch.ones(batch_size).to(self.device)
                fake_label = torch.zeros(batch_size).to(self.device)
                ###################################################################################
                # (1) Update Generator
                # Train the generator (self.netG_A2B & self.netG_B2A).
                # You have to implement 3 types of loss functions ('identity loss', 'gan loss', 'cycle consistency loss')
                ###################################################################################

                identity_loss: torch.Tensor = None
                gan_loss: torch.Tensor = None
                cycle_loss: torch.Tensor = None
                lossG: torch.Tensor = None

                ### YOUR CODE HERE (~ 15 lines)
                # forward network
                fake_B = self.netG_A2B(real_A)
                reco_A = self.netG_B2A(fake_B)
                fake_A = self.netG_B2A(real_B)
                reco_B = self.netG_A2B(fake_A)

                # hold discriminators
                for net in [self.netD_A, self.netD_B]:
                    if net is not None:
                        for param in net.parameters():
                            param.requires_grad = False

                self.optimizerG.zero_grad()

                # Identity loss
                idt_A = self.netG_A2B(real_B)
                loss_idt_A = self.criterion_identity(idt_A, real_B)
                idt_B = self.netG_B2A(real_A)
                loss_idt_B = self.criterion_identity(idt_B, real_A)

                identity_loss = self.weight_idt * (loss_idt_A + loss_idt_B)

                # GAN loss
                preD = self.netD_A(fake_B)
                loss_G_A = self.criterion_GAN(preD, real_label.expand_as(preD))
                preD = self.netD_B(fake_A)
                loss_G_B = self.criterion_GAN(preD, real_label.expand_as(preD))

                gan_loss = self.weight_gan * (loss_G_A + loss_G_B)

                # Cycle loss
                loss_cycle_A = self.criterion_cycle(reco_A, real_A)
                loss_cycle_B = self.criterion_cycle(reco_B, real_B)

                cycle_loss = self.weight_cycle * (loss_cycle_A + loss_cycle_B)

                lossG = identity_loss + gan_loss + cycle_loss
                ### END YOUR CODE

                # Test code
                # if epoch == 0 and iter == 0:
                # 5.7538, 2.2067, 11.5476
                # 5.4559, 2.2439, 10.9289
                # test_lossG_fuction(identity_loss, gan_loss, cycle_loss)

                self.optimizerG.zero_grad()
                lossG.backward()
                self.optimizerG.step()

                ###################################################################################
                # (2) Update Discriminator
                # Train the discrminator (self.netD_A & self.netD_B).
                ###################################################################################
                # Discriminator A

                lossD_A: torch.Tensor = None

                ### YOUR CODE HERE (~ 4 lines)
                # activate discriminators
                for net in [self.netD_A, self.netD_B]:
                    if net is not None:
                        for param in net.parameters():
                            param.requires_grad = True
                pred_real = self.netD_A(real_B)
                lossD_A_real = self.criterion_GAN(
                    pred_real, real_label.expand_as(pred_real))
                pred_fake = self.netD_A(fake_B.detach())
                lossD_A_fake = self.criterion_GAN(
                    pred_fake, fake_label.expand_as(pred_fake))

                lossD_A = (lossD_A_real + lossD_A_fake)

                ### END YOUR CODE

                self.optimizerD_A.zero_grad()
                lossD_A.backward()
                self.optimizerD_A.step()

                # Discriminator B

                lossD_B: torch.Tensor = None

                ### YOUR CODE HERE (~ 4 lines)
                pred_real = self.netD_B(real_A)
                lossD_B_real = self.criterion_GAN(
                    pred_real, real_label.expand_as(pred_real))
                pred_fake = self.netD_B(fake_A.detach())
                lossD_B_fake = self.criterion_GAN(
                    pred_fake, fake_label.expand_as(pred_fake))

                lossD_B = (lossD_B_real + lossD_B_fake)
                ### END YOUR CODE

                # Test code
                # if epoch == 0 and iter == 0:
                #     test_lossD_fuction(lossD_A, lossD_B)

                self.optimizerD_B.zero_grad()
                lossD_B.backward()
                self.optimizerD_B.step()

                if (iter + 1) % 100 == 0:
                    end_time = time.time() - start_time
                    end_time = str(datetime.timedelta(seconds=end_time))[:-7]
                    print(
                        'Time [%s], Epoch [%d/%d], Step[%d/%d], lossD_A: %.4f, lossD_B: %.4f, lossG: %.4f'
                        % (end_time, epoch + 1, epochs, iter + 1,
                           len(self.trainloader), lossD_A.item(),
                           lossD_B.item(), lossG.item()))

            # Save Images
            fake_A = fake_A.reshape(fake_A.size(0), 3, 256, 256)
            fake_B = fake_B.reshape(fake_B.size(0), 3, 256, 256)

            save_image(
                denorm(fake_B),
                os.path.join('./results/cyclegan/images',
                             'fakeA2B-{:03d}.png'.format(epoch + 1)))
            save_image(
                denorm(fake_A),
                os.path.join('./results/cyclegan/images',
                             'fakeB2A-{:03d}.png'.format(epoch + 1)))

            if (epoch + 1) % 10 == 0:
                save_checkpoint(
                    self.netG_A2B,
                    './results/cyclegan/checkpoints/netG_A2B_{:02d}.pth'.
                    format(epoch + 1), self.device)
                save_checkpoint(
                    self.netG_B2A,
                    './results/cyclegan/checkpoints/netG_B2A_{:02d}.pth'.
                    format(epoch + 1), self.device)

                save_checkpoint(
                    self.netD_A,
                    './results/cyclegan/checkpoints/netD_A_{:02d}.pth'.format(
                        epoch + 1), self.device)
                save_checkpoint(
                    self.netD_B,
                    './results/cyclegan/checkpoints/netD_B_{:02d}.pth'.format(
                        epoch + 1), self.device)

        # Save Checkpoints
        save_checkpoint(self.netG_A2B,
                        './results/cyclegan/checkpoints/netG_A2B_final.pth',
                        self.device)
        save_checkpoint(self.netG_B2A,
                        './results/cyclegan/checkpoints/netG_B2A_final.pth',
                        self.device)

        save_checkpoint(self.netD_A,
                        './results/cyclegan/checkpoints/netD_A_final.pth',
                        self.device)
        save_checkpoint(self.netD_B,
                        './results/cyclegan/checkpoints/netD_B_final.pth',
                        self.device)
Example #8
    def train(self):
        for epoch in range(num_epoch):
            self.optim_D = lr_scheduler(self.optim_D, epoch, self.lr)
            self.optim_G = lr_scheduler(self.optim_G, epoch, self.lr)

            for iters, (real_img, real_label) in enumerate(self.dloader):
                N, C, H, W = real_img.size()   

                ''' ------------------------------ 1. Train D ------------------------------ '''
                
                real_img = util.var(real_img, requires_grad=False)    
                real_label = util.var(real_label, requires_grad=False)

                # Source and classification loss with real img and real label
                real_src_score, real_cls_score = self.D(real_img)

                real_src_loss = -torch.mean(real_src_score)
                real_cls_loss = self.BCE_loss(real_cls_score, real_label) / N

                # Make random target label and c using real label and concat with real img
                target_label = real_label[torch.randperm(real_label.size(0)).type(self.itype)]

                # Source and classification loss with fake img and target label
                fake_img = self.G(real_img, target_label)
                fake_src_score, fake_cls_score = self.D(fake_img)
                fake_src_loss = torch.mean(fake_src_score)

                # Gradient Penalty
                alpha = torch.rand(N, 1, 1, 1).type(self.dtype)
                x_hat = util.var((alpha * real_img.data + (1 - alpha) * fake_img.data), requires_grad=True)
                x_hat_score, _ = self.D(x_hat)

                grad = torch.autograd.grad(outputs=x_hat_score,
                                           inputs=x_hat,
                                           grad_outputs=torch.ones(x_hat_score.size()).type(self.dtype),
                                           retain_graph=True,
                                           create_graph=True,
                                           only_inputs=True)[0]                  

                grad = grad.view(grad.size()[0], -1)
                grad_l2norm = torch.sqrt(torch.sum(grad ** 2, dim=1))            
                gp_loss = self.lambda_gp * torch.mean((grad_l2norm - 1)**2)

                # Total Loss and update
                D_loss = real_src_loss + fake_src_loss + real_cls_loss + gp_loss

                self.optim_D.zero_grad()
                self.optim_G.zero_grad()
                D_loss.backward()
                self.optim_D.step()

                ''' ------------------------------ 2. Train G ------------------------------ '''
                
                if iters % self.n_critic == 0:
                    # Source and classification loss with fake img and target label
                    fake_img = self.G(real_img, target_label)
                    recon_img = self.G(fake_img, real_label)

                    fake_src_score, fake_cls_score = self.D(fake_img)
                    fake_src_loss = -torch.mean(fake_src_score)
                    fake_cls_loss = self.BCE_loss(fake_cls_score, target_label)
                    recon_loss = self.lambda_recon * self.L1_loss(recon_img, real_img)

                    # Total loss and update
                    G_loss = fake_src_loss + fake_cls_loss + recon_loss

                    self.optim_D.zero_grad()
                    self.optim_G.zero_grad()
                    G_loss.backward()
                    self.optim_G.step()

                if iters % self.save_every == 0:
                    if os.path.exists(self.result_dir) is False:
                        os.makedirs(self.result_dir)

                    if os.path.exists(self.weight_dir) is False:
                        os.makedirs(self.weight_dir)

                    # Print loss
                    print('[Epoch : %d / Iter : %d] : D_loss : %f, G_loss : %f, real_cls : %f, fake_cls : %f'\
                              %(epoch, iters, D_loss.data[0], G_loss.data[0], \
                                real_cls_loss.data[0], fake_cls_loss.data[0]))
                    # Save image
                    img_name = str(epoch) + '_' + str(iters) + '.png'
                    img_path = os.path.join(self.result_dir, img_name)

                    real_img = util.denorm(real_img)
                    fake_img = util.denorm(fake_img)
                    util.save_img(real_img, fake_img, img_path)

                    # Save weight
                    torch.save(self.D.state_dict(), os.path.join(self.weight_dir, 'D.pkl'))
                    torch.save(self.G.state_dict(), os.path.join(self.weight_dir, 'G.pkl'))

            # Save weight at the end of every epoch
            torch.save(self.D.state_dict(), os.path.join(self.weight_dir, 'D.pkl'))
            torch.save(self.G.state_dict(), os.path.join(self.weight_dir, 'G.pkl'))

        # Save weight at the end of training
        torch.save(self.D.state_dict(), os.path.join(self.weight_dir, 'D.pkl'))
        torch.save(self.G.state_dict(), os.path.join(self.weight_dir, 'G.pkl'))
Example #9
def train(start, epoch):
    last_time = time.time()
    epoch_time = time.time()
    print('-- Current Epoch: %d'%epoch)

    adjust_learning_rate(G_A_Optimizer, epoch)
    adjust_learning_rate(G_B_Optimizer, epoch)
    adjust_learning_rate(D_A_Optimizer, epoch)
    adjust_learning_rate(D_B_Optimizer, epoch)

    for i, (a_real, b_real) in enumerate(zip(a_loader, b_loader)):
        # Train Generators
        a_real = Variable(a_real[0])
        b_real = Variable(b_real[0])
        if use_cuda:
            a_real = a_real.cuda()
            b_real = b_real.cuda()

        a_fake = G_A(b_real)
        b_fake = G_B(a_real)
        a_rec = G_A(b_fake)
        b_rec = G_B(a_fake)
        a_fake_result = D_A(a_fake)
        b_fake_result = D_B(b_fake)

        real_labels = Variable(torch.ones(a_fake_result.size()))
        if use_cuda:
            real_labels = real_labels.cuda()

        G_A_loss = MSE_Loss(a_fake_result, real_labels)
        G_B_loss = MSE_Loss(b_fake_result, real_labels)
        a_rec_loss = L1_Loss(a_rec, a_real)
        b_rec_loss = L1_Loss(b_rec, b_real)
        G_loss = G_A_loss + G_B_loss + a_rec_loss*10 + b_rec_loss*10

        G_A.zero_grad()
        G_B.zero_grad()
        G_loss.backward()
        G_A_Optimizer.step()
        G_B_Optimizer.step()

        # Train Discriminators
        a_fake = Variable(torch.Tensor(a_fake_pool([a_fake.cpu().data.numpy()])[0]))
        b_fake = Variable(torch.Tensor(b_fake_pool([b_fake.cpu().data.numpy()])[0]))
        if use_cuda:
            a_fake = a_fake.cuda()
            b_fake = b_fake.cuda()

        a_real_result = D_A(a_real)
        a_fake_result = D_A(a_fake)
        b_real_result = D_B(b_real)
        b_fake_result = D_B(b_fake)

        real_labels = Variable(torch.ones(a_real_result.size()))
        fake_labels = Variable(torch.zeros(a_fake_result.size()))
        if use_cuda:
            real_labels = real_labels.cuda()
            fake_labels = fake_labels.cuda()

        D_A_real_loss = MSE_Loss(a_real_result, real_labels)
        D_A_fake_loss = MSE_Loss(a_fake_result, fake_labels)
        D_B_real_loss = MSE_Loss(b_real_result, real_labels)
        D_B_fake_loss = MSE_Loss(b_fake_result, fake_labels)

        D_A_loss = D_A_fake_loss + D_A_real_loss
        D_B_loss = D_B_fake_loss + D_B_real_loss

        D_A.zero_grad()
        D_B.zero_grad()
        D_A_loss.backward()
        D_B_loss.backward()
        D_A_Optimizer.step()
        D_B_Optimizer.step()

        # Log
        if i % config.log_frequency == 0:
            speed = time.time() - last_time
            last_time = time.time()
            format_str = ('Step: %d; Loss: G-A: %.3f, D-A: %.3f, G-B: %.3f, D-B: %.3f; Speed: %.2f sec/step')
            print(format_str % (i, G_A_loss, D_A_loss, G_B_loss, D_B_loss, speed/config.log_frequency))

    # Save Data
    print('-- Saving parameters and sample images.')
    state = {'G_A': G_A, 'G_B': G_B, 'D_A': D_A, 'D_B': D_B, 'epoch': epoch}
    if not os.path.isdir('checkpoint'):
        os.mkdir('checkpoint')
    torch.save(state, './checkpoint/cyclegan.nn')

    if epoch >= 10 and epoch % config.save_frequency == 0:
        # Test Images
        for i, (a_real_test, b_real_test) in enumerate(zip(a_test_loader, b_test_loader)):
            a_real_test = Variable(a_real_test[0])
            b_real_test = Variable(b_real_test[0])
            if use_cuda:
                a_real_test = a_real_test.cuda()
                b_real_test = b_real_test.cuda()
            
            a_fake_test = G_A(b_real_test)
            b_fake_test = G_B(a_real_test)
            a_rec_test = G_A(b_fake_test)
            b_rec_test = G_B(a_fake_test)

            test = torch.cat([a_real_test, b_fake_test, a_rec_test, b_real_test, a_fake_test, b_rec_test], dim=0)
            test = util.denorm(test).data
            if not os.path.isdir('result'):
                os.mkdir('result')
            save_image(test, 'result/test%d-epoch-%d.jpg' % (i, epoch))
    else:
        # Sample Image
        a_fake_fixed = G_A(b_real_fixed)
        b_fake_fixed = G_B(a_real_fixed)
        a_rec_fixed = G_A(b_fake_fixed)
        b_rec_fixed = G_B(a_fake_fixed)
        sample = torch.cat([a_real_fixed, b_fake_fixed, a_rec_fixed, b_real_fixed, a_fake_fixed, b_rec_fixed], dim=0)
        sample = util.denorm(sample).data
        if not os.path.isdir('result'):
            os.mkdir('result')
        save_image(sample, 'result/sample-epoch-%d.jpg' % (epoch))
        
    epoch_time = (time.time() - epoch_time)/60
    time_remain = (epoch_time * (config.max_epoch - epoch))/60
    print('-- Epoch %d completed. Epoch Time: %.2f min, Time Est: %.2f hour.' %(epoch, epoch_time, time_remain))
def train(start, epoch, config):
    last_time = time.time()
    epoch_time = time.time()
    for idx, (image, _) in enumerate(loader):
        # Discriminator
        net_d.zero_grad()
        real = Variable(image)
        noise = Variable(torch.Tensor(config.batch_size, config.noise_dim))
        noise.data.normal_(0.0, 1.0)
        if use_cuda:
            real = real.cuda()
            noise = noise.cuda()

        real_d = net_d(real)
        real_label = Variable(torch.ones(real_d.size()))
        if use_cuda:
            real_label = real_label.cuda()
        cost_d_real = bce(real_d, real_label)

        #if idx % config.k_d_real == 0:
        cost_d_real.backward()

        fake = net_g(noise)
        fake_d = net_d(fake.detach())
        fake_label = Variable(torch.zeros(fake_d.size()))
        if use_cuda:
            fake_label = fake_label.cuda()
        cost_d_fake = bce(fake_d, fake_label)

        #if idx % config.k_d_fake == 0:
        cost_d_fake.backward()

        cost_d = cost_d_fake + cost_d_real
        opt_d.step()

        # Generator
        net_g.zero_grad()
        fake_g = net_d(fake)

        real_label = Variable(torch.ones(fake_g.size()))
        if use_cuda:
            real_label = real_label.cuda()

        cost_g = bce(fake_g, real_label)

        #if idx % config.k_g == 0:
        cost_g.backward()
        opt_g.step()

        # Log
        if idx % config.log_frequency == 0:
            speed = time.time() - last_time
            last_time = time.time()
            format_str = (
                'Epoch: %d, Step: %d, G-Loss: %.3f, D-Loss: %.3f, Speed: %.2f sec/step'
            )
            print(format_str %
                  (epoch, idx, cost_g, cost_d, speed / config.log_frequency))

    # Saving Data
    fake_fixed = net_g(fixed)
    state = {
        'net_g': net_g,
        'net_d': net_d,
        'epoch': epoch,
    }
    if not os.path.isdir('checkpoint'):
        os.mkdir('checkpoint')
    torch.save(state, './checkpoint/dcgan.nn')
    if (epoch > 15 and epoch % config.save_frequency == 0) or (epoch <= 15):
        save_image(
            util.denorm(fake_fixed).data,
            '%s/fixed_%d.jpg' % (config.result_path, epoch))
        save_image(
            util.denorm(fake).data,
            '%s/fake_%d.jpg' % (config.result_path, epoch))
    print('-- Models and test images saved.')
    epoch_time = (time.time() - epoch_time) / 60
    time_remain = (epoch_time * (config.final_epoch - epoch)) / 60
    print('-- Epoch completed. Epoch Time: %.2f min, Time Est: %.2f hour.' %
          (epoch_time, time_remain))
Example #11
    def save_images(self, epoch, *args):
        x_concat = torch.cat(args, dim=3)
        sample_path = os.path.join(self.vis_dir, str(epoch).zfill(5) + '.png')
        save_image(denorm(x_concat.data.cpu()), sample_path, nrow=8, padding=0)
Example #12
    def train(self, epochs: int = 100):

        self.netG.to(self.device)
        self.netD.to(self.device)

        start_time = time.time()

        print("=====Train Start======")
        for epoch in range(epochs):
            for iter, (real_img, _) in enumerate(self.trainloader):
                self.netG.train()
                self.netD.train()

                batch_size = real_img.size(0)
                real_label = torch.ones(batch_size).to(self.device)
                fake_label = torch.zeros(batch_size).to(self.device)
                real_img = real_img.to(self.device)
                z = torch.randn(real_img.size(0), 256).to(self.device)

                ###################################################################################
                # (1) Update Discriminator
                # Compute the discriminator loss. You have to implement 4 types of loss functions ('gan', 'lsgan', 'wgan', 'wgan-gp').
                # You can implement 'wgan' loss function wihtout using self.criterion.
                # Note1 : Use self.criterion and self.type which is declared in the init function.
                # Note2 : Use the 'detach()' function appropriately.
                ###################################################################################

                lossD: torch.Tensor = None

                ### YOUR CODE HERE (~ 15 lines)
                for p in self.netD.parameters():
                    p.requires_grad = True

                if self.type == 'gan' or self.type == 'lsgan':
                    output_real = self.netD(real_img).view(-1)
                    lossD_r = self.criterion(output_real, real_label)

                    fake_img = self.netG(z)
                    output_fake = self.netD(fake_img.detach()).view(-1)
                    lossD_f = self.criterion(output_fake, fake_label)

                    lossD = self.D_weight * (lossD_r + lossD_f)
                else:
                    lossD_r = self.netD(real_img)
                    lossD_r = lossD_r.mean(0).view(1)

                    fake_img = self.netG(z)
                    lossD_f = self.netD(fake_img.detach())
                    lossD_f = lossD_f.mean(0).view(1)
                    # adversarial loss
                    lossD = lossD_f - lossD_r

                    if self.type == 'wgan-gp':
                        # Interpolate between real and fake images on the training device.
                        alpha = torch.rand(batch_size, 1, 1, 1).to(self.device)
                        gp_x = alpha * real_img + (1 - alpha) * fake_img
                        gp_x = torch.autograd.Variable(gp_x,
                                                       requires_grad=True)

                        gp_x_logit = self.netD(gp_x)
                        gradient_penalty = self.GP_loss(gp_x_logit, gp_x)

                        lossD = lossD + self.lambda_term * gradient_penalty
                ### END YOUR CODE

                #Test code
                if epoch == 0 and iter == 0:
                    test_lossD_function(self.type, lossD)

                self.netD.zero_grad()
                lossD.backward()
                self.optimizerD.step()

                ### Clipping the weights of Discriminator
                clip_value = 0.01

                if self.type == 'wgan':
                    ### YOUR CODE HERE (~2 lines)
                    for parm in self.netD.parameters():
                        parm.data.clamp_(-clip_value, clip_value)
                    ### END YOUR CODE

                ###################################################################################
                # (2) Update Generator
                # Compute the generator loss. You have to implement 4 types of loss functions ('gan', 'lsgan', 'wgan', 'wgan-gp').
                # You can implement 'wgan' and 'wgan-gp' loss functions without using self.criterion.
                ###################################################################################

                lossG: torch.Tensor = None
                ### YOUR CODE HERE (~ 10 lines)

                # if self.type == 'gan' or self.type == 'lsgan' or (iter+1) % self.n_critic == 0:
                for p in self.netD.parameters():
                    p.requires_grad = False

                if self.type == 'gan' or self.type == 'lsgan':
                    output = self.netD(fake_img).view(-1)
                    lossG = self.criterion(output, real_label)
                else:
                    lossG = self.netD(fake_img).view(-1)
                    lossG = -lossG.mean()

                ### END YOUR CODE

                # Test code
                if epoch == 0 and iter == 0:
                    test_lossG_function(self.type, lossG)

                self.netG.zero_grad()
                lossG.backward()
                self.optimizerG.step()

                if (iter + 1) % 100 == 0:
                    end_time = time.time() - start_time
                    end_time = str(datetime.timedelta(seconds=end_time))[:-7]
                    # if self.type == 'wgan-gp':
                    #     print('GP: %.4f' % gradient_penalty)
                    print(
                        'Time [%s], Epoch [%d/%d], Step[%d/%d], lossD: %.4f, lossG: %.4f'
                        % (end_time, epoch + 1, epochs, iter + 1,
                           len(self.trainloader), lossD.item(), lossG.item()))

            # Save Images
            fake_img = fake_img.reshape(fake_img.size(0), 3, 32, 32)
            save_image(
                denorm(fake_img),
                os.path.join('./results/', self.type, 'images',
                             'fake_image-{:03d}.png'.format(epoch + 1)))

            if (epoch + 1) % 50 == 0:
                save_checkpoint(
                    self.netG,
                    os.path.join('./results', self.type, 'checkpoints',
                                 'netG_{:02d}.pth'.format(epoch + 1)),
                    self.device)
                save_checkpoint(
                    self.netD,
                    os.path.join('./results', self.type, 'checkpoints',
                                 'netD_{:02d}.pth'.format(epoch + 1)),
                    self.device)

        # Save Checkpoints
        save_checkpoint(
            self.netG,
            os.path.join('./results', self.type, 'checkpoints',
                         'netG_final.pth'), self.device)
        save_checkpoint(
            self.netD,
            os.path.join('./results', self.type, 'checkpoints',
                         'netD_final.pth'), self.device)
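
Example #12 above calls self.GP_loss, which is not shown in the snippet. A plausible stand-alone sketch of such a WGAN-GP gradient-penalty term, consistent with the inline version in Example #8, might look like this (the name gradient_penalty and the (scores, inputs) argument order are assumptions):

import torch


def gradient_penalty(d_out, x_interp):
    # WGAN-GP term: (||dD(x_hat)/dx_hat||_2 - 1)^2 averaged over the batch.
    # x_interp must have been created with requires_grad=True.
    grad = torch.autograd.grad(outputs=d_out,
                               inputs=x_interp,
                               grad_outputs=torch.ones_like(d_out),
                               create_graph=True,
                               retain_graph=True,
                               only_inputs=True)[0]
    grad = grad.view(grad.size(0), -1)
    grad_norm = grad.norm(2, dim=1)
    return ((grad_norm - 1) ** 2).mean()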
Example #13
        x_ = train_img[:, :, 0:img_size, :]
        y_ = train_img[:, :, img_size:, :]

    if img_size != opt.input_size:
        x_ = util.imgs_resize(x_, opt.input_size)
        y_ = util.imgs_resize(y_, opt.input_size)

    x_ = util.norm(x_)
    y_ = util.norm(y_)

    test_img = sess.run(G, {x: x_})

    num_str = test_loader.file_list[iter][:test_loader.file_list[iter].find('.')]
    path = opt.dataset + '_results/test_results/' + num_str + '_input.png'
    plt.imsave(path, (util.denorm(x_[0]) / 255))
    path = opt.dataset + '_results/test_results/' + num_str + '_output.png'
    plt.imsave(path, (util.denorm(test_img[0]) / 255))
    path = opt.dataset + '_results/test_results/' + num_str + '_target.png'
    plt.imsave(path, (util.denorm(y_[0]) / 255))

    per_end_time = time.time()
    per_ptime.append(per_end_time - per_start_time)

total_end_time = time.time()
total_ptime = total_end_time - total_start_time

print('total %d images generation complete!' % (iter + 1))
print(
    'Avg. one image process ptime: %.2f, total %d images process ptime: %.2f' %
    (np.mean(per_ptime), (iter + 1), total_ptime))
Example #14
    def train(self, epochs: int = 100):

        self.netG_A2B.to(self.device)
        self.netG_B2A.to(self.device)
        self.netD_A.to(self.device)
        self.netD_B.to(self.device)

        start_time = time.time()

        print("=====Train Start======")

        for epoch in range(epochs):
            for iter, (real_A, real_B) in enumerate(self.trainloader):
                self.netG_A2B.train()
                self.netG_B2A.train()
                self.netD_A.train()
                self.netD_B.train()

                real_A = real_A.to(self.device)
                real_B = real_B.to(self.device)

                ###################################################################################
                # (1) Update Generator
                # Train the generator (self.netG_A2B & self.netG_B2A).
                # You have to implement 3 types of loss functions ('identity loss', 'gan loss', 'cycle consistency loss')
                ###################################################################################

                identity_loss: torch.Tensor = None
                gan_loss: torch.Tensor = None
                cycle_loss: torch.Tensor = None
                lossG: torch.Tensor = None

                ### YOUR CODE HERE (~ 15 lines)
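                # A possible fill (sketch) following the completed version in Example #7
                # above; torch.ones_like is used in place of pre-built label tensors, and
                # self.weight_idt / self.weight_gan / self.weight_cycle are assumed to
                # exist as in that example.
                fake_B = self.netG_A2B(real_A)
                reco_A = self.netG_B2A(fake_B)
                fake_A = self.netG_B2A(real_B)
                reco_B = self.netG_A2B(fake_A)

                # Identity loss: each generator should leave images of its output domain unchanged.
                identity_loss = self.weight_idt * (
                    self.criterion_identity(self.netG_A2B(real_B), real_B) +
                    self.criterion_identity(self.netG_B2A(real_A), real_A))

                # GAN loss: the translated images should fool the discriminators.
                pred_fake_B = self.netD_A(fake_B)
                pred_fake_A = self.netD_B(fake_A)
                gan_loss = self.weight_gan * (
                    self.criterion_GAN(pred_fake_B, torch.ones_like(pred_fake_B)) +
                    self.criterion_GAN(pred_fake_A, torch.ones_like(pred_fake_A)))

                # Cycle-consistency loss: A -> B -> A and B -> A -> B should reconstruct the inputs.
                cycle_loss = self.weight_cycle * (
                    self.criterion_cycle(reco_A, real_A) +
                    self.criterion_cycle(reco_B, real_B))

                lossG = identity_loss + gan_loss + cycle_loss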

                ### END YOUR CODE

                # Test code
                if epoch == 0 and iter == 0:
                    test_lossG_fuction(identity_loss, gan_loss, cycle_loss)

                self.optimizerG.zero_grad()
                lossG.backward()
                self.optimizerG.step()

                ###################################################################################
                # (2) Update Discriminator
                # Train the discrminator (self.netD_A & self.netD_B).
                ###################################################################################
                # Discriminator A

                lossD_A: torch.Tensor = None

                ### YOUR CODE HERE (~ 4 lines)
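                # A possible fill (sketch) mirroring Example #7: netD_A scores real_B against
                # the detached fake_B produced in the generator step above.
                pred_real = self.netD_A(real_B)
                pred_fake = self.netD_A(fake_B.detach())
                lossD_A = (self.criterion_GAN(pred_real, torch.ones_like(pred_real)) +
                           self.criterion_GAN(pred_fake, torch.zeros_like(pred_fake)))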

                ### END YOUR CODE

                self.optimizerD_A.zero_grad()
                lossD_A.backward()
                self.optimizerD_A.step()

                # Discriminator B

                lossD_B: torch.Tensor = None

                ### YOUR CODE HERE (~ 4 lines)
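                # A possible fill (sketch) mirroring Example #7 for the second discriminator.
                pred_real = self.netD_B(real_A)
                pred_fake = self.netD_B(fake_A.detach())
                lossD_B = (self.criterion_GAN(pred_real, torch.ones_like(pred_real)) +
                           self.criterion_GAN(pred_fake, torch.zeros_like(pred_fake)))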

                ### END YOUR CODE

                # Test code
                if epoch == 0 and iter == 0:
                    test_lossD_fuction(lossD_A, lossD_B)

                self.optimizerD_B.zero_grad()
                lossD_B.backward()
                self.optimizerD_B.step()

                if (iter + 1) % 100 == 0:
                    end_time = time.time() - start_time
                    end_time = str(datetime.timedelta(seconds=end_time))[:-7]
                    print(
                        'Time [%s], Epoch [%d/%d], Step[%d/%d], lossD_A: %.4f, lossD_B: %.4f, lossG: %.4f'
                        % (end_time, epoch + 1, epochs, iter + 1,
                           len(self.trainloader), lossD_A.item(),
                           lossD_B.item(), lossG.item()))

            # Save Images
            fake_A = fake_A.reshape(fake_A.size(0), 3, 256, 256)
            fake_B = fake_B.reshape(fake_B.size(0), 3, 256, 256)

            save_image(
                denorm(fake_B),
                os.path.join('./results/cyclegan/images',
                             'fakeA2B-{:03d}.png'.format(epoch + 1)))
            save_image(
                denorm(fake_A),
                os.path.join('./results/cyclegan/images',
                             'fakeB2A-{:03d}.png'.format(epoch + 1)))

            if (epoch + 1) % 10 == 0:
                save_checkpoint(
                    self.netG_A2B,
                    './results/cyclegan/checkpoints/netG_A2B_{:02d}.pth'.
                    format(epoch + 1), self.device)
                save_checkpoint(
                    self.netG_B2A,
                    './results/cyclegan/checkpoints/netG_B2A_{:02d}.pth'.
                    format(epoch + 1), self.device)

                save_checkpoint(
                    self.netD_A,
                    './results/cyclegan/checkpoints/netD_A_{:02d}.pth'.format(
                        epoch + 1), self.device)
                save_checkpoint(
                    self.netD_B,
                    './results/cyclegan/checkpoints/netD_B_{:02d}.pth'.format(
                        epoch + 1), self.device)

        # Save Checkpoints
        save_checkpoint(self.netG_A2B,
                        './results/cyclegan/checkpoints/netG_A2B_final.pth',
                        self.device)
        save_checkpoint(self.netG_B2A,
                        './results/cyclegan/checkpoints/netG_B2A_final.pth',
                        self.device)

        save_checkpoint(self.netD_A,
                        './results/cyclegan/checkpoints/netD_A_final.pth',
                        self.device)
        save_checkpoint(self.netD_B,
                        './results/cyclegan/checkpoints/netD_B_final.pth',
                        self.device)
    def train(self, epochs: int = 100):

        self.netG.to(self.device)
        self.netD.to(self.device)

        start_time = time.time()

        print("=====Train Start======")

        for epoch in range(epochs):
            for iter, (real_img, _) in enumerate(self.trainloader):
                self.netG.train()
                self.netD.train()

                batch_size = real_img.size(0)
                real_label = torch.ones(batch_size).to(self.device)
                fake_label = torch.zeros(batch_size).to(self.device)
                real_img = real_img.to(self.device)
                z = torch.randn(real_img.size(0), 256).to(self.device)

                ###################################################################################
                # (1) Update Discriminator
                # Compute the discriminator loss. You have to implement 4 types of loss functions ('gan', 'lsgan', 'wgan', 'wgan-gp').
                # You can implement 'wgan' loss function wihtout using self.criterion.
                # Note1 : Use self.criterion and self.type which is declared in the init function.
                # Note2 : Use the 'detach()' function appropriately.
                ###################################################################################

                lossD: torch.Tensor = None

                ### YOUR CODE HERE (~ 15 lines)
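                # A possible fill (sketch) following the completed version in Example #12
                # above; self.D_weight, self.GP_loss and self.lambda_term are assumed to
                # exist as in that example.
                fake_img = self.netG(z)
                if self.type == 'gan' or self.type == 'lsgan':
                    output_real = self.netD(real_img).view(-1)
                    output_fake = self.netD(fake_img.detach()).view(-1)
                    lossD = self.D_weight * (self.criterion(output_real, real_label) +
                                             self.criterion(output_fake, fake_label))
                else:
                    # WGAN critic loss: E[D(fake)] - E[D(real)]
                    lossD = self.netD(fake_img.detach()).mean() - self.netD(real_img).mean()
                    if self.type == 'wgan-gp':
                        alpha = torch.rand(batch_size, 1, 1, 1).to(self.device)
                        gp_x = (alpha * real_img +
                                (1 - alpha) * fake_img).detach().requires_grad_(True)
                        gradient_penalty = self.GP_loss(self.netD(gp_x), gp_x)
                        lossD = lossD + self.lambda_term * gradient_penalty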

                ### END YOUR CODE

                # Test code
                if epoch == 0 and iter == 0:
                    test_lossD_function(self.type, lossD)

                self.netD.zero_grad()
                lossD.backward()
                self.optimizerD.step()

                ### Clipping the weights of Discriminator
                clip_value = 0.01

                if self.type == 'wgan':
                    ### YOUR CODE HERE (~2 lines)
                    pass
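                    # A possible fill (sketch), as in Example #12: clamp every critic weight.
                    for param in self.netD.parameters():
                        param.data.clamp_(-clip_value, clip_value)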

                    ### END YOUR CODE

                ###################################################################################
                # (2) Update Generator
                # Compute the generator loss. You have to implement 4 types of loss functions ('gan', 'lsgan', 'wgan', 'wgan-gp').
                # You can implement 'wgan' and 'wgan-gp' loss functions without using self.criterion.
                ###################################################################################

                lossG: torch.Tensor = None

                ### YOUR CODE HERE (~ 10 lines)
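                # A possible fill (sketch) following Example #12; fake_img comes from the
                # discriminator step above.
                if self.type == 'gan' or self.type == 'lsgan':
                    output = self.netD(fake_img).view(-1)
                    lossG = self.criterion(output, real_label)
                else:
                    lossG = -self.netD(fake_img).mean()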

                ### END YOUR CODE

                # Test code
                if epoch == 0 and iter == 0:
                    test_lossG_function(self.type, lossG)

                self.netG.zero_grad()
                lossG.backward()
                self.optimizerG.step()

                if (iter + 1) % 100 == 0:
                    end_time = time.time() - start_time
                    end_time = str(datetime.timedelta(seconds=end_time))[:-7]
                    print(
                        'Time [%s], Epoch [%d/%d], Step[%d/%d], lossD: %.4f, lossG: %.4f'
                        % (end_time, epoch + 1, epochs, iter + 1,
                           len(self.trainloader), lossD.item(), lossG.item()))

            # Save Images
            fake_img = fake_img.reshape(fake_img.size(0), 3, 32, 32)
            save_image(
                denorm(fake_img),
                os.path.join('./results/', self.type, 'images',
                             'fake_image-{:03d}.png'.format(epoch + 1)))

            if (epoch + 1) % 50 == 0:
                save_checkpoint(
                    self.netG,
                    os.path.join('./results', self.type, 'checkpoints',
                                 'netG_{:02d}.pth'.format(epoch + 1)),
                    self.device)
                save_checkpoint(
                    self.netD,
                    os.path.join('./results', self.type, 'checkpoints',
                                 'netD_{:02d}.pth'.format(epoch + 1)),
                    self.device)

        # Save Checkpoints
        save_checkpoint(
            self.netG,
            os.path.join('./results', self.type, 'checkpoints',
                         'netG_final.pth'), self.device)
        save_checkpoint(
            self.netD,
            os.path.join('./results', self.type, 'checkpoints',
                         'netD_final.pth'), self.device)