Example #1
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import transforms

# Project-specific helpers assumed to be importable from this repository:
# crop, augmentation, mydata, Generator, Discriminator, vgg19,
# perceptual_loss, TVLoss


def train(args):

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    transform = transforms.Compose(
        [crop(args.scale, args.patch_size),
         augmentation()])
    dataset = mydata(GT_path=args.GT_path,
                     LR_path=args.LR_path,
                     in_memory=args.in_memory,
                     transform=transform)
    loader = DataLoader(dataset,
                        batch_size=args.batch_size,
                        shuffle=True,
                        num_workers=args.num_workers,
                        # drop_last keeps every batch at batch_size so the
                        # fixed-size real/fake labels below always match
                        drop_last=True)

    generator = Generator(img_feat=3,
                          n_feats=64,
                          kernel_size=3,
                          num_block=args.res_num,
                          scale=args.scale)

    if args.fine_tuning:
        generator.load_state_dict(torch.load(args.generator_path))
        print("pre-trained model is loaded")
        print("path : %s" % (args.generator_path))

    generator = generator.to(device)
    generator.train()

    l2_loss = nn.MSELoss()
    g_optim = optim.Adam(generator.parameters(), lr=1e-4)

    pre_epoch = 0
    fine_epoch = 0

    #### Train using L2_loss
    while pre_epoch < args.pre_train_epoch:
        for i, tr_data in enumerate(loader):
            gt = tr_data['GT'].to(device)
            lr = tr_data['LR'].to(device)

            output, _ = generator(lr)
            loss = l2_loss(gt, output)

            g_optim.zero_grad()
            loss.backward()
            g_optim.step()

        pre_epoch += 1

        if pre_epoch % 2 == 0:
            print(pre_epoch)
            print(loss.item())
            print('=========')

        if pre_epoch % 800 == 0:
            torch.save(generator.state_dict(),
                       './model/pre_trained_model_%03d.pt' % pre_epoch)

    #### Train using perceptual & adversarial loss
    vgg_net = vgg19().to(device)
    vgg_net = vgg_net.eval()

    discriminator = Discriminator(patch_size=args.patch_size * args.scale)
    discriminator = discriminator.to(device)
    discriminator.train()

    d_optim = optim.Adam(discriminator.parameters(), lr=1e-4)
    scheduler = optim.lr_scheduler.StepLR(g_optim, step_size=2000, gamma=0.1)

    VGG_loss = perceptual_loss(vgg_net)
    cross_ent = nn.BCELoss()
    tv_loss = TVLoss()
    real_label = torch.ones((args.batch_size, 1)).to(device)
    fake_label = torch.zeros((args.batch_size, 1)).to(device)

    while fine_epoch < args.fine_train_epoch:

        for i, tr_data in enumerate(loader):
            gt = tr_data['GT'].to(device)
            lr = tr_data['LR'].to(device)

            ## Training Discriminator
            output, _ = generator(lr)
            # Detach the generator output so no gradients flow back into G
            # during the discriminator update.
            fake_prob = discriminator(output.detach())
            real_prob = discriminator(gt)

            d_loss_real = cross_ent(real_prob, real_label)
            d_loss_fake = cross_ent(fake_prob, fake_label)

            d_loss = d_loss_real + d_loss_fake

            g_optim.zero_grad()
            d_optim.zero_grad()
            d_loss.backward()
            d_optim.step()

            ## Training Generator
            output, _ = generator(lr)
            fake_prob = discriminator(output)

            _percep_loss, hr_feat, sr_feat = VGG_loss((gt + 1.0) / 2.0,
                                                      (output + 1.0) / 2.0,
                                                      layer=args.feat_layer)

            L2_loss = l2_loss(output, gt)
            percep_loss = args.vgg_rescale_coeff * _percep_loss
            adversarial_loss = args.adv_coeff * cross_ent(
                fake_prob, real_label)
            total_variance_loss = args.tv_loss_coeff * tv_loss(
                args.vgg_rescale_coeff * (hr_feat - sr_feat)**2)

            g_loss = percep_loss + adversarial_loss + total_variance_loss + L2_loss

            g_optim.zero_grad()
            d_optim.zero_grad()
            g_loss.backward()
            g_optim.step()

        fine_epoch += 1

        # Step the LR scheduler once per epoch, after the optimizer updates
        # (PyTorch >= 1.1 expects scheduler.step() to follow optimizer.step()).
        scheduler.step()

        if fine_epoch % 2 == 0:
            print(fine_epoch)
            print(g_loss.item())
            print(d_loss.item())
            print('=========')

        if fine_epoch % 500 == 0:
            torch.save(generator.state_dict(),
                       './model/SRGAN_gene_%03d.pt' % fine_epoch)
            torch.save(discriminator.state_dict(),
                       './model/SRGAN_discrim_%03d.pt' % fine_epoch)
Example #2
                valing_results['ssim'] = valing_results[
                    'ssims'] / valing_results['batch_sizes']

                sum_val_psnr += valing_results['psnr']
                sum_val_ssim += valing_results['ssim']

        # make sure the save directory for model parameters exists
        if not os.path.exists(epoch_path):
            os.makedirs(epoch_path)

        # save models and metrics every v epochs (e.g. every 25 or 50)
        if epoch % v == 0 and epoch != 0:
            torch.save(
                G.state_dict(), epoch_path + "/" + 'netG_epoch_%d_%d.pth' %
                (upscale_factor, epoch))
            torch.save(
                D.state_dict(), epoch_path + "/" + 'netD_epoch_%d_%d.pth' %
                (upscale_factor, epoch))
            # save loss, scores, psnr, ssim
            results['d_loss'].append(running_results['d_loss'] /
                                     running_results['batch_sizes'])
            results['g_loss'].append(running_results['g_loss'] /
                                     running_results['batch_sizes'])
            results['d_score'].append(running_results['d_score'] /
                                      running_results['batch_sizes'])
            results['g_score'].append(running_results['g_score'] /
                                      running_results['batch_sizes'])

            results['psnr'].append(valing_results['psnr'])
            results['ssim'].append(valing_results['ssim'])

            avg_psnr = sum_val_psnr / batch_Size
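This snippet is a fragment from a longer training loop and relies on accumulators that are initialized earlier in the (truncated) function. A hypothetical sketch of those structures, reconstructed only from the keys the fragment reads and appends to:

# Hypothetical initialization of the accumulators used above; only the keys
# actually referenced in the fragment are shown.
results = {'d_loss': [], 'g_loss': [], 'd_score': [], 'g_score': [],
           'psnr': [], 'ssim': []}

# reset at the start of each training epoch
running_results = {'batch_sizes': 0, 'd_loss': 0, 'g_loss': 0,
                   'd_score': 0, 'g_score': 0}

# reset at the start of each validation pass
valing_results = {'batch_sizes': 0, 'ssims': 0, 'psnr': 0, 'ssim': 0}

sum_val_psnr = 0
sum_val_ssim = 0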