Example #1
0
    def test(self):
        """Run DRCN inference over the test set and save per-image results.

        Builds the recursive network, loads trained weights, then for each
        test image reconstructs from the bicubic-upscaled input and saves a
        comparison plot (GT / LR / bicubic / reconstruction) with PSNRs.
        """
        # networks
        self.num_recursions = 16
        self.model = Net(num_channels=self.num_channels,
                         base_filter=256,
                         num_recursions=self.num_recursions)

        if self.gpu_mode:
            self.model.cuda()

        # load model
        self.load_model()

        # load dataset
        test_data_loader = self.load_dataset(dataset='test')

        # Test
        print('Test is started.')
        img_num = 0
        self.model.eval()
        for input, target in test_data_loader:
            # input data (bicubic interpolated image)
            if self.gpu_mode:
                y_ = Variable(
                    utils.img_interp(input, self.scale_factor).cuda())
            else:
                y_ = Variable(utils.img_interp(input, self.scale_factor))

            # prediction (model returns per-recursion outputs and the
            # final reconstruction; only the latter is used here)
            _, recon_imgs = self.model(y_)
            for i, recon_img in enumerate(recon_imgs):
                img_num += 1
                # use the enumerate loop variable directly instead of
                # re-indexing recon_imgs[i] (same tensor, less noise)
                recon_img = recon_img.cpu().data
                gt_img = target[i]
                lr_img = input[i]
                bc_img = utils.img_interp(input[i], self.scale_factor)

                # calculate psnrs
                bc_psnr = utils.PSNR(bc_img, gt_img)
                recon_psnr = utils.PSNR(recon_img, gt_img)

                # save result images
                result_imgs = [gt_img, lr_img, bc_img, recon_img]
                psnrs = [None, None, bc_psnr, recon_psnr]
                utils.plot_test_result(result_imgs,
                                       psnrs,
                                       img_num,
                                       save_dir=self.save_dir)

                print("Saving %d test result images..." % img_num)
Example #2
0
    def test(self):
        """Run inference over the test set and save per-image results.

        Loads the trained network, super-resolves each test image, shaves
        scale-dependent border pixels, and saves a GT/LR/bicubic/SR
        comparison plot annotated with PSNRs.
        """
        # networks
        self.model = Net(num_channels=self.num_channels, base_filter=64, scale_factor=self.scale_factor)

        if self.gpu_mode:
            self.model.cuda()

        # load model
        self.load_model()

        # load dataset
        test_data_loader = self.load_dataset(dataset='test')

        # Test
        print('Test is started.')
        img_num = 0
        self.model.eval()
        for input, target in test_data_loader:
            # input data (low resolution image)
            if self.gpu_mode:
                y_ = Variable(input.cuda())
            else:
                y_ = Variable(input)

            # prediction
            recon_imgs = self.model(y_)
            # iterate over the ACTUAL batch size: the loader's last batch
            # may hold fewer than self.test_batch_size images, and indexing
            # by the configured batch size would raise IndexError there
            for i in range(len(recon_imgs)):
                img_num += 1
                recon_img = recon_imgs[i].cpu().data
                # shave borders so PSNR ignores boundary artifacts;
                # GT/bicubic borders scale with the upsampling factor
                gt_img = utils.shave(target[i], border_size=8 * self.scale_factor)
                lr_img = utils.shave(input[i], border_size=8)
                bc_img = utils.shave(utils.img_interp(input[i], self.scale_factor), border_size=8 * self.scale_factor)

                # calculate psnrs
                bc_psnr = utils.PSNR(bc_img, gt_img)
                recon_psnr = utils.PSNR(recon_img, gt_img)

                # save result images
                result_imgs = [gt_img, lr_img, bc_img, recon_img]
                psnrs = [None, None, bc_psnr, recon_psnr]
                utils.plot_test_result(result_imgs, psnrs, img_num, save_dir=self.save_dir)

                print("Saving %d test result images..." % img_num)
              (epoch + 1, params.num_epochs, i + 1, len(train_data_loader),
               D_loss.data[0], G_loss.data[0]))

    D_avg_loss = torch.mean(torch.FloatTensor(D_losses))
    G_avg_loss = torch.mean(torch.FloatTensor(G_losses))

    # avg loss values for plot
    D_avg_losses.append(D_avg_loss)
    G_avg_losses.append(G_avg_loss)

    # Show result for test image
    gen_image = G(Variable(test_input))  #.cuda()))
    gen_image = gen_image.cpu().data
    utils.plot_test_result(test_input,
                           test_target,
                           gen_image,
                           epoch,
                           save=True,
                           save_dir=save_dir)

np.savetxt("D_losses.csv", D_losses, delimiter=",")
np.savetxt("G_losses.csv", G_losses, delimiter=",")
# Plot average losses
utils.plot_loss(D_losses,
                G_losses,
                params.num_epochs,
                save=True,
                save_dir=save_dir)

# Make gif
utils.make_gif(params.dataset, params.num_epochs, save_dir=save_dir)
    def test(self):
        """Run SRGAN generator inference on every configured test dataset.

        For each dataset: builds the generator, loads trained weights,
        super-resolves each low-resolution image, saves the SR output, and
        plots a GT/LR/bicubic/SR comparison annotated with PSNR values.
        """
        # networks
        self.G = Generator(num_channels=self.num_channels,
                           base_filter=64,
                           num_residuals=16)

        if self.gpu_mode:
            self.G.cuda()

        # load model
        self.load_model()

        # load dataset
        for test_dataset in self.test_dataset:
            test_data_loader = self.load_dataset(dataset=test_dataset,
                                                 is_train=False)

            # Test
            print('Test is started.')
            img_num = 0
            # NOTE(review): len(data_loader) is the number of BATCHES, not
            # images, so the [%d/%d] progress below can exceed its total
            # when batch size > 1 — confirm intended.
            total_img_num = len(test_data_loader)
            self.G.eval()
            for lr, hr, bc in test_data_loader:
                # input data (low resolution image)
                # grayscale mode: feed only the first (luminance) channel
                if self.num_channels == 1:
                    y_ = Variable(utils.norm(lr[:, 0].unsqueeze(1), vgg=True))
                else:
                    y_ = Variable(utils.norm(lr, vgg=True))

                if self.gpu_mode:
                    y_ = y_.cuda()

                # prediction
                recon_imgs = self.G(y_)
                for i, recon_img in enumerate(recon_imgs):
                    img_num += 1
                    # undo VGG-style normalization before saving/plotting
                    sr_img = utils.denorm(recon_img.cpu().data, vgg=True)

                    # save result image
                    save_dir = os.path.join(self.save_dir, 'test_result',
                                            test_dataset)
                    utils.save_img(sr_img, img_num, save_dir=save_dir)

                    # calculate psnrs
                    if self.num_channels == 1:
                        gt_img = hr[i][0].unsqueeze(0)
                        lr_img = lr[i][0].unsqueeze(0)
                        bc_img = bc[i][0].unsqueeze(0)
                    else:
                        gt_img = hr[i]
                        lr_img = lr[i]
                        bc_img = bc[i]

                    bc_psnr = utils.PSNR(bc_img, gt_img)
                    recon_psnr = utils.PSNR(sr_img, gt_img)

                    # plot result images
                    result_imgs = [gt_img, lr_img, bc_img, sr_img]
                    psnrs = [None, None, bc_psnr, recon_psnr]
                    utils.plot_test_result(result_imgs,
                                           psnrs,
                                           img_num,
                                           save_dir=save_dir)

                    print('Test DB: %s, Saving result images...[%d/%d]' %
                          (test_dataset, img_num, total_img_num))

            print('Test is finishied.')
    def train(self):
        """Train SRGAN in two phases.

        Phase 1: pre-train the generator for 50 epochs with pure MSE
        content loss (skipped if pre-trained weights load successfully).
        Phase 2: adversarial training — D on real vs. generated HR images
        (BCE), G on MSE + VGG feature loss + adversarial loss.

        Saves a progress image each epoch, model checkpoints every
        ``self.save_epochs`` epochs, a final PSNR comparison plot, and an
        average-loss plot.
        """
        # load dataset
        train_data_loader = self.load_dataset(dataset=self.train_dataset,
                                              is_train=True)
        test_data_loader = self.load_dataset(dataset=self.test_dataset[0],
                                             is_train=False)

        # networks
        self.G = Generator(num_channels=self.num_channels,
                           base_filter=64,
                           num_residuals=16)
        self.D = Discriminator(num_channels=self.num_channels,
                               base_filter=64,
                               image_size=self.crop_size)

        # weigh initialization
        self.G.weight_init()
        self.D.weight_init()

        # For the content loss
        self.feature_extractor = FeatureExtractor(
            models.vgg19(pretrained=True))

        # optimizer
        self.G_optimizer = optim.Adam(self.G.parameters(),
                                      lr=self.lr,
                                      betas=(0.9, 0.999))
        # self.D_optimizer = optim.Adam(self.D.parameters(), lr=self.lr, betas=(0.9, 0.999))
        # D is deliberately trained slower (SGD, lr/100) than G
        self.D_optimizer = optim.SGD(self.D.parameters(),
                                     lr=self.lr / 100,
                                     momentum=0.9,
                                     nesterov=True)

        # loss function
        if self.gpu_mode:
            self.G.cuda()
            self.D.cuda()
            self.feature_extractor.cuda()
            self.MSE_loss = nn.MSELoss().cuda()
            self.BCE_loss = nn.BCELoss().cuda()
        else:
            self.MSE_loss = nn.MSELoss()
            self.BCE_loss = nn.BCELoss()

        print('---------- Networks architecture -------------')
        utils.print_network(self.G)
        utils.print_network(self.D)
        print('----------------------------------------------')

        # set the logger
        G_log_dir = os.path.join(self.save_dir, 'G_logs')
        if not os.path.exists(G_log_dir):
            os.mkdir(G_log_dir)
        G_logger = Logger(G_log_dir)

        D_log_dir = os.path.join(self.save_dir, 'D_logs')
        if not os.path.exists(D_log_dir):
            os.mkdir(D_log_dir)
        D_logger = Logger(D_log_dir)

        ################# Pre-train generator #################
        self.epoch_pretrain = 50

        # Load pre-trained parameters of generator
        if not self.load_model(is_pretrain=True):
            # Pre-training generator for 50 epochs
            print('Pre-training is started.')
            self.G.train()
            for epoch in range(self.epoch_pretrain):
                for iter, (lr, hr, _) in enumerate(train_data_loader):
                    # input data (low resolution image)
                    # grayscale mode: feed only the first channel of HR/LR
                    if self.num_channels == 1:
                        x_ = Variable(
                            utils.norm(hr[:, 0].unsqueeze(1), vgg=True))
                        y_ = Variable(
                            utils.norm(lr[:, 0].unsqueeze(1), vgg=True))
                    else:
                        x_ = Variable(utils.norm(hr, vgg=True))
                        y_ = Variable(utils.norm(lr, vgg=True))

                    if self.gpu_mode:
                        x_ = x_.cuda()
                        y_ = y_.cuda()

                    # Train generator
                    self.G_optimizer.zero_grad()
                    recon_image = self.G(y_)

                    # Content losses
                    content_loss = self.MSE_loss(recon_image, x_)

                    # Back propagation
                    G_loss_pretrain = content_loss
                    G_loss_pretrain.backward()
                    self.G_optimizer.step()

                    # log
                    print("Epoch: [%2d] [%4d/%4d] G_loss_pretrain: %.8f" %
                          ((epoch + 1), (iter + 1), len(train_data_loader),
                           G_loss_pretrain.item()))

            print('Pre-training is finished.')

            # Save pre-trained parameters of generator
            self.save_model(is_pretrain=True)

        ################# Adversarial train #################
        print('Training is started.')
        # Avg. losses
        G_avg_loss = []
        D_avg_loss = []
        step = 0

        # fixed test image (index 2) for per-epoch progress visualization
        test_lr, test_hr, test_bc = test_data_loader.dataset.__getitem__(2)
        test_lr = test_lr.unsqueeze(0)
        test_hr = test_hr.unsqueeze(0)
        test_bc = test_bc.unsqueeze(0)

        self.G.train()
        self.D.train()
        for epoch in range(self.num_epochs):

            # learning rate is decayed by a factor of 10 every 20 epoch
            if (epoch + 1) % 20 == 0:
                for param_group in self.G_optimizer.param_groups:
                    param_group["lr"] /= 10.0
                print("Learning rate decay for G: lr={}".format(
                    self.G_optimizer.param_groups[0]["lr"]))
                for param_group in self.D_optimizer.param_groups:
                    param_group["lr"] /= 10.0
                print("Learning rate decay for D: lr={}".format(
                    self.D_optimizer.param_groups[0]["lr"]))

            G_epoch_loss = 0
            D_epoch_loss = 0
            for iter, (lr, hr, _) in enumerate(train_data_loader):
                # input data (low resolution image)
                mini_batch = lr.size()[0]

                if self.num_channels == 1:
                    x_ = Variable(utils.norm(hr[:, 0].unsqueeze(1), vgg=True))
                    y_ = Variable(utils.norm(lr[:, 0].unsqueeze(1), vgg=True))
                else:
                    x_ = Variable(utils.norm(hr, vgg=True))
                    y_ = Variable(utils.norm(lr, vgg=True))

                if self.gpu_mode:
                    x_ = x_.cuda()
                    y_ = y_.cuda()
                    # labels
                    real_label = Variable(torch.ones(mini_batch).cuda())
                    fake_label = Variable(torch.zeros(mini_batch).cuda())
                else:
                    # labels
                    real_label = Variable(torch.ones(mini_batch))
                    fake_label = Variable(torch.zeros(mini_batch))

                # Reset gradient
                self.D_optimizer.zero_grad()

                # Train discriminator with real data
                D_real_decision = self.D(x_)
                D_real_loss = self.BCE_loss(D_real_decision[:, 0], real_label)

                # Train discriminator with fake data
                recon_image = self.G(y_)
                D_fake_decision = self.D(recon_image)
                D_fake_loss = self.BCE_loss(D_fake_decision[:, 0], fake_label)

                D_loss = D_real_loss + D_fake_loss

                # Back propagation
                D_loss.backward()
                self.D_optimizer.step()

                # Reset gradient
                self.G_optimizer.zero_grad()

                # Train generator (fresh forward pass after D's update)
                recon_image = self.G(y_)
                D_fake_decision = self.D(recon_image)

                # Adversarial loss
                GAN_loss = self.BCE_loss(D_fake_decision[:, 0], real_label)

                # Content losses
                mse_loss = self.MSE_loss(recon_image, x_)
                # NOTE(review): the VGG branch always uses the full `hr`
                # tensor and calls .cuda() unconditionally — when
                # num_channels == 1 the channel counts differ from
                # recon_image, and CPU-only runs would fail here; confirm.
                x_VGG = Variable(utils.norm(hr, vgg=True).cuda())
                recon_VGG = Variable(
                    utils.norm(recon_image.data, vgg=True).cuda())
                real_feature = self.feature_extractor(x_VGG)
                fake_feature = self.feature_extractor(recon_VGG)
                vgg_loss = self.MSE_loss(fake_feature, real_feature.detach())

                # Back propagation (SRGAN-style weighted sum)
                G_loss = mse_loss + 6e-3 * vgg_loss + 1e-3 * GAN_loss
                G_loss.backward()
                self.G_optimizer.step()

                # log
                G_epoch_loss += G_loss.item()
                D_epoch_loss += D_loss.item()
                print("Epoch: [%2d] [%4d/%4d] G_loss: %.8f, D_loss: %.8f" %
                      ((epoch + 1), (iter + 1), len(train_data_loader),
                       G_loss.item(), D_loss.item()))

                # tensorboard logging
                #G_logger.scalar_summary('losses', G_loss.item(), step + 1)
                #D_logger.scalar_summary('losses', D_loss.item(), step + 1)
                step += 1

            # avg. loss per epoch
            G_avg_loss.append(G_epoch_loss / len(train_data_loader))
            D_avg_loss.append(D_epoch_loss / len(train_data_loader))

            # prediction on the fixed test image
            if self.num_channels == 1:
                y_ = Variable(utils.norm(test_lr[:, 0].unsqueeze(1), vgg=True))
            else:
                y_ = Variable(utils.norm(test_lr, vgg=True))

            if self.gpu_mode:
                y_ = y_.cuda()

            recon_img = self.G(y_)
            sr_img = utils.denorm(recon_img[0].cpu().data, vgg=True)

            # save result image
            save_dir = os.path.join(self.save_dir, 'train_result')
            utils.save_img(sr_img,
                           epoch + 1,
                           save_dir=save_dir,
                           is_training=True)
            print('Result image at epoch %d is saved.' % (epoch + 1))

            # Save trained parameters of model
            if (epoch + 1) % self.save_epochs == 0:
                self.save_model(epoch + 1)

        # calculate psnrs
        # NOTE(review): `sr_img` and `save_dir` below come from the last
        # loop iteration — NameError if self.num_epochs == 0; confirm.
        if self.num_channels == 1:
            gt_img = test_hr[0][0].unsqueeze(0)
            lr_img = test_lr[0][0].unsqueeze(0)
            bc_img = test_bc[0][0].unsqueeze(0)
        else:
            gt_img = test_hr[0]
            lr_img = test_lr[0]
            bc_img = test_bc[0]

        bc_psnr = utils.PSNR(bc_img, gt_img)
        recon_psnr = utils.PSNR(sr_img, gt_img)

        # plot result images
        result_imgs = [gt_img, lr_img, bc_img, sr_img]
        psnrs = [None, None, bc_psnr, recon_psnr]
        utils.plot_test_result(result_imgs,
                               psnrs,
                               self.num_epochs,
                               save_dir=save_dir,
                               is_training=True)
        print('Training result image is saved.')

        # Plot avg. loss
        utils.plot_loss([G_avg_loss, D_avg_loss],
                        self.num_epochs,
                        save_dir=self.save_dir)
        print("Training is finished.")

        # Save final trained parameters of model
        self.save_model(epoch=None)
Example #6
0
# CycleGAN test loop, direction A -> B: translate each A-domain image and
# reconstruct it back through the inverse generator, then plot the triple.
for i, real_A in enumerate(test_data_loader_A):

    # debug prints left in by the author: batch tuple length and shape
    print (len(real_A))
    # input image data
    real_A=real_A[0]
    print (len(real_A))
    print (real_A.shape)

    # move to GPU only when params.cuda is set
    real_A = Variable(gpuAvailable(real_A, params.cuda))

    # A -> B -> A
    fake_B = G_A(real_A)
    recon_A = G_B(fake_B)

    # Show result for test data
    utils.plot_test_result(real_A, fake_B, recon_A, i, save=True, save_dir=save_dir + 'AtoB/')

    print('%d images are generated.' % (i + 1))

# CycleGAN test loop, direction B -> A: mirror of the A->B loop above,
# using the generators in the opposite order.
for i, real_B in enumerate(test_data_loader_B):

    # input image data
    real_B = real_B[0]
    real_B = Variable(gpuAvailable(real_B, params.cuda))

    # B -> A -> B
    fake_A = G_B(real_B)
    recon_B = G_A(fake_A)

    # Show result for test data
    utils.plot_test_result(real_B, fake_A, recon_B, i, save=True, save_dir=save_dir + 'BtoA/')
Example #7
0
    def train(self):
        """Train SRCNN: MSE regression from bicubic-upscaled LR to HR.

        Border pixels are shaved from the target so the loss ignores
        boundary artifacts. Saves a progress comparison image each epoch
        and model checkpoints every ``self.save_epochs`` epochs.
        """
        # networks
        self.model = Net(num_channels=self.num_channels, base_filter=64)

        # weigh initialization
        self.model.weight_init(mean=0.0, std=0.001)

        # optimizer
        self.optimizer = optim.SGD(self.model.parameters(), lr=self.lr)

        # loss function
        if self.gpu_mode:
            self.model.cuda()
            self.MSE_loss = nn.MSELoss().cuda()
        else:
            self.MSE_loss = nn.MSELoss()

        print('---------- Networks architecture -------------')
        utils.print_network(self.model)
        print('----------------------------------------------')

        # load dataset
        train_data_loader = self.load_dataset(dataset='train')
        test_data_loader = self.load_dataset(dataset='test')

        # set the logger
        log_dir = os.path.join(self.save_dir, 'logs')
        if not os.path.exists(log_dir):
            os.mkdir(log_dir)
        logger = Logger(log_dir)

        ################# Train #################
        print('Training is started.')
        avg_loss = []
        step = 0

        # fixed test image (index 2) used to visualize progress each epoch
        test_input, test_target = test_data_loader.dataset.__getitem__(2)
        test_input = test_input.unsqueeze(0)
        test_target = test_target.unsqueeze(0)

        self.model.train()
        for epoch in range(self.num_epochs):

            epoch_loss = 0
            for iter, (input, target) in enumerate(train_data_loader):
                # input data (bicubic interpolated image)
                if self.gpu_mode:
                    # exclude border pixels from loss computation
                    x_ = Variable(utils.shave(target, border_size=8).cuda())
                    y_ = Variable(utils.img_interp(input, self.scale_factor).cuda())
                else:
                    x_ = Variable(utils.shave(target, border_size=8))
                    y_ = Variable(utils.img_interp(input, self.scale_factor))

                # update network
                self.optimizer.zero_grad()
                recon_image = self.model(y_)
                loss = self.MSE_loss(recon_image, x_)
                loss.backward()
                self.optimizer.step()

                # log
                # use .item() instead of pre-0.4 loss.data[0], which raises
                # on 0-dim tensors in PyTorch >= 0.5 (other trainers in this
                # project already use .item())
                epoch_loss += loss.item()
                print("Epoch: [%2d] [%4d/%4d] loss: %.8f" % ((epoch + 1), (iter + 1), len(train_data_loader), loss.item()))

                # tensorboard logging
                logger.scalar_summary('loss', loss.item(), step + 1)
                step += 1

            # avg. loss per epoch
            avg_loss.append(epoch_loss / len(train_data_loader))

            # prediction on the fixed test image; move to GPU only in
            # gpu_mode (the original called .cuda() unconditionally and
            # crashed on CPU-only runs)
            test_y_ = Variable(utils.img_interp(test_input, self.scale_factor))
            if self.gpu_mode:
                test_y_ = test_y_.cuda()
            recon_imgs = self.model(test_y_)
            recon_img = recon_imgs[0].cpu().data
            gt_img = utils.shave(test_target[0], border_size=8)
            lr_img = test_input[0]
            bc_img = utils.shave(utils.img_interp(test_input[0], self.scale_factor), border_size=8)

            # calculate psnrs
            bc_psnr = utils.PSNR(bc_img, gt_img)
            recon_psnr = utils.PSNR(recon_img, gt_img)

            # save result images
            result_imgs = [gt_img, lr_img, bc_img, recon_img]
            psnrs = [None, None, bc_psnr, recon_psnr]
            utils.plot_test_result(result_imgs, psnrs, epoch + 1, save_dir=self.save_dir, is_training=True)

            print("Saving training result images at epoch %d" % (epoch + 1))

            # Save trained parameters of model
            if (epoch + 1) % self.save_epochs == 0:
                self.save_model(epoch + 1)

        # Plot avg. loss
        utils.plot_loss([avg_loss], self.num_epochs, save_dir=self.save_dir)
        print("Training is finished.")

        # Save final trained parameters of model
        self.save_model(epoch=None)
Example #8
0
    def train(self):
        """Train VDSR: deep residual MSE regression with gradient clipping.

        Learning rate decays by 10x every 20 epochs; gradients are clipped
        to ``self.clip`` before each optimizer step. Saves a progress image
        each epoch and checkpoints every ``self.save_epochs`` epochs.
        """
        # networks
        self.model = Net(num_channels=self.num_channels,
                         base_filter=64,
                         num_residuals=18)

        # weigh initialization
        self.model.weight_init()

        # optimizer
        self.momentum = 0.9
        self.weight_decay = 0.0001
        self.clip = 0.4
        self.optimizer = optim.SGD(self.model.parameters(),
                                   lr=self.lr,
                                   momentum=self.momentum,
                                   weight_decay=self.weight_decay)

        # loss function
        if self.gpu_mode:
            self.model.cuda()
            self.MSE_loss = nn.MSELoss().cuda()
        else:
            self.MSE_loss = nn.MSELoss()

        print('---------- Networks architecture -------------')
        utils.print_network(self.model)
        print('----------------------------------------------')

        # load dataset
        train_data_loader = self.load_dataset(dataset='train')
        test_data_loader = self.load_dataset(dataset='test')

        # set the logger
        log_dir = os.path.join(self.save_dir, 'logs')
        if not os.path.exists(log_dir):
            os.mkdir(log_dir)
        logger = Logger(log_dir)

        ################# Train #################
        print('Training is started.')
        avg_loss = []
        step = 0

        # fixed test image (index 2) used to visualize progress each epoch
        test_input, test_target, test_bicubic = test_data_loader.dataset.__getitem__(
            2)
        test_input = test_input.unsqueeze(0)
        test_target = test_target.unsqueeze(0)

        self.model.train()
        for epoch in range(self.num_epochs):

            # learning rate is decayed by a factor of 10 every 20 epochs
            if (epoch + 1) % 20 == 0:
                for param_group in self.optimizer.param_groups:
                    param_group["lr"] /= 10.0
                print("Learning rate decay: lr={}".format(
                    self.optimizer.param_groups[0]["lr"]))

            epoch_loss = 0
            for iter, (input, target, bi) in enumerate(train_data_loader):
                # input data (bicubic interpolated image)
                if self.gpu_mode:
                    x_ = Variable(target.cuda())
                    y_ = Variable(
                        utils.img_interp(input, self.scale_factor).cuda())
                else:
                    x_ = Variable(target)
                    y_ = Variable(utils.img_interp(input, self.scale_factor))

                # update network
                self.optimizer.zero_grad()
                recon_image = self.model(y_)
                loss = self.MSE_loss(recon_image, x_)
                loss.backward()

                # gradient clipping (clip_grad_norm_ replaces the
                # deprecated clip_grad_norm, removed in modern PyTorch)
                nn.utils.clip_grad_norm_(self.model.parameters(), self.clip)
                self.optimizer.step()

                # log
                epoch_loss += loss.item()
                print("Epoch: [%2d] [%4d/%4d] loss: %.8f" %
                      ((epoch + 1),
                       (iter + 1), len(train_data_loader), loss.item()))

                # tensorboard logging
                #logger.scalar_summary('loss', loss.item(), step + 1)
                step += 1

            # avg. loss per epoch
            avg_loss.append(epoch_loss / len(train_data_loader))

            # prediction on the fixed test image; move to GPU only in
            # gpu_mode (the original called .cuda() unconditionally and
            # crashed on CPU-only runs)
            test_y_ = Variable(
                utils.img_interp(test_input, self.scale_factor))
            if self.gpu_mode:
                test_y_ = test_y_.cuda()
            recon_imgs = self.model(test_y_)
            recon_img = recon_imgs[0].cpu().data
            gt_img = test_target[0]
            lr_img = test_input[0]
            bc_img = utils.img_interp(test_input[0], self.scale_factor)

            # calculate psnrs
            bc_psnr = utils.PSNR(bc_img, gt_img)
            recon_psnr = utils.PSNR(recon_img, gt_img)

            # save result images
            result_imgs = [gt_img, lr_img, bc_img, recon_img]
            psnrs = [None, None, bc_psnr, recon_psnr]
            utils.plot_test_result(result_imgs,
                                   psnrs,
                                   epoch + 1,
                                   save_dir=self.save_dir,
                                   is_training=True)

            print("Saving training result images at epoch %d" % (epoch + 1))

            # Save trained parameters of model
            if (epoch + 1) % self.save_epochs == 0:
                self.save_model(epoch + 1)

        # Plot avg. loss
        utils.plot_loss([avg_loss], self.num_epochs, save_dir=self.save_dir)
        print("Training is finished.")

        # Save final trained parameters of model
        self.save_model(epoch=None)
Example #9
0
                              direction=params.direction,
                              transform=test_transform)
# Deterministic test loader (no shuffling so output indices are stable).
test_data_loader = torch.utils.data.DataLoader(dataset=test_data,
                                               batch_size=params.batch_size,
                                               shuffle=False)

# Load model
G = Generator(3, params.ngf, 3)
G.cuda()
G.load_state_dict(torch.load(model_dir + 'generator_param.pkl'))

# Test
for i, (input, target) in enumerate(test_data_loader):
    # input & target image data
    x_ = Variable(input.cuda())
    # NOTE(review): y_ is never used below — the target is plotted from
    # the raw `target` tensor instead; confirm this wrap is needed.
    y_ = Variable(target.cuda())

    gen_image = G(x_)
    gen_image = gen_image.cpu().data

    # Show result for test data
    utils.plot_test_result(input,
                           target,
                           gen_image,
                           i,
                           training=False,
                           save=True,
                           save_dir=save_dir)

    print('%d images are generated.' % (i + 1))
Example #10
0
    def train(self):
        """Train DRCN: recursive supervision with a weighted multi-loss.

        Total loss = alpha * (mean per-recursion MSE)
                   + (1 - alpha) * (final-output MSE)
                   + beta * L2 weight regularization,
        where alpha decays linearly to zero over the first 25 epochs.
        Learning rate decays 10x every 20 epochs. Saves a progress image
        each epoch and checkpoints every ``self.save_epochs`` epochs.
        """
        # networks
        self.num_recursions = 16
        self.model = Net(num_channels=self.num_channels,
                         base_filter=256,
                         num_recursions=self.num_recursions)

        # weigh initialization
        self.model.weight_init()

        # optimizer
        self.momentum = 0.9
        self.weight_decay = 0.0001
        self.loss_alpha = 1.0
        self.loss_alpha_zero_epoch = 25
        self.loss_alpha_decay = self.loss_alpha / self.loss_alpha_zero_epoch
        self.loss_beta = 0.001

        # learnable parameters (model weights plus the recursion-weight
        # vector self.model.w in its own parameter group)
        param_groups = list(self.model.parameters())
        param_groups = [{'params': param_groups}]
        param_groups += [{'params': [self.model.w]}]
        self.optimizer = optim.Adam(param_groups, lr=self.lr)

        # loss function
        if self.gpu_mode:
            self.model.cuda()
            self.MSE_loss = nn.MSELoss().cuda()
        else:
            self.MSE_loss = nn.MSELoss()

        print('---------- Networks architecture -------------')
        utils.print_network(self.model)
        print('----------------------------------------------')

        # load dataset
        train_data_loader = self.load_dataset(dataset='train')
        test_data_loader = self.load_dataset(dataset='test')

        # set the logger
        log_dir = os.path.join(self.save_dir, 'logs')
        if not os.path.exists(log_dir):
            os.mkdir(log_dir)
        logger = Logger(log_dir)

        ################# Train #################
        print('Training is started.')
        avg_loss = []
        step = 0

        # fixed test image (index 2) used to visualize progress each epoch
        test_input, test_target = test_data_loader.dataset.__getitem__(2)
        test_input = test_input.unsqueeze(0)
        test_target = test_target.unsqueeze(0)

        self.model.train()
        for epoch in range(self.num_epochs):

            # learning rate is decayed by a factor of 10 every 20 epochs
            if (epoch + 1) % 20 == 0:
                for param_group in self.optimizer.param_groups:
                    param_group["lr"] /= 10.0
                print("Learning rate decay: lr={}".format(
                    self.optimizer.param_groups[0]["lr"]))

            # loss_alpha decayed to zero after 25 epochs
            self.loss_alpha = max(0.0, self.loss_alpha - self.loss_alpha_decay)

            epoch_loss = 0
            for iter, (input, target) in enumerate(train_data_loader):
                # input data (bicubic interpolated image)
                if self.gpu_mode:
                    y = Variable(target.cuda())
                    x = Variable(
                        utils.img_interp(input, self.scale_factor).cuda())
                else:
                    y = Variable(target)
                    x = Variable(utils.img_interp(input, self.scale_factor))

                # update network
                self.optimizer.zero_grad()
                y_d_, y_ = self.model(x)

                # loss1: mean MSE over all intermediate recursion outputs
                loss1 = 0
                for d in range(self.num_recursions):
                    loss1 += (self.MSE_loss(y_d_[d], y) / self.num_recursions)

                # loss2: MSE of the final (weighted) output
                loss2 = self.MSE_loss(y_, y)

                # regularization: L2 penalty over all model weights
                reg_term = 0
                for theta in self.model.parameters():
                    reg_term += torch.mean(torch.sum(theta**2))

                # total loss

                loss = self.loss_alpha * loss1 + (
                    1 - self.loss_alpha) * loss2 + self.loss_beta * reg_term
                loss.backward()
                self.optimizer.step()

                # log
                # use .item() instead of pre-0.4 loss.data[0], which raises
                # on 0-dim tensors in PyTorch >= 0.5 (other trainers in
                # this project already use .item())
                epoch_loss += loss.item()
                print("Epoch: [%2d] [%4d/%4d] loss: %.8f" %
                      ((epoch + 1),
                       (iter + 1), len(train_data_loader), loss.item()))

                # tensorboard logging
                logger.scalar_summary('loss', loss.item(), step + 1)
                step += 1

            # avg. loss per epoch
            avg_loss.append(epoch_loss / len(train_data_loader))

            # prediction on the fixed test image; move to GPU only in
            # gpu_mode (the original called .cuda() unconditionally and
            # crashed on CPU-only runs)
            test_x = Variable(
                utils.img_interp(test_input, self.scale_factor))
            if self.gpu_mode:
                test_x = test_x.cuda()
            _, recon_imgs = self.model(test_x)
            recon_img = recon_imgs[0].cpu().data
            gt_img = test_target[0]
            lr_img = test_input[0]
            bc_img = utils.img_interp(test_input[0], self.scale_factor)

            # calculate psnrs
            bc_psnr = utils.PSNR(bc_img, gt_img)
            recon_psnr = utils.PSNR(recon_img, gt_img)

            # save result images
            result_imgs = [gt_img, lr_img, bc_img, recon_img]
            psnrs = [None, None, bc_psnr, recon_psnr]
            utils.plot_test_result(result_imgs,
                                   psnrs,
                                   epoch + 1,
                                   save_dir=self.save_dir,
                                   is_training=True)

            print("Saving training result images at epoch %d" % (epoch + 1))

            # Save trained parameters of model
            if (epoch + 1) % self.save_epochs == 0:
                self.save_model(epoch + 1)

        # Plot avg. loss
        utils.plot_loss([avg_loss], self.num_epochs, save_dir=self.save_dir)
        print("Training is finished.")

        # Save final trained parameters of model
        self.save_model(epoch=None)
Example #11
0
    def train(self):
        """Train the SR generator, adversarially when 'A' is in self.model_loss.

        Builds the generator (and a discriminator for the adversarial case),
        runs the epoch/batch loop, logs losses to tensorboard, saves a
        per-epoch progress image, loss plots, and periodic checkpoints under
        ``self.save_dir``. Resumes from a checkpoint when ``self.pre_epochs``
        is non-zero.
        """
        # networks
        self.model = Net(3, 64, 3, 1)
        LossNet.creat_loss_Net(self)
        if self.gpu_mode:
            self.model.cuda()

        if self.pre_epochs == 0:
            # weight initialization for a fresh run
            self.model.weight_init()
        else:
            # resume; args: (self, is training or not, is E_model(True) or D_model(False))
            utils.load_model(self, True, True)

        # optimizer
        self.optimizer = optim.Adam(self.model.parameters(), lr=self.lr)

        self.valid = None
        self.fake = None
        if 'A' in self.model_loss:
            self.discriminator = Discriminator().cuda(
            ) if self.gpu_mode else Discriminator()
            Tensor = torch.cuda.FloatTensor if self.gpu_mode else torch.Tensor
            self.optimizer_D = torch.optim.Adam(
                self.discriminator.parameters(), lr=0.0002, betas=(0.5, 0.999))

            if self.pre_epochs != 0:
                utils.load_model(self, True, False)

            if self.loss_F == "BCEWithLogitsLoss":
                self.criterion_GAN = nn.BCEWithLogitsLoss(
                    size_average=False).cuda(
                    ) if self.gpu_mode else nn.BCEWithLogitsLoss(
                        size_average=False)
            # NOTE(review): if self.loss_F is neither "BCEWithLogitsLoss" nor
            # "MSE", loss_real/loss_fake in the batch loop are never bound and
            # the discriminator step raises NameError — confirm allowed values.

        # load dataset
        train_data_loader = self.load_dataset(dataset=self.train_dataset,
                                              is_train=True)
        test_data_loader = self.load_dataset(dataset=self.test_dataset[0],
                                             is_train=False)

        # set the logger
        log_dir = os.path.join(self.save_dir, 'logs')
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)
        logger = Logger(log_dir)

        ################# Train #################
        print('Training is started.')
        avg_loss = []
        step = 0
        avg_loss_D = []
        # fixed test sample used for per-epoch visual progress images
        test_lr, test_hr, test_bc, name = test_data_loader.dataset[2]

        test_lr = test_lr.unsqueeze(0)
        test_hr = test_hr.unsqueeze(0)
        test_bc = test_bc.unsqueeze(0)

        self.model.train()

        for epoch in range(self.num_epochs):

            # learning rate is decayed by a factor of 2 every 40 epochs
            if (epoch + 1) % 40 == 0:
                for param_group in self.optimizer.param_groups:
                    param_group['lr'] /= 2.0
                print('Learning rate decay: lr={}'.format(
                    self.optimizer.param_groups[0]['lr']))

            epoch_loss = 0
            epoch_loss_D = 0
            for batch_idx, (lr, hr, bc_lr) in enumerate(train_data_loader):
                # input data (low resolution image)
                # BUG FIX: the CPU fallbacks previously wrapped `hr` for all
                # three tensors, feeding the HR target as the network input
                # and as the bicubic residual base on non-GPU runs.
                x_ = Variable(hr).cuda() if self.gpu_mode else Variable(hr)
                y_ = Variable(lr).cuda() if self.gpu_mode else Variable(lr)
                bc_y_ = Variable(bc_lr).cuda() if self.gpu_mode else Variable(
                    bc_lr)

                # the network predicts a residual over the bicubic upscale
                recon_image = self.model(y_)
                recon_image = recon_image + bc_y_
                loss_G = 0
                loss_D = 0
                style_loss = 0
                loss_T = []
                if 'A' in self.model_loss:
                    # ---------------------
                    #  Train Discriminator
                    # ---------------------
                    # per-batch label tensors: 1 = real, 0 = fake
                    self.valid = Variable(Tensor(
                        np.ones(x_.size()[0]).reshape((x_.size()[0], 1))),
                                          requires_grad=False)
                    self.fake = Variable(Tensor(
                        np.zeros(x_.size()[0]).reshape((x_.size()[0], 1))),
                                         requires_grad=False)
                    if self.gpu_mode:
                        # BUG FIX: .cuda() is not in-place, so the results
                        # must be rebound (Tensor is already a cuda type in
                        # gpu_mode, so this is only a safeguard).
                        self.valid = self.valid.cuda()
                        self.fake = self.fake.cuda()

                    self.optimizer_D.zero_grad()

                    # Loss of real and fake images; detach() keeps the
                    # generator out of the discriminator's backward pass
                    if self.loss_F == "BCEWithLogitsLoss":
                        loss_real = self.criterion_GAN(self.discriminator(x_),
                                                       self.valid)
                        loss_fake = self.criterion_GAN(
                            self.discriminator(recon_image.detach()),
                            self.fake)
                    elif self.loss_F == "MSE":
                        loss_real = self.mse_loss(self.discriminator(x_),
                                                  self.valid)
                        loss_fake = self.mse_loss(
                            self.discriminator(recon_image.detach()),
                            self.fake)

                    # Total loss
                    loss_D = (loss_real + loss_fake) / 2

                    loss_D.backward()
                    self.optimizer_D.step()
                    epoch_loss_D += loss_D.data[0]

                    if batch_idx % self.D_period == 0:
                        # -----------------
                        #  Train Generator (every D_period batches only)
                        # -----------------
                        self.optimizer.zero_grad()

                        loss_a, loss_output_m2, loss_output_m5, style_score, loss_G, loss_T = Loss.loss_op(
                            self, self, recon_image, x_)

                        # weighted combination of perceptual (m2/m5),
                        # adversarial and style terms
                        loss = (2*0.1*loss_output_m2) + \
                            (2*0.01*loss_output_m5) + \
                            loss_a*2 + style_score*1e-6
                        loss.backward()
                        self.optimizer.step()

                        # log
                        epoch_loss += loss.data[0]
                        utils.print_loss(self, epoch, len(train_data_loader),
                                         loss, style_score, loss_output_m2,
                                         loss_output_m5, batch_idx, loss_a,
                                         loss_D, loss_G, loss_T)

                        # tensorboard logging
                        logger.scalar_summary('loss', loss.data[0], step + 1)
                        step += 1
                        del x_, y_, bc_y_
                        if self.gpu_mode:
                            torch.cuda.empty_cache()
                else:
                    # update network (non-adversarial path)
                    self.optimizer.zero_grad()

                    loss_a, loss_output_m2, loss_output_m5, style_score, loss_G, loss_T = Loss.loss_op(
                        self, self, recon_image, x_)
                    loss = (2*0.1*loss_output_m2) + \
                        (2*0.01*loss_output_m5) + \
                        loss_a*1e-3 + style_score*1e-6

                    loss.backward()
                    self.optimizer.step()

                    # log
                    epoch_loss += loss.data[0]
                    utils.print_loss(self, epoch, len(train_data_loader), loss,
                                     style_score, loss_output_m2,
                                     loss_output_m5, batch_idx, loss_a, loss_D,
                                     loss_G, loss_T)

                    # tensorboard logging
                    logger.scalar_summary('loss', loss.data[0], step + 1)
                    step += 1
                    del x_, y_, bc_y_
                    if self.gpu_mode:
                        torch.cuda.empty_cache()

            # avg. loss per epoch
            avg_loss.append(epoch_loss / len(train_data_loader))

            # prediction on the fixed test sample
            y_ = Variable(test_lr).cuda() if self.gpu_mode else Variable(
                test_lr)
            bc = Variable(test_bc).cuda() if self.gpu_mode else Variable(
                test_bc)

            recon_img = self.model(y_)
            recon_img = recon_img + bc
            sr_img = recon_img[0].cpu().data

            # save result image
            save_dir = os.path.join(self.save_dir, 'train_result')
            utils.save_img(sr_img,
                           epoch + 1,
                           save_dir=save_dir,
                           is_training=True)
            print('Result image at epoch %d is saved.' % (epoch + 1))

            # plot running average generator loss
            utils.plot_loss([avg_loss], epoch, save_dir=save_dir)

            if 'A' in self.model_loss:
                avg_loss_D.append(epoch_loss_D / len(train_data_loader))
                utils.plot_loss([avg_loss_D], epoch, save_dir=save_dir + '/D')

            del y_, bc
            if self.gpu_mode:
                torch.cuda.empty_cache()

            # Save trained parameters of model
            if (epoch + 1) % self.save_epochs == 0:
                utils.save_model(self, epoch + 1)
                test_v2.save_TrainingTest(self, epoch + 1)

        # calculate psnrs
        # NOTE(review): sr_img/save_dir are bound inside the epoch loop, so
        # this final section assumes self.num_epochs >= 1.
        if self.num_channels == 1:
            gt_img = test_hr[0][0].unsqueeze(0)
            lr_img = test_lr[0][0].unsqueeze(0)
            bc_img = test_bc[0][0].unsqueeze(0)
        else:
            gt_img = test_hr[0]
            lr_img = test_lr[0]
            bc_img = test_bc[0]

        bc_psnr = utils.PSNR(bc_img, gt_img)
        recon_psnr = utils.PSNR(sr_img, gt_img)

        # plot result images
        result_imgs = [gt_img, lr_img, bc_img, sr_img]
        psnrs = [None, None, bc_psnr, recon_psnr]
        utils.plot_test_result(result_imgs,
                               psnrs,
                               self.num_epochs,
                               save_dir=save_dir,
                               is_training=True)
        print('Training result image is saved.')

        # Plot avg. loss
        utils.plot_loss([avg_loss], self.num_epochs, save_dir=save_dir)
        print('Training is finished.')

        # Save final trained parameters of model
        utils.save_model(self, epoch=None)
Example #12
0
    def test(self):
        """Evaluate the trained model on every dataset in self.test_dataset.

        For each test image, saves the super-resolved output and a PSNR
        comparison plot (ground truth / LR / bicubic / reconstruction) under
        ``self.save_dir/test_result/<dataset>``.
        """
        # networks
        self.model = Net(3, 64, 3, 1)
        # load model; args: (self, is training or not, is E_model(True) or D_model(False))
        utils.load_model(self, False, False)

        if self.gpu_mode:
            self.model.cuda()

        # load dataset
        for test_dataset in self.test_dataset:
            test_data_loader = self.load_dataset(dataset=test_dataset,
                                                 is_train=False)

            # Test
            print('Test is started.')
            img_num = 0
            total_img_num = len(test_data_loader)
            self.model.eval()
            for lr, hr, bc, _ in test_data_loader:
                # input data (low resolution image)
                # BUG FIX: the CPU fallbacks previously wrapped `hr`, feeding
                # the HR target as the network input and as the bicubic
                # residual base on non-GPU runs. The unused `x_` wrapper of
                # `hr` was removed as well.
                y_ = Variable(lr).cuda() if self.gpu_mode else Variable(lr)
                bc_ = Variable(bc).cuda() if self.gpu_mode else Variable(bc)

                # prediction: the network outputs a residual over the bicubic
                # upscale; use an out-of-place add to avoid mutating the
                # autograd output in place
                recon_imgs = self.model(y_)
                recon_imgs = recon_imgs + bc_
                for i, recon_img in enumerate(recon_imgs):
                    img_num += 1
                    sr_img = recon_img.cpu().data

                    # save result image
                    save_dir = os.path.join(self.save_dir, 'test_result',
                                            test_dataset)
                    utils.save_img(sr_img, img_num, save_dir=save_dir)

                    # calculate psnrs (first channel only in 1-channel mode)
                    if self.num_channels == 1:
                        gt_img = hr[i][0].unsqueeze(0)
                        lr_img = lr[i][0].unsqueeze(0)
                        bc_img = bc[i][0].unsqueeze(0)
                    else:
                        gt_img = hr[i]
                        lr_img = lr[i]
                        bc_img = bc[i]

                    bc_psnr = utils.PSNR(bc_img, gt_img)
                    recon_psnr = utils.PSNR(sr_img, gt_img)

                    # plot result images
                    result_imgs = [gt_img, lr_img, bc_img, sr_img]
                    psnrs = [None, None, bc_psnr, recon_psnr]
                    utils.plot_test_result(result_imgs,
                                           psnrs,
                                           img_num,
                                           save_dir=save_dir)

                    print('Test DB: %s, Saving result images...[%d/%d]' %
                          (test_dataset, img_num, total_img_num))

            # typo fix: message previously read 'finishied'
            print('Test is finished.')
    def train(self):
        """Train the residual SR network with an L1 reconstruction loss.

        Runs the epoch/batch loop with step-decayed Adam, logs losses to
        tensorboard, saves a per-epoch progress image and periodic
        checkpoints, and finally plots the average loss and a PSNR comparison
        under ``self.save_dir``.
        """
        # networks
        self.model = Net(num_channels=self.num_channels,
                         base_filter=64,
                         num_residuals=16)

        # weight initialization
        self.model.weight_init()

        # optimizer
        self.optimizer = optim.Adam(self.model.parameters(),
                                    lr=self.lr,
                                    betas=(0.9, 0.999),
                                    eps=1e-8)

        # loss function
        if self.gpu_mode:
            self.model.cuda()
            self.L1_loss = nn.L1Loss().cuda()
        else:
            self.L1_loss = nn.L1Loss()

        print('---------- Networks architecture -------------')
        utils.print_network(self.model)
        print('----------------------------------------------')

        # load dataset
        train_data_loader = self.load_dataset(dataset=self.train_dataset,
                                              is_train=True)
        test_data_loader = self.load_dataset(dataset=self.test_dataset[0],
                                             is_train=False)

        # set the logger
        log_dir = os.path.join(self.save_dir, 'logs')
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)
        logger = Logger(log_dir)

        ################# Train #################
        print('Training is started.')
        avg_loss = []
        step = 0

        # fixed test sample used for per-epoch visual progress images
        test_lr, test_hr, test_bc = test_data_loader.dataset[2]
        test_lr = test_lr.unsqueeze(0)
        test_hr = test_hr.unsqueeze(0)
        test_bc = test_bc.unsqueeze(0)

        self.model.train()
        for epoch in range(self.num_epochs):

            # learning rate is decayed by a factor of 2 every 40 epochs
            if (epoch + 1) % 40 == 0:
                for param_group in self.optimizer.param_groups:
                    param_group['lr'] /= 2.0
                print('Learning rate decay: lr={}'.format(
                    self.optimizer.param_groups[0]['lr']))

            epoch_loss = 0
            # (renamed loop variable: `iter` shadowed the builtin)
            for batch_idx, (lr, hr, _) in enumerate(train_data_loader):
                # input data (low resolution image); keep only the first
                # channel in 1-channel mode
                if self.num_channels == 1:
                    x_ = Variable(hr[:, 0].unsqueeze(1))
                    y_ = Variable(lr[:, 0].unsqueeze(1))
                else:
                    x_ = Variable(hr)
                    y_ = Variable(lr)

                if self.gpu_mode:
                    x_ = x_.cuda()
                    y_ = y_.cuda()

                # update network
                self.optimizer.zero_grad()
                recon_image = self.model(y_)
                loss = self.L1_loss(recon_image, x_)
                loss.backward()
                self.optimizer.step()

                # log
                epoch_loss += loss.data[0]
                print('Epoch: [%2d] [%4d/%4d] loss: %.8f' %
                      ((epoch + 1),
                       (batch_idx + 1), len(train_data_loader), loss.data[0]))

                # tensorboard logging
                logger.scalar_summary('loss', loss.data[0], step + 1)
                step += 1

            # avg. loss per epoch
            avg_loss.append(epoch_loss / len(train_data_loader))

            # prediction on the fixed test sample
            if self.num_channels == 1:
                y_ = Variable(test_lr[:, 0].unsqueeze(1))
            else:
                y_ = Variable(test_lr)

            if self.gpu_mode:
                y_ = y_.cuda()

            recon_img = self.model(y_)
            sr_img = recon_img[0].cpu().data

            # save result image
            save_dir = os.path.join(self.save_dir, 'train_result')
            utils.save_img(sr_img,
                           epoch + 1,
                           save_dir=save_dir,
                           is_training=True)
            print('Result image at epoch %d is saved.' % (epoch + 1))

            # Save trained parameters of model
            if (epoch + 1) % self.save_epochs == 0:
                self.save_model(epoch + 1)

        # calculate psnrs
        # NOTE(review): sr_img/save_dir are bound inside the epoch loop, so
        # this final section assumes self.num_epochs >= 1.
        if self.num_channels == 1:
            gt_img = test_hr[0][0].unsqueeze(0)
            lr_img = test_lr[0][0].unsqueeze(0)
            bc_img = test_bc[0][0].unsqueeze(0)
        else:
            gt_img = test_hr[0]
            lr_img = test_lr[0]
            bc_img = test_bc[0]

        bc_psnr = utils.PSNR(bc_img, gt_img)
        recon_psnr = utils.PSNR(sr_img, gt_img)

        # plot result images
        result_imgs = [gt_img, lr_img, bc_img, sr_img]
        psnrs = [None, None, bc_psnr, recon_psnr]
        utils.plot_test_result(result_imgs,
                               psnrs,
                               self.num_epochs,
                               save_dir=save_dir,
                               is_training=True)
        print('Training result image is saved.')

        # Plot avg. loss
        utils.plot_loss([avg_loss], self.num_epochs, save_dir=save_dir)
        print('Training is finished.')

        # Save final trained parameters of model
        self.save_model(epoch=None)