Example #1
    def test(self):
        # networks
        self.num_recursions = 16
        self.model = Net(num_channels=self.num_channels,
                         base_filter=256,
                         num_recursions=self.num_recursions)

        if self.gpu_mode:
            self.model.cuda()

        # load model
        self.load_model()

        # load dataset
        test_data_loader = self.load_dataset(dataset='test')

        # Test
        print('Test is started.')
        img_num = 0
        self.model.eval()
        for input, target in test_data_loader:
            # input data (bicubic interpolated image)
            if self.gpu_mode:
                y_ = Variable(
                    utils.img_interp(input, self.scale_factor).cuda())
            else:
                y_ = Variable(utils.img_interp(input, self.scale_factor))

            # prediction
            _, recon_imgs = self.model(y_)
            for i, recon_img in enumerate(recon_imgs):
                img_num += 1
                recon_img = recon_img.cpu().data
                gt_img = target[i]
                lr_img = input[i]
                bc_img = utils.img_interp(input[i], self.scale_factor)

                # calculate psnrs
                bc_psnr = utils.PSNR(bc_img, gt_img)
                recon_psnr = utils.PSNR(recon_img, gt_img)

                # save result images
                result_imgs = [gt_img, lr_img, bc_img, recon_img]
                psnrs = [None, None, bc_psnr, recon_psnr]
                utils.plot_test_result(result_imgs,
                                       psnrs,
                                       img_num,
                                       save_dir=self.save_dir)

                print("Saving %d test result images..." % img_num)
Example #2
    def test(self):
        # networks
        self.model = Net(num_channels=self.num_channels, base_filter=64, scale_factor=self.scale_factor)

        if self.gpu_mode:
            self.model.cuda()

        # load model
        self.load_model()

        # load dataset
        test_data_loader = self.load_dataset(dataset='test')

        # Test
        print('Test is started.')
        img_num = 0
        self.model.eval()
        for input, target in test_data_loader:
            # input data (low resolution image)
            if self.gpu_mode:
                y_ = Variable(input.cuda())
            else:
                y_ = Variable(input)

            # prediction
            recon_imgs = self.model(y_)
            for i in range(recon_imgs.size(0)):  # actual batch size; the last batch may be smaller than test_batch_size
                img_num += 1
                recon_img = recon_imgs[i].cpu().data
                gt_img = utils.shave(target[i], border_size=8 * self.scale_factor)
                lr_img = utils.shave(input[i], border_size=8)
                bc_img = utils.shave(utils.img_interp(input[i], self.scale_factor), border_size=8 * self.scale_factor)

                # calculate psnrs
                bc_psnr = utils.PSNR(bc_img, gt_img)
                recon_psnr = utils.PSNR(recon_img, gt_img)

                # save result images
                result_imgs = [gt_img, lr_img, bc_img, recon_img]
                psnrs = [None, None, bc_psnr, recon_psnr]
                utils.plot_test_result(result_imgs, psnrs, img_num, save_dir=self.save_dir)

                print("Saving %d test result images..." % img_num)
Example #3
    def train(self):
        # networks
        self.model = Net(num_channels=self.num_channels, base_filter=64)

        # weight initialization
        self.model.weight_init(mean=0.0, std=0.001)

        # optimizer
        self.optimizer = optim.SGD(self.model.parameters(), lr=self.lr)

        # loss function
        if self.gpu_mode:
            self.model.cuda()
            self.MSE_loss = nn.MSELoss().cuda()
        else:
            self.MSE_loss = nn.MSELoss()

        print('---------- Networks architecture -------------')
        utils.print_network(self.model)
        print('----------------------------------------------')

        # load dataset
        train_data_loader = self.load_dataset(dataset='train')
        test_data_loader = self.load_dataset(dataset='test')

        # set the logger
        log_dir = os.path.join(self.save_dir, 'logs')
        if not os.path.exists(log_dir):
            os.mkdir(log_dir)
        logger = Logger(log_dir)

        ################# Train #################
        print('Training is started.')
        avg_loss = []
        step = 0

        # test image
        test_input, test_target = test_data_loader.dataset[2]
        test_input = test_input.unsqueeze(0)
        test_target = test_target.unsqueeze(0)

        self.model.train()
        for epoch in range(self.num_epochs):

            epoch_loss = 0
            for iter, (input, target) in enumerate(train_data_loader):
                # input data (bicubic interpolated image)
                if self.gpu_mode:
                    # exclude border pixels from loss computation
                    x_ = Variable(utils.shave(target, border_size=8).cuda())
                    y_ = Variable(utils.img_interp(input, self.scale_factor).cuda())
                else:
                    x_ = Variable(utils.shave(target, border_size=8))
                    y_ = Variable(utils.img_interp(input, self.scale_factor))

                # update network
                self.optimizer.zero_grad()
                recon_image = self.model(y_)
                loss = self.MSE_loss(recon_image, x_)
                loss.backward()
                self.optimizer.step()

                # log
                epoch_loss += loss.item()
                print("Epoch: [%2d] [%4d/%4d] loss: %.8f" % ((epoch + 1), (iter + 1), len(train_data_loader), loss.item()))

                # tensorboard logging
                logger.scalar_summary('loss', loss.item(), step + 1)
                step += 1

            # avg. loss per epoch
            avg_loss.append(epoch_loss / len(train_data_loader))

            # prediction
            recon_imgs = self.model(Variable(utils.img_interp(test_input, self.scale_factor).cuda()))
            recon_img = recon_imgs[0].cpu().data
            gt_img = utils.shave(test_target[0], border_size=8)
            lr_img = test_input[0]
            bc_img = utils.shave(utils.img_interp(test_input[0], self.scale_factor), border_size=8)

            # calculate psnrs
            bc_psnr = utils.PSNR(bc_img, gt_img)
            recon_psnr = utils.PSNR(recon_img, gt_img)

            # save result images
            result_imgs = [gt_img, lr_img, bc_img, recon_img]
            psnrs = [None, None, bc_psnr, recon_psnr]
            utils.plot_test_result(result_imgs, psnrs, epoch + 1, save_dir=self.save_dir, is_training=True)

            print("Saving training result images at epoch %d" % (epoch + 1))

            # Save trained parameters of model
            if (epoch + 1) % self.save_epochs == 0:
                self.save_model(epoch + 1)

        # Plot avg. loss
        utils.plot_loss([avg_loss], self.num_epochs, save_dir=self.save_dir)
        print("Training is finished.")

        # Save final trained parameters of model
        self.save_model(epoch=None)
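Example #3 excludes border pixels from the loss with utils.shave, and Example #2 applies the same helper before computing PSNR. The helper itself is not reproduced above; a minimal sketch, assuming the last two tensor dimensions are height and width (the real utils.shave may handle edge cases differently):

def shave(img, border_size):
    # crop border_size pixels from every spatial edge;
    # works for CHW and NCHW tensors alike
    return img[..., border_size:-border_size, border_size:-border_size]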
Example #4
    def train(self):
        # networks
        self.model = Net(num_channels=self.num_channels,
                         base_filter=64,
                         num_residuals=18)

        # weight initialization
        self.model.weight_init()

        # optimizer
        self.momentum = 0.9
        self.weight_decay = 0.0001
        self.clip = 0.4
        self.optimizer = optim.SGD(self.model.parameters(),
                                   lr=self.lr,
                                   momentum=self.momentum,
                                   weight_decay=self.weight_decay)

        # loss function
        if self.gpu_mode:
            self.model.cuda()
            self.MSE_loss = nn.MSELoss().cuda()
        else:
            self.MSE_loss = nn.MSELoss()

        print('---------- Networks architecture -------------')
        utils.print_network(self.model)
        print('----------------------------------------------')

        # load dataset
        train_data_loader = self.load_dataset(dataset='train')
        test_data_loader = self.load_dataset(dataset='test')

        # set the logger
        log_dir = os.path.join(self.save_dir, 'logs')
        if not os.path.exists(log_dir):
            os.mkdir(log_dir)
        logger = Logger(log_dir)

        ################# Train #################
        print('Training is started.')
        avg_loss = []
        step = 0

        # test image
        test_input, test_target, test_bicubic = test_data_loader.dataset[2]
        test_input = test_input.unsqueeze(0)
        test_target = test_target.unsqueeze(0)

        self.model.train()
        for epoch in range(self.num_epochs):

            # learning rate is decayed by a factor of 10 every 20 epochs
            if (epoch + 1) % 20 == 0:
                for param_group in self.optimizer.param_groups:
                    param_group["lr"] /= 10.0
                print("Learning rate decay: lr={}".format(
                    self.optimizer.param_groups[0]["lr"]))

            epoch_loss = 0
            for iter, (input, target, bi) in enumerate(train_data_loader):
                # input data (bicubic interpolated image)
                if self.gpu_mode:
                    x_ = Variable(target.cuda())
                    y_ = Variable(
                        utils.img_interp(input, self.scale_factor).cuda())
                else:
                    x_ = Variable(target)
                    y_ = Variable(utils.img_interp(input, self.scale_factor))

                # update network
                self.optimizer.zero_grad()
                recon_image = self.model(y_)
                loss = self.MSE_loss(recon_image, x_)
                loss.backward()

                # gradient clipping
                nn.utils.clip_grad_norm_(self.model.parameters(), self.clip)
                self.optimizer.step()

                # log
                epoch_loss += loss.item()
                print("Epoch: [%2d] [%4d/%4d] loss: %.8f" %
                      ((epoch + 1),
                       (iter + 1), len(train_data_loader), loss.item()))

                # tensorboard logging
                #logger.scalar_summary('loss', loss.item(), step + 1)
                step += 1

            # avg. loss per epoch
            avg_loss.append(epoch_loss / len(train_data_loader))

            # prediction
            recon_imgs = self.model(
                Variable(
                    utils.img_interp(test_input, self.scale_factor).cuda()))
            recon_img = recon_imgs[0].cpu().data
            gt_img = test_target[0]
            lr_img = test_input[0]
            bc_img = utils.img_interp(test_input[0], self.scale_factor)

            # calculate psnrs
            bc_psnr = utils.PSNR(bc_img, gt_img)
            recon_psnr = utils.PSNR(recon_img, gt_img)

            # save result images
            result_imgs = [gt_img, lr_img, bc_img, recon_img]
            psnrs = [None, None, bc_psnr, recon_psnr]
            utils.plot_test_result(result_imgs,
                                   psnrs,
                                   epoch + 1,
                                   save_dir=self.save_dir,
                                   is_training=True)

            print("Saving training result images at epoch %d" % (epoch + 1))

            # Save trained parameters of model
            if (epoch + 1) % self.save_epochs == 0:
                self.save_model(epoch + 1)

        # Plot avg. loss
        utils.plot_loss([avg_loss], self.num_epochs, save_dir=self.save_dir)
        print("Training is finished.")

        # Save final trained parameters of model
        self.save_model(epoch=None)
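Every loop in these examples relies on utils.img_interp to produce the bicubic-upscaled network input. Its implementation is not shown; one possible sketch using torch.nn.functional.interpolate (the repository's utils.img_interp may instead use PIL or scipy, so treat the details as an assumption):

import torch.nn.functional as F

def img_interp(img, scale_factor):
    # bicubic upscaling; F.interpolate expects a 4D NCHW tensor,
    # so a single CHW image temporarily gets a batch dimension
    squeeze = (img.dim() == 3)
    if squeeze:
        img = img.unsqueeze(0)
    out = F.interpolate(img, scale_factor=scale_factor,
                        mode='bicubic', align_corners=False)
    return out.squeeze(0) if squeeze else out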
Example #5
    def train(self):
        # networks
        self.num_recursions = 16
        self.model = Net(num_channels=self.num_channels,
                         base_filter=256,
                         num_recursions=self.num_recursions)

        # weight initialization
        self.model.weight_init()

        # optimizer
        self.momentum = 0.9
        self.weight_decay = 0.0001
        self.loss_alpha = 1.0
        self.loss_alpha_zero_epoch = 25
        self.loss_alpha_decay = self.loss_alpha / self.loss_alpha_zero_epoch
        self.loss_beta = 0.001

        # learnable parameters
        param_groups = list(self.model.parameters())
        param_groups = [{'params': param_groups}]
        param_groups += [{'params': [self.model.w]}]
        self.optimizer = optim.Adam(param_groups, lr=self.lr)

        # loss function
        if self.gpu_mode:
            self.model.cuda()
            self.MSE_loss = nn.MSELoss().cuda()
        else:
            self.MSE_loss = nn.MSELoss()

        print('---------- Networks architecture -------------')
        utils.print_network(self.model)
        print('----------------------------------------------')

        # load dataset
        train_data_loader = self.load_dataset(dataset='train')
        test_data_loader = self.load_dataset(dataset='test')

        # set the logger
        log_dir = os.path.join(self.save_dir, 'logs')
        if not os.path.exists(log_dir):
            os.mkdir(log_dir)
        logger = Logger(log_dir)

        ################# Train #################
        print('Training is started.')
        avg_loss = []
        step = 0

        # test image
        test_input, test_target = test_data_loader.dataset[2]
        test_input = test_input.unsqueeze(0)
        test_target = test_target.unsqueeze(0)

        self.model.train()
        for epoch in range(self.num_epochs):

            # learning rate is decayed by a factor of 10 every 20 epochs
            if (epoch + 1) % 20 == 0:
                for param_group in self.optimizer.param_groups:
                    param_group["lr"] /= 10.0
                print("Learning rate decay: lr={}".format(
                    self.optimizer.param_groups[0]["lr"]))

            # loss_alpha decayed to zero after 25 epochs
            self.loss_alpha = max(0.0, self.loss_alpha - self.loss_alpha_decay)

            epoch_loss = 0
            for iter, (input, target) in enumerate(train_data_loader):
                # input data (bicubic interpolated image)
                if self.gpu_mode:
                    y = Variable(target.cuda())
                    x = Variable(
                        utils.img_interp(input, self.scale_factor).cuda())
                else:
                    y = Variable(target)
                    x = Variable(utils.img_interp(input, self.scale_factor))

                # update network
                self.optimizer.zero_grad()
                y_d_, y_ = self.model(x)

                # loss1
                loss1 = 0
                for d in range(self.num_recursions):
                    loss1 += (self.MSE_loss(y_d_[d], y) / self.num_recursions)

                # loss2
                loss2 = self.MSE_loss(y_, y)

                # regularization
                reg_term = 0
                for theta in self.model.parameters():
                    reg_term += torch.mean(torch.sum(theta**2))

                # total loss
                loss = self.loss_alpha * loss1 + (
                    1 - self.loss_alpha) * loss2 + self.loss_beta * reg_term
                loss.backward()
                self.optimizer.step()

                # log
                epoch_loss += loss.item()
                print("Epoch: [%2d] [%4d/%4d] loss: %.8f" %
                      ((epoch + 1),
                       (iter + 1), len(train_data_loader), loss.item()))

                # tensorboard logging
                logger.scalar_summary('loss', loss.item(), step + 1)
                step += 1

            # avg. loss per epoch
            avg_loss.append(epoch_loss / len(train_data_loader))

            # prediction
            _, recon_imgs = self.model(
                Variable(
                    utils.img_interp(test_input, self.scale_factor).cuda()))
            recon_img = recon_imgs[0].cpu().data
            gt_img = test_target[0]
            lr_img = test_input[0]
            bc_img = utils.img_interp(test_input[0], self.scale_factor)

            # calculate psnrs
            bc_psnr = utils.PSNR(bc_img, gt_img)
            recon_psnr = utils.PSNR(recon_img, gt_img)

            # save result images
            result_imgs = [gt_img, lr_img, bc_img, recon_img]
            psnrs = [None, None, bc_psnr, recon_psnr]
            utils.plot_test_result(result_imgs,
                                   psnrs,
                                   epoch + 1,
                                   save_dir=self.save_dir,
                                   is_training=True)

            print("Saving training result images at epoch %d" % (epoch + 1))

            # Save trained parameters of model
            if (epoch + 1) % self.save_epochs == 0:
                self.save_model(epoch + 1)

        # Plot avg. loss
        utils.plot_loss([avg_loss], self.num_epochs, save_dir=self.save_dir)
        print("Training is finished.")

        # Save final trained parameters of model
        self.save_model(epoch=None)
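The objective in Example #5 follows the DRCN recipe: an averaged loss over all recursive predictions, a loss on the final ensembled output, and an L2 penalty on the weights, blended by loss_alpha (annealed to zero over the first 25 epochs) and loss_beta. A compact sketch of the same combination as a standalone function (the name and signature are illustrative, not taken from the repository):

def drcn_loss(y_d_, y_final, y, params, alpha, beta, mse):
    # averaged reconstruction loss over the D intermediate recursions
    loss1 = sum(mse(y_d, y) for y_d in y_d_) / len(y_d_)
    # loss on the final (ensembled) output
    loss2 = mse(y_final, y)
    # L2 regularization over all learnable parameters
    reg = sum((theta ** 2).sum() for theta in params)
    # alpha shifts the objective from per-recursion supervision to the final output
    return alpha * loss1 + (1 - alpha) * loss2 + beta * reg

Called with y_d_ and y_ from the forward pass, self.model.parameters(), self.loss_alpha, self.loss_beta, and self.MSE_loss, this reproduces the loss assembled inline in the training loop above.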