Example #1
def main():
    ## data
    print('Loading data...')
    test_hr_path = os.path.join('data/', dataset)
    if dataset == 'Set5':
        ext = '*.bmp'
    else:
        ext = '*.png'
    hr_paths = sorted(glob.glob(os.path.join(test_hr_path, ext)))

    ## model
    print('Loading model...')
    tensor_lr = tf.placeholder('float32', [1, None, None, 3], name='tensor_lr')
    tensor_b = tf.placeholder('float32', [1, None, None, 3], name='tensor_b')

    tensor_sr = IDN(tensor_lr, tensor_b, scale)
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                            log_device_placement=False))
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    saver.restore(sess, model_path)

    ## result
    save_path = os.path.join(saved_path, dataset + '/x' + str(scale))
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    psnr_score = 0
    for i, _ in enumerate(hr_paths):
        print('processing image %d' % (i + 1))
        img_hr = utils.modcrop(misc.imread(hr_paths[i]), scale)
        img_lr = utils.downsample_fn(img_hr, scale=scale)
        img_b = utils.upsample_fn(img_lr, scale=scale)
        [lr, b] = utils.datatype([img_lr, img_b])
        lr = lr[np.newaxis, :, :, :]
        b = b[np.newaxis, :, :, :]
        [sr] = sess.run([tensor_sr], {tensor_lr: lr, tensor_b: b})
        sr = utils.quantize(np.squeeze(sr))
        img_sr = utils.shave(sr, scale)
        img_hr = utils.shave(img_hr, scale)
        if not rgb:
            img_pre = utils.quantize(sc.rgb2ycbcr(img_sr)[:, :, 0])
            img_label = utils.quantize(sc.rgb2ycbcr(img_hr)[:, :, 0])
        else:
            img_pre = img_sr
            img_label = img_hr
        psnr_score += utils.compute_psnr(img_pre, img_label)
        misc.imsave(os.path.join(save_path, os.path.basename(hr_paths[i])), sr)

    print('Average PSNR: %.4f' % (psnr_score / len(hr_paths)))
    print('Finish')
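Every example on this page crops a border before measuring PSNR via a utils.shave helper, and several clamp float output back to 8-bit via utils.quantize. Neither helper is shown here, and signatures vary between repos (Examples #8-#10 pass a per-axis list instead of an int). A minimal sketch of what such helpers might look like; the names and exact semantics are assumptions:

import numpy as np

def shave(img, border):
    # Hypothetical stand-in for utils.shave: drop `border` pixels on every
    # side of an H x W or H x W x C array (assumes border > 0).
    return img[border:-border, border:-border, ...]

def quantize(img):
    # Hypothetical stand-in for utils.quantize: round, clip to [0, 255],
    # and cast to uint8.
    return np.uint8(np.clip(np.rint(img), 0, 255))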
Example #2
def validation(sess, vdsr, epoch, scale):

    if not os.path.exists('./validation'):
        os.makedirs('./validation')

    validation_result_path = {
        2: './validation/2.csv',
        3: './validation/3.csv',
        4: './validation/4.csv'
    }

    s = scale
    if not os.path.exists('./validation/%d' % s):
        os.makedirs('./validation/%d' % s)

    lr, gt = data.load_lr_gt_mat('./data/test_data/mat/Set5', s)
    v_len = len(gt)

    psnr = []

    for i in range(v_len):
        lr_image = lr[i]['data']
        gt_image = gt[i]['data']

        residual, sr = sess.run([vdsr.residual, vdsr.inference],
                                feed_dict={
                                    vdsr.lr:
                                    lr_image.reshape((1, ) + lr_image.shape +
                                                     (1, ))
                                })

        sr = sr.reshape(sr.shape[1:3])
        residual = residual.reshape(residual.shape[1:3])

        utils.save_image(
            sr, './validation/%d/%s_sr_scale_%d_epoch_%d.png' %
            (s, lr[i]['name'], s, epoch))

        residual = utils.normalize(residual)
        utils.save_image(
            residual, './validation/%d/%s_residual_scale_%d_epoch_%d.png' %
            (s, lr[i]['name'], s, epoch))

        sr_ = utils.shave(sr, s)
        gt_image_ = utils.shave(gt_image, s)
        psnr.append(utils.psnr(gt_image_, sr_))
    with open(validation_result_path[s], 'a') as f:
        f.write('%d, %s, %f\n' %
                (epoch, ', '.join(str(e) for e in psnr), float(np.mean(psnr))))
Example #3
    def validation(self, epoch, val_data_loader):  # input is YCbCr; TODO: complete
        print('Validation is started.')

        # test_data_loader = self.load_dataset(dataset='test')
        self.model.eval()
        img_num = 0
        total_loss = 0
        for _, data in enumerate(val_data_loader):
            LR = data['img_LR']
            target = data['img_HR']

            input_Y = LR[:, 0:1, :, :]
            target_Y = target[:, 0:1, :, :]
            target_Y = utils.shave(target_Y, border_size=2 * self.scale_factor)

            if self.gpu_mode:
                input = Variable(input_Y.cuda())
                target_Y = target_Y.cuda()  # keep the target on the same device as the prediction
            else:
                input = Variable(input_Y)

            # prediction
            recon_imgs = self.model(input)  # inference; 3-channel support still TODO
            loss = self.loss(recon_imgs, target_Y)
            total_loss += loss.data
            #scipy.misc.imsave(self.img_save_dir + '/img' + str(img_num) + '_' + str(self.scale_factor) + 'x_' + str(epoch)+'LR_'+str(self.lr)+'.png', recon_img)

        print('the average validation dataset loss is',
              total_loss / len(val_data_loader))
Example #4
    def test(self):
        # networks
        self.model = Net(num_channels=self.num_channels, scale_factor=self.scale_factor, d=56, s=12, m=4)

        if self.gpu_mode:
            self.model.cuda()

        # load model
        self.load_model()

        # load dataset
        test_data_loader = self.load_dataset(dataset='test')

        # Test
        print('Test is started.')
        img_num = 0
        self.model.eval()
        for input, target in test_data_loader:
            # input data (low resolution image)
            if self.gpu_mode:
                y_ = Variable(input.cuda())
            else:
                y_ = Variable(input)

            # prediction
            recon_imgs = self.model(y_)
            for i, recon_img in enumerate(recon_imgs):
                img_num += 1
                recon_img = recon_imgs[i].cpu().data
                gt_img = utils.shave(target[i], border_size=2 * self.scale_factor)
                lr_img = utils.shave(input[i], border_size=2)
                bc_img = utils.shave(utils.img_interp(input[i], self.scale_factor), border_size=2 * self.scale_factor)

                # calculate psnrs
                bc_psnr = utils.PSNR(bc_img, gt_img)
                recon_psnr = utils.PSNR(recon_img, gt_img)

                # save result images
                result_imgs = [gt_img, lr_img, bc_img, recon_img]
                psnrs = [None, None, bc_psnr, recon_psnr]
                utils.plot_test_result(result_imgs, psnrs, img_num, save_dir=self.save_dir)

                print("Saving %d test result images..." % img_num)
Example #5
    def test(self):
        # networks
        self.model = Net(num_channels=self.num_channels, base_filter=64)

        if self.gpu_mode:
            self.model.cuda()

        # load model
        self.load_model()

        # load dataset
        test_data_loader = self.load_dataset(dataset='test')

        # Test
        print('Test is started.')
        img_num = 0
        self.model.eval()
        for input, target in test_data_loader:
            # input data (bicubic interpolated image)
            if self.gpu_mode:
                y_ = Variable(utils.img_interp(input, self.scale_factor).cuda())
            else:
                y_ = Variable(utils.img_interp(input, self.scale_factor))

            # prediction
            recon_imgs = self.model(y_)
            for i in range(self.test_batch_size):
                img_num += 1
                recon_img = recon_imgs[i].cpu().data
                gt_img = utils.shave(target[i], border_size=8)
                lr_img = input[i]
                bc_img = utils.shave(utils.img_interp(input[i], self.scale_factor), border_size=8)

                # calculate psnrs
                bc_psnr = utils.PSNR(bc_img, gt_img)
                recon_psnr = utils.PSNR(recon_img, gt_img)

                # save result images
                result_imgs = [gt_img, lr_img, bc_img, recon_img]
                psnrs = [None, None, bc_psnr, recon_psnr]
                utils.plot_test_result(result_imgs, psnrs, img_num, save_dir=self.save_dir)

                print("Saving %d test result images..." % img_num)
Example #6
    def validation(self, epoch, val_data_loader):  # input is YCbCr; TODO: complete
        print('Validation is started.')
        os.environ["CUDA_VISIBLE_DEVICES"] = '4,5,6,7'
        #torch.cuda.set_device(4)
        # test_data_loader = self.load_dataset(dataset='test')
        self.model.eval()
        img_num = 0
        total_loss = 0
        for _, (LR, target) in enumerate(val_data_loader):
            input_x = LR[:, 0:1, :, :]
            target_Y = target[:, 0:1, :, :]
            target_y = utils.shave(target_Y, border_size=2 * self.scale_factor)
            if self.gpu_mode:
                input = Variable(input_x.cuda())
                target = Variable(target_y.cuda())
            else:
                target = Variable(target_y)
                # target = Variable(utils.shave(target_Y, border_size=2*self.scale_factor))
                input = Variable(input_x)

            # prediction
            recon_imgs = self.model(input)  # inference; 3-channel support still TODO
            loss = self.loss(recon_imgs, target)
            total_loss += loss.data  # accumulate the value, not the graph

        avg_loss = total_loss / len(val_data_loader)
        print('avg_loss = ', avg_loss)
Example #7
def test():
    avg_psnr = 0

    for batch in testing_data_loader:
        input, target = batch[0].detach(), batch[1].detach()
        model.feed_data([input], need_HR=False)
        model.test()
        pre = model.get_current_visuals(need_HR=False)
        sr_img = utils.tensor2np(pre['SR'].data)
        gt_img = utils.tensor2np(target.data[0])
        crop_size = args.scale
        cropped_sr_img = utils.shave(sr_img, crop_size)
        cropped_gt_img = utils.shave(gt_img, crop_size)
        if is_y is True:
            im_label = utils.quantize(sc.rgb2ycbcr(cropped_gt_img)[:, :, 0])
            im_pre = utils.quantize(sc.rgb2ycbcr(cropped_sr_img)[:, :, 0])
        else:
            im_label = cropped_gt_img
            im_pre = cropped_sr_img
        avg_psnr += utils.compute_psnr(im_pre, im_label)

    print("===> Valid. psnr: {:.4f}".format(avg_psnr /
                                            len(testing_data_loader)))
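Example #7, like Example #1, delegates the metric to utils.compute_psnr, which is not shown on this page. A minimal stand-in, assuming 8-bit inputs and matching the explicit 20*log10(255/rmse) formula of Examples #8-#10:

import numpy as np

def compute_psnr(pred, label):
    # Hypothetical stand-in for utils.compute_psnr on 8-bit images.
    diff = pred.astype(np.float64) - label.astype(np.float64)
    rmse = np.sqrt(np.mean(diff ** 2))
    return float('inf') if rmse == 0 else 20 * np.log10(255.0 / rmse)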
Example #8
def evalimg(im_h_y, im_gt, shave=0):
    if len(im_gt.shape)==3:
        im_gt_ycbcr = utils.rgb2ycbcr(im_gt/255.0)*255.0
        im_gt_y = im_gt_ycbcr[:, :, 0]
    else:
        im_gt_y = im_gt

    diff = im_h_y.astype(np.uint8).astype(np.float32) - im_gt_y.astype(np.uint8).astype(np.float32)
    #diff = im_h_y - im_gt_y
    if shave>0:
        diff = utils.shave(diff, [shave, shave])
    res = {}
    res['rmse'] = np.sqrt((diff**2).mean())
    res['psnr'] = 20*np.log10(255.0/res['rmse'])
    return res
Example #9
def evalimg(im_h_y, im_gt, shave=0):
    if len(im_gt.shape) == 3:
        im_gt_ycbcr = utils.rgb2ycbcr(im_gt / 255.0) * 255.0
        im_gt_y = im_gt_ycbcr[:, :, 0]
    else:
        im_gt_y = im_gt

    diff = im_h_y.astype(np.uint8).astype(np.float32) - im_gt_y.astype(
        np.uint8).astype(np.float32)
    #diff = im_h_y - im_gt_y
    if shave > 0:
        diff = utils.shave(diff, [shave, shave])
    res = {}
    res['rmse'] = np.sqrt((diff**2).mean())
    res['psnr'] = 20 * np.log10(255.0 / res['rmse'])
    return res
Example #10
def evalimg(im_h_y, im_gt, shave=0):
    if len(im_gt.shape) == 3:
        im_gt_ycbcr = utils.rgb2ycbcr(im_gt / 255.0) * 255.0
        im_gt_y = im_gt_ycbcr[:, :, 0]
    else:
        im_gt_y = im_gt

    im_h_y_uint8 = np.rint(np.clip(im_h_y, 0, 255))
    im_gt_y_uint8 = np.rint(np.clip(im_gt_y, 0, 255))
    diff = im_h_y_uint8 - im_gt_y_uint8
    #diff = im_h_y - im_gt_y
    if shave > 0:
        diff = utils.shave(diff, [shave, shave])
    res = {}
    res['rmse'] = np.sqrt((diff**2).mean())
    res['psnr'] = 20 * np.log10(255.0 / res['rmse'])
    return res
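Examples #8 and #9 round-trip through astype(np.uint8), which truncates and then wraps values outside [0, 255], whereas Example #10 clips first so out-of-range values saturate. A quick check of the difference:

import numpy as np

x = np.array([12.4, 255.0, 259.7, 300.2], dtype=np.float32)

wrapped = x.astype(np.uint8)                            # truncate, wrap mod 256
clipped = np.rint(np.clip(x, 0, 255)).astype(np.uint8)  # round, saturate

print(wrapped)  # [ 12 255   3  44]
print(clipped)  # [ 12 255 255 255]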
Example #11
    def validation(self):  # input is YCbCr; TODO: complete
        self.Choose_Model(self.Model_index)
        val_data_loader = self.load_dataset(dataset='test')
        print('Validation is started.')

        if self.loss_func == 'mse':
            self.loss = nn.MSELoss()
        elif self.loss_func == 'ssim':
            self.loss = pytorch_ssim.SSIM(window_size=11)

        # test_data_loader = self.load_dataset(dataset='test')
        #self.model.eval()
        img_num = 0
        total_loss = 0
        for iter, data in enumerate(val_data_loader):
            LR = data['img_LR']
            HR = data['img_HR']
            #only use Y channel
            input_Y = LR[:, 0:1, :, :]
            target_Y = HR[:, 0:1, :, :]

            target_Y = utils.shave(target_Y, border_size=2 * self.scale_factor)

            if self.gpu_mode:
                input = Variable(input_Y.cuda())
            else:
                input = Variable(input_Y)

            # prediction
            recon_imgs = self.model(input).detach().cpu()  # back to CPU for .numpy() and the CPU-side loss

            savein_target_Y = (
                target_Y.numpy()[0, :, :, :].transpose(1, 2, 0) * 255).astype(
                    numpy.uint8)
            saveinY = (recon_imgs.numpy()[0, :, :, :].transpose(1, 2, 0) *
                       255).astype(numpy.uint8)
            #imageio.imsave('1118_validation_image/predicted/'+ str(iter) + 'predicted.png', saveinY[:, :, 0])
            #imageio.imsave('1118_validation_image/Target_Y/'+ str(iter) + 'target_Y.png', savein_target_Y[:, :, 0])
            loss = self.loss(recon_imgs, target_Y)
            total_loss += loss.data
            #print('validation_loss', loss.data)

            #scipy.misc.imsave(self.img_save_dir + '/img' + str(img_num) + '_' + str(self.scale_factor) + 'x_' + str(epoch)+'LR_'+str(self.lr)+'.png', recon_img)
        print('total_loss, ', total_loss)
        print('the average validation dataset loss is',
              total_loss / len(val_data_loader))
Example #12
def run(config):

    test_data_path = './data/test_data/mat/'
    result_root = './result/'

    benchmark_list = ['Set5', 'Set14', 'B100', 'Urban100']
    scale = [2, 3, 4]

    if not os.path.exists(result_root):
        os.makedirs(result_root)
        for benchmark in benchmark_list:
            os.makedirs(os.path.join(result_root, benchmark))
            for s in scale:
                os.makedirs(os.path.join(result_root, benchmark, str(s)))

    s = config.scale
    with tf.Session() as sess:
        vdsr = model.Model(config)
        vdsr.load(sess, config.checkpoint_path, config.model_name)

        for benchmark in benchmark_list:
            print(benchmark)
            test_benchmark_path = os.path.join(test_data_path, benchmark)

            lr, gt = data.load_lr_gt_mat(test_benchmark_path, s)

            quality_result = open(
                os.path.join(result_root, benchmark, 'quality_%d.csv' % s),
                'w')

            quality_result.write('file name, psnr, ssim\n')
            psnr_list = []
            ssim_list = []
            for i, _ in enumerate(gt):

                lr_image = lr[i]['data']
                gt_image = gt[i]['data']

                sr = sess.run(vdsr.inference,
                              feed_dict={
                                  vdsr.lr:
                                  lr_image.reshape((1, ) + lr_image.shape +
                                                   (1, ))
                              })

                sr = sr.reshape(sr.shape[1:3])

                sr_ = utils.shave(sr, s)
                sr_ = sr_.astype(np.float64)
                gt_image_ = utils.shave(gt_image, s)

                _psnr = measure.compare_psnr(gt_image_, sr_)
                _ssim = measure.compare_ssim(gt_image_, sr_)

                quality_result.write('%s, %f, %f\n' %
                                     (gt[i]['name'], _psnr, _ssim))
                psnr_list.append(_psnr)
                ssim_list.append(_ssim)

                scipy.io.savemat(
                    os.path.join(result_root, benchmark, str(s),
                                 gt[i]['name'] + '.mat'), {'sr': sr})

            quality_result.close()
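Examples #2 and #12 feed a single grayscale image to the network by reshaping H x W to NHWC with singleton batch and channel axes. The same trick in isolation:

import numpy as np

lr_image = np.zeros((48, 48), dtype=np.float32)           # an H x W patch
batched = lr_image.reshape((1,) + lr_image.shape + (1,))  # -> (1, 48, 48, 1)
print(batched.shape)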
Example #13
    def train(self):

        # load networks************************************************************************
        self.Choose_Model(self.Model_index)
        utils.print_network(self.model)

        # optimizer
        self.momentum = 0.9
        self.optimizer = optim.SGD(self.model.parameters(),
                                   lr=self.lr,
                                   momentum=self.momentum)
        self.model.float()
        # loss function
        if self.loss_func == 'mse':
            self.loss = nn.MSELoss()
        elif self.loss_func == 'ssim':
            self.loss = pytorch_ssim.SSIM(window_size=11)

        if self.gpu_mode:
            self.model.cuda()
            self.loss.cuda()

        # load dataset
        train_data_loader = self.load_dataset(dataset='train')
        val_data_loader = self.load_dataset(dataset='test')

        # set the logger
        log_dir = os.path.join(self.save_dir, 'logs')
        if not os.path.exists(log_dir):
            os.mkdir(log_dir)
        #logger = Logger(log_dir)

        ################# Train start#################
        print('Training is started.')
        avg_loss = []
        step = 0
        self.model.train()
        for epoch in range(self.num_epochs):
            epoch_loss = 0
            for iter, data in enumerate(train_data_loader):
                LR = data['img_LR']
                HR = data['img_HR']

                #only use Y channel
                input_Y = LR[:, 0:1, :, :]
                target_Y = HR[:, 0:1, :, :]
                # crop the target to match the spatial size of the network output
                if self.scale_factor == 4:
                    target_Y = utils.shave(target_Y,
                                           border_size=2 * self.scale_factor)
                elif self.scale_factor in (2, 6):
                    target_Y = utils.shave(target_Y,
                                           border_size=2 * self.scale_factor - 1)
                else:
                    target_Y = utils.shave(target_Y,
                                           border_size=2 * self.scale_factor - 2)

                if self.save_inImg:  # save the network input image
                    saveinY = (input_Y.numpy()[0, :, :, :].transpose(1, 2, 0) *
                               255).astype(numpy.uint8)
                    imageio.imsave(
                        'predicted_Y/' + str(iter) + 'predicted.png',
                        saveinY[:, :, 0])
                    #scipy.misc.imsave('lrin.png', saveinY[:, :, 0]);

                    savetarY = (
                        target_Y.numpy()[0, :, :, :].transpose(1, 2, 0) *
                        255).astype(numpy.uint8)
                    imageio.imsave('target_Y/' + str(iter) + 'tarin3.png',
                                   savetarY[:, :, 0])
                    #scipy.misc.imsave('tarin.png', savetarY[:, :, 0]);

                if self.gpu_mode:
                    target = Variable(target_Y.cuda())
                    input = Variable(input_Y.cuda())
                else:
                    target = Variable(target_Y)
                    input = Variable(input_Y)

                self.optimizer.zero_grad()
                recon_image = self.model(input)
                # if self.scale_factor ==2:
                #     recon_image = recon_image[:,:,1:-1,1:-1]
                # elif self.scale_factor == 3:
                #     recon_image = recon_image[:, :, 0:-1, 0:-1]

                #### SSIM loss ##############
                # loss = 1-self.loss(recon_image, target)

                loss = self.loss(recon_image, target)

                loss.backward()
                self.optimizer.step()
                lr = self.decay_learning_rate(epoch)
                # log
                epoch_loss += loss.data
                # tensorboard logging
                #logger.scalar_summary('loss', loss.data, step + 1)

                #print('loss', loss.data)
                step += 1

            print('epoch_loss: ', epoch_loss)
            if epoch % self.save_epochs == 0:
                #onnx_name = '_x' + str(self.scale_factor) + '_' + self.model_name + '_epoch_' + str(epoch) + '.onnx'
                #torch_out = torch.onnx.export(self.model, input, onnx_name, export_params=True, verbose=True)
                self.save_model(epoch)
                #save_path = os.path.join(ckpt_dir, "{}_{}.pth".format(self.ckpt_name, epoch))
                #torch.save(self.model.state_dict(), save_path)
                self.validation(epoch, val_data_loader)
            avg_loss.append(epoch_loss / len(train_data_loader))
            print("Epoch: [%2d] [%4d/%4d] loss: %.8f" %
                  ((epoch + 1),
                   (iter + 1), len(train_data_loader), epoch_loss))

        # Plot avg. loss
        utils.plot_loss([avg_loss], self.num_epochs, save_dir=self.save_dir)
        print("Training is finished.")

        # Save final trained parameters of model
        self.save_model(epoch=None)
Example #14
    def train(self):
        # networks
        self.model = Net(num_channels=self.num_channels, base_filter=64)

        # weight initialization
        self.model.weight_init(mean=0.0, std=0.001)

        # optimizer
        self.optimizer = optim.SGD(self.model.parameters(), lr=self.lr)

        # loss function
        if self.gpu_mode:
            self.model.cuda()
            self.MSE_loss = nn.MSELoss().cuda()
        else:
            self.MSE_loss = nn.MSELoss()

        print('---------- Networks architecture -------------')
        utils.print_network(self.model)
        print('----------------------------------------------')

        # load dataset
        train_data_loader = self.load_dataset(dataset='train')
        test_data_loader = self.load_dataset(dataset='test')

        # set the logger
        log_dir = os.path.join(self.save_dir, 'logs')
        if not os.path.exists(log_dir):
            os.mkdir(log_dir)
        logger = Logger(log_dir)

        ################# Train #################
        print('Training is started.')
        avg_loss = []
        step = 0

        # test image
        test_input, test_target = test_data_loader.dataset.__getitem__(2)
        test_input = test_input.unsqueeze(0)
        test_target = test_target.unsqueeze(0)

        self.model.train()
        for epoch in range(self.num_epochs):

            epoch_loss = 0
            for iter, (input, target) in enumerate(train_data_loader):
                # input data (bicubic interpolated image)
                if self.gpu_mode:
                    # exclude border pixels from loss computation
                    x_ = Variable(utils.shave(target, border_size=8).cuda())
                    y_ = Variable(utils.img_interp(input, self.scale_factor).cuda())
                else:
                    x_ = Variable(utils.shave(target, border_size=8))
                    y_ = Variable(utils.img_interp(input, self.scale_factor))

                # update network
                self.optimizer.zero_grad()
                recon_image = self.model(y_)
                loss = self.MSE_loss(recon_image, x_)
                loss.backward()
                self.optimizer.step()

                # log
                epoch_loss += loss.data[0]
                print("Epoch: [%2d] [%4d/%4d] loss: %.8f" % ((epoch + 1), (iter + 1), len(train_data_loader), loss.data[0]))

                # tensorboard logging
                logger.scalar_summary('loss', loss.data[0], step + 1)
                step += 1

            # avg. loss per epoch
            avg_loss.append(epoch_loss / len(train_data_loader))

            # prediction
            recon_imgs = self.model(Variable(utils.img_interp(test_input, self.scale_factor).cuda()))
            recon_img = recon_imgs[0].cpu().data
            gt_img = utils.shave(test_target[0], border_size=8)
            lr_img = test_input[0]
            bc_img = utils.shave(utils.img_interp(test_input[0], self.scale_factor), border_size=8)

            # calculate psnrs
            bc_psnr = utils.PSNR(bc_img, gt_img)
            recon_psnr = utils.PSNR(recon_img, gt_img)

            # save result images
            result_imgs = [gt_img, lr_img, bc_img, recon_img]
            psnrs = [None, None, bc_psnr, recon_psnr]
            utils.plot_test_result(result_imgs, psnrs, epoch + 1, save_dir=self.save_dir, is_training=True)

            print("Saving training result images at epoch %d" % (epoch + 1))

            # Save trained parameters of model
            if (epoch + 1) % self.save_epochs == 0:
                self.save_model(epoch + 1)

        # Plot avg. loss
        utils.plot_loss([avg_loss], self.num_epochs, save_dir=self.save_dir)
        print("Training is finished.")

        # Save final trained parameters of model
        self.save_model(epoch=None)
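Examples #4, #5, and #14 lean on utils.img_interp to bicubically upscale an image by scale_factor; its implementation is not shown on this page. A minimal sketch using torch.nn.functional.interpolate (an assumption, not the repo's actual helper):

import torch
import torch.nn.functional as F

def img_interp(img, scale_factor):
    # Hypothetical stand-in for utils.img_interp: bicubic upscaling of a
    # C x H x W tensor; interpolate expects a batch dim, so add and remove one.
    return F.interpolate(img.unsqueeze(0), scale_factor=scale_factor,
                         mode='bicubic', align_corners=False).squeeze(0)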
Example #15
        out_img_s = out_s.detach().numpy().squeeze()
        out_img_s = utils.convert_shape(out_img_s)

        out_img_p = out_p.detach().numpy().squeeze()
        out_img_p = utils.convert_shape(out_img_p)

    if opt.isHR:
        if opt.only_y is True:
            im_label = utils.quantize(sc.rgb2ycbcr(im_gt)[:, :, 0])
            im_pre = utils.quantize(sc.rgb2ycbcr(out_img_c)[:, :, 0])
        else:
            im_label = im_gt
            im_pre = out_img_c

        psnr_sr[i] = utils.compute_psnr(
            utils.shave(im_label, opt.upscale_factor),
            utils.shave(im_pre, opt.upscale_factor))
        ssim_sr[i] = utils.compute_ssim(
            utils.shave(im_label, opt.upscale_factor),
            utils.shave(im_pre, opt.upscale_factor))
    i += 1

    output_c_folder = os.path.join(
        opt.output_folder,
        imname.split('/')[-1].split('.')[0] + '_c.png')
    output_s_folder = os.path.join(
        opt.output_folder,
        imname.split('/')[-1].split('.')[0] + '_s.png')
    output_p_folder = os.path.join(
        opt.output_folder,
        imname.split('/')[-1].split('.')[0] + '_p.png')
Example #16
    im_input = torch.from_numpy(im_input).float()

    if cuda:
        model = model.to(device)
        im_input = im_input.to(device)

    with torch.no_grad():
        start.record()
        out = model(im_input)
        end.record()
        torch.cuda.synchronize()
        time_list[i] = start.elapsed_time(end)  # milliseconds

    out_img = utils.tensor2np(out.detach()[0])
    crop_size = opt.upscale_factor
    cropped_sr_img = utils.shave(out_img, crop_size)
    cropped_gt_img = utils.shave(im_gt, crop_size)
    if opt.is_y is True:
        im_label = utils.quantize(sc.rgb2ycbcr(cropped_gt_img)[:, :, 0])
        im_pre = utils.quantize(sc.rgb2ycbcr(cropped_sr_img)[:, :, 0])
    else:
        im_label = cropped_gt_img
        im_pre = cropped_sr_img
    psnr_list[i] = utils.compute_psnr(im_pre, im_label)
    ssim_list[i] = utils.compute_ssim(im_pre, im_label)


    output_folder = os.path.join(opt.output_folder,
                                 imname.split('/')[-1].split('.')[0] + 'x' + str(opt.upscale_factor) + '.png')

    if not os.path.exists(opt.output_folder):
        os.makedirs(opt.output_folder)
Example #17
        data[111] = '    kernel_size: {}\n'.format(int(2 * SCALE - SCALE % 2))
        data[112] = '    stride: {}\n'.format(int(SCALE))
        data[115] = '    pad: {}\n'.format(int(SCALE // 2))
        data[129] = '            offset: {}\n'.format(
            int(SCALE * MDL_PARAMS['border_size']))

        with open(MDL_PARAMS['def_file'], 'w') as f:
            f.writelines(data)

        # initialize ESPCN model
        espcn = ESPCNSR(MDL_PARAMS)

        im_h_y.append(espcn.upscale_alg(im_l_y[i]))

        if SHAVE == 1:
            im_gt_y[i] = utils.shave(im_gt_y[i], int(SCALE))
            im_h_y[-1] = utils.shave(im_h_y[-1], int(SCALE))

        # data range 0~255
        im_h_y_uint8 = np.rint(np.clip(im_h_y[-1], 0, 255))
        im_gt_y_uint8 = np.rint(np.clip(im_gt_y[i], 0, 255))

        # data range 0~1
        # im_h_y_uint8 = np.rint( np.clip(im_h_y * 255, 0, 255))
        # im_gt_y_uint8 = np.rint( np.clip(im_gt_y[i] * 255, 0, 255))

        diff = np.abs(im_h_y_uint8 - im_gt_y_uint8).flatten()
        res['rmse'][i] = np.sqrt(np.mean(np.square(diff)))
        res['psnr'][i] = 20 * np.log10(255.0 / res['rmse'][i])
        print('rmse={}, psnr={}'.format(res['rmse'][i], res['psnr'][i]))