Example #1
    def evaluate(self, loader):
        opt = self.opt
        self.net.eval()

        if opt.save_result:
            save_root = os.path.join(opt.save_root, opt.dataset)
            os.makedirs(save_root, exist_ok=True)

        psnr = 0

        for i, inputs in enumerate(loader):  # patch_list, target, filename
            clean_im = inputs[1].squeeze(0)
            filename = str(inputs[2])[2:-3]

            restore_patch = []
            for patch_idx, patch in enumerate(inputs[0]):
                patch = patch.to(self.dev).squeeze(0)
                outputs = self.net(patch).squeeze(0).clamp(
                    0, 255).round().cpu().byte().permute(1, 2, 0).numpy()
                restore_patch.append(outputs)

            # merge 8 restored patches
            h, w = clean_im.size()[1:]
            h_half, w_half = int(h/2), int(w/2)
            h_quarter, w_quarter = int(h_half/2), int(w_half/2)
            h_shave, w_shave = int(h_quarter/2), int(w_quarter/2)

            restore_im = np.zeros((h, w, 3), dtype=np.uint8)

            restore_im[0:h_half, 0:w_quarter, :] = restore_patch[0][
                0:h_half, 0:w_quarter, :]
            restore_im[0:h_half, w_quarter:w_half, :] = restore_patch[1][
                0:h_half, 0:-w_shave, :]
            restore_im[0:h_half, w_half:w_half+w_quarter, :] = restore_patch[2][
                0:h_half, 0:w_quarter, :]
            restore_im[0:h_half, w_half+w_quarter:w, :] = restore_patch[3][
                0:h_half, w_shave:, :]
            restore_im[h_half:h, 0:w_quarter, :] = restore_patch[4][
                h_shave:, 0:w_quarter, :]
            restore_im[h_half:h, w_quarter:w_half, :] = restore_patch[5][
                h_shave:, 0:-w_shave, :]
            restore_im[h_half:h, w_half:w_half+w_quarter, :] = restore_patch[6][
                h_shave:, 0:w_quarter, :]
            restore_im[h_half:h, w_half+w_quarter:w, :] = restore_patch[7][
                h_shave:, w_shave:, :]

            clean_im = clean_im.cpu().byte().permute(1, 2, 0).numpy().astype(np.uint8)
            restore_im = restore_im.astype(np.uint8)

            if opt.save_result:
                save_path = os.path.join(save_root, f"{filename}")
                io.imsave(save_path, restore_im)

            psnr_tmp = utils.calculate_psnr(clean_im, restore_im)
            print(f'{i}th image PSNR: {psnr_tmp}')
            psnr += psnr_tmp
        self.net.train()

        return psnr / len(loader)
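Every snippet in this collection leans on a calculate_psnr helper that is never shown. As a point of reference, a minimal NumPy sketch is given below; it assumes both inputs are same-sized uint8 (or 0-255 float) arrays and is a generic illustration, not the implementation used by any of the repositories above.

import numpy as np

def calculate_psnr(img1, img2, max_val=255.0):
    # Peak signal-to-noise ratio between two images with values in [0, max_val].
    img1 = img1.astype(np.float64)
    img2 = img2.astype(np.float64)
    mse = np.mean((img1 - img2) ** 2)
    if mse == 0:
        return float('inf')  # identical images
    return 20 * np.log10(max_val / np.sqrt(mse))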
Example #2
    def evaluate(self):
        opt = self.opt
        self.net.eval()

        psnr = 0
        for i, inputs in enumerate(self.test_loader):
            HR = inputs[0].to(self.dev)
            LR = inputs[1].to(self.dev)
            ORI_LR = LR.clone().detach()

            # match the resolution of (LR, HR) due to CutBlur
            if HR.size() != LR.size():
                scale = HR.size(2) // LR.size(2)
                LR = F.interpolate(LR, scale_factor=scale, mode="nearest")

            SR = self.net(LR)
            if isinstance(SR, (list, tuple)):
                SR = SR[-1]

            SR = SR.detach()
            # iterate over the batch; use a separate index so the outer
            # enumerate counter `i` (used for the save filename) is not shadowed
            for b in range(HR.size(0)):
                hr = HR[b].clamp(0, 255).round().cpu().byte().permute(
                    1, 2, 0).numpy()
                sr = SR[b].clamp(0, 255).round().cpu().byte().permute(
                    1, 2, 0).numpy()

                hr = hr[opt.crop:-opt.crop, opt.crop:-opt.crop, :]
                sr = sr[opt.crop:-opt.crop, opt.crop:-opt.crop, :]
                if opt.eval_y_only:
                    hr = utils.rgb2ycbcr(hr)
                    sr = utils.rgb2ycbcr(sr)
                psnr += utils.calculate_psnr(hr, sr)
            if opt.save_result:
                save_root = os.path.join(opt.save_root, opt.dataset)
                os.makedirs(save_root, exist_ok=True)
                save_path = os.path.join(save_root, "{:04d}.png".format(i + 1))
                utils.save_batch_hr_lr(HR, SR, ORI_LR, save_path)
                # io.imsave(save_path, SR)

        self.net.train()

        return psnr / len(self.test_loader.dataset)
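Examples #2 and #4 can evaluate on the luma channel only via utils.rgb2ycbcr. A plausible sketch of that conversion is shown below; it assumes uint8 RGB input and the ITU-R BT.601 "video range" coefficients commonly used in super-resolution papers, which may differ from the helper actually used in the repository.

import numpy as np

def rgb2ycbcr(img):
    # Return the Y (luma) channel of an RGB uint8 image, BT.601 video range (16-235).
    img = img.astype(np.float64)
    y = 16.0 + (65.481 * img[..., 0] + 128.553 * img[..., 1] + 24.966 * img[..., 2]) / 255.0
    return y.round().astype(np.uint8)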
Example #3
def calculate_psnr_ssim_ESRGAN():
    dir = "/home/jakaria/Super_Resolution/Filter_Enhance_Detect/saved_ESRGAN/val_images/*/*"
    HR_DIR = "/home/jakaria/Super_Resolution/Datasets/COWC/DetectionPatches_256x256/Potsdam_ISPRS/HR/x4/valid_img/"
    img_SR = sorted(glob.glob(dir + '_300000.png'))

    psnr_SR = 0
    ssim_SR = 0

    total = len(img_SR)
    print(total)

    i = 0

    for im_SR in img_SR:
        print(os.path.basename(im_SR) + '--')
        im_gt = os.path.basename(im_SR)
        im_gt = im_gt.rsplit('_', 1)[0] + ".jpg"
        im_gt = os.path.join(HR_DIR, im_gt)
        print(im_gt)

        image_SR = cv2.imread(im_SR)
        image_SR = cv2.cvtColor(image_SR, cv2.COLOR_BGR2RGB)
        cv2.imwrite(im_SR, image_SR)

        image_gt = cv2.imread(im_gt)
        image_SR = cv2.imread(im_SR)

        psnr_SR += calculate_psnr(image_gt, image_SR)
        ssim_SR += calculate_ssim(image_gt, image_SR)

        i += 1
        print(i)

    avg_psnr_SR = psnr_SR / total
    avg_ssim_SR = ssim_SR / total

    text_file = open(
        "/home/jakaria/Super_Resolution/Filter_Enhance_Detect/saved_ESRGAN/Output.txt",
        "a")
    print("SR PSNR: %4.2f" % avg_psnr_SR)
    text_file.write("SR PSNR: %4.2f \n" % avg_psnr_SR)
    print("SR SSIM: %5.4f" % avg_ssim_SR)
    text_file.write("SR SSIM: %5.4f \n" % avg_ssim_SR)
Example #4
    def evaluate(self):
        opt = self.opt
        self.net.eval()

        if opt.save_result:
            save_root = os.path.join(opt.save_root, opt.dataset)
            os.makedirs(save_root, exist_ok=True)

        psnr = 0
        for i, inputs in enumerate(self.test_loader):
            HR = inputs[0].to(self.dev)
            LR = inputs[1].to(self.dev)

            # match the resolution of (LR, HR) due to CutBlur
            if HR.size() != LR.size():
                scale = HR.size(2) // LR.size(2)
                LR = F.interpolate(LR, scale_factor=scale, mode="nearest")

            SR = self.net(LR).detach()
            HR = HR[0].clamp(0, 255).round().cpu().byte().permute(1, 2,
                                                                  0).numpy()
            SR = SR[0].clamp(0, 255).round().cpu().byte().permute(1, 2,
                                                                  0).numpy()

            if opt.save_result:
                save_path = os.path.join(save_root, "{:04d}.png".format(i + 1))
                io.imsave(save_path, SR)

            HR = HR[opt.crop:-opt.crop, opt.crop:-opt.crop, :]
            SR = SR[opt.crop:-opt.crop, opt.crop:-opt.crop, :]
            if opt.eval_y_only:
                HR = utils.rgb2ycbcr(HR)
                SR = utils.rgb2ycbcr(SR)
            psnr += utils.calculate_psnr(HR, SR)

        self.net.train()

        return psnr / len(self.test_loader)
Example #5
    def process_video(self, loader, save_images=True):

        psnr_all = []

        while not loader.is_done():

            input_sequence, gt, gt_path = loader.get_sequence()
            gen = self.g_model(input_sequence)
            psnr_frame = calculate_psnr(gen, gt, val_range=2)
            psnr_all.append(psnr_frame)

            if save_images:
                gen_folder = makedir(join(c.IMG_SAVE_DIR, gt_path.split('/')[-2]))
                gen_path = join(gen_folder, gt_path.split('/')[-1])
                gen_uint8 = float32_to_uint8(gen)
                imsave(gen_path, var2np(gen_uint8))

        psnr_mean = np.mean(psnr_all)

        # Plotting
        video_name = gt_path.split('/')[-2]
        plot_prediction(video_name, psnr_all)

        return psnr_mean
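process_video passes tensors straight to calculate_psnr with val_range=2, which suggests predictions normalized to [-1, 1]. A tensor-friendly sketch with that signature is given below; the helper's real implementation in the repository may differ.

import torch

def calculate_psnr(pred, target, val_range=2.0):
    # PSNR for torch tensors whose values span `val_range` (2.0 for data in [-1, 1]).
    mse = torch.mean((pred - target) ** 2)
    if mse == 0:
        return float('inf')
    return (20 * torch.log10(val_range / torch.sqrt(mse))).item()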
Example #6
def evaluate(model, update_step, writer, bucket, engine):
    device = torch.device('cuda:0')
    model.eval()
    eval_paths = [os.path.join(args.eval_path, v) for v in ['Set14', 'Set5']]
    metrics_list = []

    unnorm = UnNormalize(0.5, 0.5)
    for eval_path in eval_paths:
        eval_name = os.path.basename(eval_path)
        HQ_path = os.path.join(eval_path, eval_name) + '.lmdb'
        LQ_path = os.path.join(eval_path, eval_name) + '_LQ.lmdb'
        LQ_r_path = os.path.join(eval_path, eval_name) + '_LQ_restored.lmdb'

        eval_set = ValDataset(HQ_path, LQ_path, LQ_r_path, args.scale)
        eval_loader = DataLoader(
            eval_set, batch_size=1, shuffle=False, num_workers=4)

        psnr_rgb = 0.0
        psnr_y = 0.0
        ssim_rgb = 0.0
        ssim_y = 0.0

        for i, data_dict in enumerate(eval_loader):
            img_HQ = data_dict['img_GT']
            img_LQ = data_dict['img_LQ'].to(device)
            img_LQ_r = data_dict['img_LQ_r']

            with torch.no_grad():
                # SR image range [-1, 1]
                img_SR = model(img_LQ)
                # SR image range [0, 1]
                img_SR = unnorm(img_SR)
            if i == 0:
                imgs = torch.cat([img_HQ, img_SR.detach().cpu(), img_LQ_r], dim=0)
                grid = vutils.make_grid(imgs, nrow=3, normalize=False)
                tmp_image = T.ToPILImage()(grid)
                tmp_image.save('images/tmp_image.png')
                upload_to_cloud(bucket, 'images/tmp_image.png',
                                'odesr01_04/image_progress/{}/gen_step_{}'.
                                format(eval_name, update_step * args.update_freq))
                if eval_name == 'Set5':
                    writer.add_image('Set5', grid, update_step)

            crop_size = args.scale
            img_HQ_rgb = img_HQ[0].permute(2, 1, 0).cpu(). \
                numpy()[crop_size:-crop_size, crop_size:-crop_size, :]
            img_SR_rgb = img_SR[0].permute(2, 1, 0).detach().cpu(). \
                numpy()[crop_size:-crop_size, crop_size:-crop_size, :]
            img_HQ_y = rgb2ycbcr(img_HQ_rgb)
            img_SR_y = rgb2ycbcr(img_SR_rgb)

            psnr_rgb += calculate_psnr(img_HQ_rgb * 255, img_SR_rgb * 255)
            psnr_y += calculate_psnr(img_HQ_y * 255, img_SR_y * 255)
            ssim_rgb += calculate_ssim(img_HQ_rgb * 255, img_SR_rgb * 255)
            ssim_y += calculate_ssim(img_HQ_y * 255, img_SR_y * 255)

        psnr_rgb = psnr_rgb / len(eval_loader.dataset)
        psnr_y = psnr_y / len(eval_loader.dataset)
        ssim_rgb = ssim_rgb / len(eval_loader.dataset)
        ssim_y = ssim_y / len(eval_loader.dataset)

        metrics_list.extend([psnr_rgb, psnr_y, ssim_rgb, ssim_y])

        if eval_name == 'Set5':
            writer.add_scalar('psnr_rgb', psnr_rgb, update_step)
            writer.add_scalar('psnr_y', psnr_y, update_step)
            writer.add_scalar('ssim_rgb', ssim_rgb, update_step)
            writer.add_scalar('ssim_y', ssim_y, update_step)

    query = '''
        INSERT INTO odesr01_04_val
            (set14_psnr_rgb, set14_psnr_y, set14_ssim_rgb, set14_ssim_y,
            set5_psnr_rgb, set5_psnr_y, set5_ssim_rgb, set5_ssim_y)
        VALUES (%f, %f, %f, %f, %f, %f, %f, %f)
    ''' % tuple(metrics_list)
    engine.execute(query)
    model.train()
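The UnNormalize(0.5, 0.5) call in Example #6 maps the network output from the normalized [-1, 1] range back to [0, 1]. A sketch of such a helper, assuming scalar mean/std as in this example, is:

class UnNormalize:
    # Inverse of a torchvision-style Normalize: x * std + mean.
    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, tensor):
        return tensor * self.std + self.mean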
Example #7
    def evaluate(self,
                 dataloader,
                 save_dir,
                 phase='test',
                 save_result=False,
                 eval_step=-1):
        self.rand_G.eval()
        self.studio_G.eval()
        psnr_studio = 0
        ssim_studio = 0
        psnr_rand = 0
        ssim_rand = 0
        tqdm_data_loader = tqdm(dataloader, desc=phase, leave=False)
        idx = 0
        if save_result:
            studio_img_dir = os.path.join(save_dir, f'{phase}_studio')
            rand_img_dir = os.path.join(save_dir, f'{phase}_rand')
            os.makedirs(studio_img_dir, exist_ok=True)
            os.makedirs(rand_img_dir, exist_ok=True)
        for i, inputs in enumerate(tqdm_data_loader):
            rand_img = inputs['rand_lc'].to(self.device)
            studio_img = inputs['base'].to(self.device)

            fake_studio_img, light_vec_forward = self.studio_G(rand_img)

            fake_rand_img = self.rand_G(studio_img, light_vec_forward)
            crop_size = 10
            fake_studio = tensor2im(fake_studio_img)
            fake_rand = tensor2im(fake_rand_img)
            rand = tensor2im(rand_img)
            studio = tensor2im(studio_img)
            fake_studio = fake_studio[:, crop_size:-crop_size,
                                      crop_size:-crop_size]
            fake_rand = fake_rand[:, crop_size:-crop_size,
                                  crop_size:-crop_size]
            gt_studio = studio[:, crop_size:-crop_size, crop_size:-crop_size]
            gt_rand = rand[:, crop_size:-crop_size, crop_size:-crop_size]
            for j in range(rand_img.shape[0]):
                gt_rand_j = gt_rand[j, :, :]
                gt_studio_j = gt_studio[j, :, :]
                fake_rand_j = fake_rand[j, :, :]
                fake_studio_j = fake_studio[j, :, :]
                if save_result:

                    def save_pair(path, gt, fake):
                        # renamed so it does not shadow the `save_result` flag
                        gt_dir = os.path.join(path, 'gt')
                        fake_dir = os.path.join(path, 'fake')
                        os.makedirs(gt_dir, exist_ok=True)
                        os.makedirs(fake_dir, exist_ok=True)
                        gt_file = os.path.join(gt_dir, f'{idx + 1}.jpg')
                        io.imsave(gt_file, gt)
                        fake_file = os.path.join(fake_dir, f'{idx + 1}.jpg')
                        io.imsave(fake_file, fake)

                    save_pair(studio_img_dir, gt_studio_j, fake_studio_j)
                    save_pair(rand_img_dir, gt_rand_j, fake_rand_j)
                psnr_studio += calculate_psnr(gt_studio_j, fake_studio_j)
                psnr_rand += calculate_psnr(gt_rand_j, fake_rand_j)
                ssim_studio += structural_similarity(gt_studio_j,
                                                     fake_studio_j,
                                                     data_range=255,
                                                     multichannel=False,
                                                     gaussian_weights=True,
                                                     K1=0.01,
                                                     K2=0.03)
                ssim_rand += structural_similarity(gt_rand_j,
                                                   fake_rand_j,
                                                   data_range=255,
                                                   multichannel=False,
                                                   gaussian_weights=True,
                                                   K1=0.01,
                                                   K2=0.03)
                idx += 1
            if eval_step != -1 and (i + 1) % eval_step == 0:
                break

        self.rand_G.train()
        self.studio_G.train()

        return {
            'psnr_rand': psnr_rand / idx,
            'ssim_rand': ssim_rand / idx,
            'psnr_studio': psnr_studio / idx,
            'ssim_studio': ssim_studio / idx
        }
Example #8
def calculate_psnr_ssim():
    dir = "/home/jakaria/Super_Resolution/Filter_Enhance_Detect/saved/"
    HR_DIR = "/home/jakaria/Super_Resolution/Datasets/COWC/DetectionPatches_256x256/Potsdam_ISPRS/HR/x4/valid_img/*"
    bicubic_DIR = "/home/jakaria/Super_Resolution/Datasets/COWC/DetectionPatches_256x256/Potsdam_ISPRS/Bic/x4/valid_img/*"
    img_GT = sorted(glob.glob(HR_DIR + '.jpg'))
    img_final_SR_enhanced_1 = sorted(
        glob.glob(dir + '/enhanced_SR_images_1/*.png'))
    img_final_SR_enhanced_2 = sorted(
        glob.glob(dir + '/enhanced_SR_images_2/*.png'))
    img_final_SR_enhanced_3 = sorted(
        glob.glob(dir + '/enhanced_SR_images_3/*.png'))
    img_final_SR = sorted(glob.glob(dir + '/final_SR_images_216000/*.png'))
    img_SR = sorted(glob.glob(dir + '/SR_images/*.png'))
    img_SR_combined = sorted(
        glob.glob(dir + '/combined_SR_images_216000/*.png'))
    img_Bic = sorted(glob.glob(bicubic_DIR + '.jpg'))

    psnr_enhanced_1 = 0
    psnr_enhanced_2 = 0
    psnr_enhanced_3 = 0
    psnr_final = 0
    psnr_SR = 0
    psnr_SR_combined = 0
    psnr_Bic = 0

    ssim_enhanced_1 = 0
    ssim_enhanced_2 = 0
    ssim_enhanced_3 = 0
    ssim_final = 0
    ssim_SR = 0
    ssim_SR_combined = 0
    ssim_Bic = 0

    total = len(img_SR)
    print(total)

    i = 0

    for im_gt, im_enhanced_1, im_enhanced_2, im_enhanced_3, im_final, im_SR, \
            im_SR_combined, im_Bic in zip(img_GT,
                                            img_final_SR_enhanced_1,
                                            img_final_SR_enhanced_2,
                                            img_final_SR_enhanced_3,
                                            img_final_SR, img_SR, img_SR_combined,
                                            img_Bic):
        print(
            os.path.basename(im_gt) + '--',
            os.path.basename(im_enhanced_1) + '--',
            os.path.basename(im_enhanced_2) + '--',
            os.path.basename(im_enhanced_3) + '--',
            os.path.basename(im_final) + '--',
            os.path.basename(im_SR) + '--',
            os.path.basename(im_SR_combined) + '--', os.path.basename(im_Bic))

        image_gt = cv2.imread(im_gt)
        image_enhanced_1 = cv2.imread(im_enhanced_1)
        image_enhanced_2 = cv2.imread(im_enhanced_2)
        image_enhanced_3 = cv2.imread(im_enhanced_3)
        image_final = cv2.imread(im_final)
        image_SR = cv2.imread(im_SR)
        image_SR_combined = cv2.imread(im_SR_combined)
        image_Bic = cv2.imread(im_Bic)

        psnr_enhanced_1 += calculate_psnr(image_gt, image_enhanced_1)
        psnr_enhanced_2 += calculate_psnr(image_gt, image_enhanced_2)
        psnr_enhanced_3 += calculate_psnr(image_gt, image_enhanced_3)
        psnr_final += calculate_psnr(image_gt, image_final)
        psnr_SR += calculate_psnr(image_gt, image_SR)
        psnr_SR_combined += calculate_psnr(image_gt, image_SR_combined)
        psnr_Bic += calculate_psnr(image_gt, image_Bic)

        ssim_enhanced_1 += calculate_ssim(image_gt, image_enhanced_1)
        ssim_enhanced_2 += calculate_ssim(image_gt, image_enhanced_2)
        ssim_enhanced_3 += calculate_ssim(image_gt, image_enhanced_3)
        ssim_final += calculate_ssim(image_gt, image_final)
        ssim_SR += calculate_ssim(image_gt, image_SR)
        ssim_SR_combined += calculate_ssim(image_gt, image_SR_combined)
        ssim_Bic += calculate_ssim(image_gt, image_Bic)

        i += 1
        print(i)

    avg_psnr_enhanced_1, avg_psnr_enhanced_2, avg_psnr_enhanced_3,  avg_psnr_final, \
        avg_psnr_SR, avg_psnr_SR_combined, avg_psnr_Bic = (psnr_enhanced_1 / total,
                                                           psnr_enhanced_2 / total,
                                                           psnr_enhanced_3 / total,
                                                           psnr_final / total,
                                                           psnr_SR / total,
                                                           psnr_SR_combined / total,
                                                           psnr_Bic / total)

    avg_ssim_enhanced_1, avg_ssim_enhanced_2, avg_ssim_enhanced_3, avg_ssim_final, \
        avg_ssim_SR, avg_ssim_SR_combined, avg_ssim_Bic = (ssim_enhanced_1 / total,
                                                           ssim_enhanced_2 / total,
                                                           ssim_enhanced_3 / total,
                                                           ssim_final / total,
                                                           ssim_SR / total,
                                                           ssim_SR_combined / total,
                                                           ssim_Bic / total)

    text_file = open(
        "/home/jakaria/Super_Resolution/Filter_Enhance_Detect/saved/Output_216000.txt",
        "a")
    print("Enhanced PSNR_1: %4.2f" % avg_psnr_enhanced_1)
    text_file.write("Enhanced PSNR_1: %4.2f \n" % avg_psnr_enhanced_1)
    print("Enhanced PSNR_2: %4.2f" % avg_psnr_enhanced_2)
    text_file.write("Enhanced PSNR_2: %4.2f \n" % avg_psnr_enhanced_2)
    print("Enhanced PSNR_3: %4.2f" % avg_psnr_enhanced_3)
    text_file.write("Enhanced PSNR_3: %4.2f \n" % avg_psnr_enhanced_3)
    print("Final PSNR: %4.2f" % avg_psnr_final)
    text_file.write("Final PSNR: %4.2f \n" % avg_psnr_final)
    print("SR PSNR: %4.2f" % avg_psnr_SR)
    text_file.write("SR PSNR: %4.2f \n" % avg_psnr_SR)
    print("SR PSNR_combined: %4.2f" % avg_psnr_SR_combined)
    text_file.write("SR PSNR_combined: %4.2f \n" % avg_psnr_SR_combined)
    print("Bic PSNR: %4.2f" % avg_psnr_Bic)
    text_file.write("Bic PSNR: %4.2f \n" % avg_psnr_Bic)

    print("Enhanced SSIM_1: %5.4f" % avg_ssim_enhanced_1)
    text_file.write("Enhanced SSIM_1: %5.4f \n" % avg_ssim_enhanced_1)
    print("Enhanced SSIM_2: %5.4f" % avg_ssim_enhanced_2)
    text_file.write("Enhanced SSIM_2: %5.4f \n" % avg_ssim_enhanced_2)
    print("Enhanced SSIM_3: %5.4f" % avg_ssim_enhanced_3)
    text_file.write("Enhanced SSIM_3: %5.4f \n" % avg_ssim_enhanced_3)
    print("Final SSIM: %5.4f" % avg_ssim_final)
    text_file.write("Final SSIM: %5.4f \n" % avg_ssim_final)
    print("SR SSIM: %5.4f" % avg_ssim_SR)
    text_file.write("SR SSIM: %5.4f \n" % avg_ssim_SR)
    print("SR SSIM_combined: %5.4f" % avg_ssim_SR_combined)
    text_file.write("SR SSIM_combined: %5.4f \n" % avg_ssim_SR_combined)
    print("Bic SSIM: %5.4f" % avg_ssim_Bic)
    text_file.write("Bic SSIM: %5.4f \n" % avg_ssim_Bic)
    text_file.close()
Example #9
def main():
    #### setup options of three networks
    parser = argparse.ArgumentParser()
    parser.add_argument("-opt", type=str, help="Path to option YMAL file.")
    parser.add_argument(
        "--launcher", choices=["none", "pytorch"], default="none", help="job launcher"
    )
    parser.add_argument("--local_rank", type=int, default=0)
    args = parser.parse_args()
    opt = option.parse(args.opt, is_train=True)

    # convert to NoneDict, which returns None for missing keys
    opt = option.dict_to_nonedict(opt)

    # choose small opt for SFTMD test, fill path of pre-trained model_F
    #### set random seed
    # seed = opt["train"]["manual_seed"]
    # if seed is None:
    #     seed = random.randint(1, 10000)

    # load PCA matrix of enough kernel
    print("load PCA matrix")
    pca_matrix = torch.load(
        opt["pca_matrix_path"], map_location=lambda storage, loc: storage
    )
    print("PCA matrix shape: {}".format(pca_matrix.shape))

    #### distributed training settings
    if args.launcher == "none":  # disabled distributed training
        opt["dist"] = False
        opt["dist"] = False
        rank = -1
        print("Disabled distributed training.")
    else:
        opt["dist"] = True
        opt["dist"] = True
        init_dist()
        world_size = (
            torch.distributed.get_world_size()
        )  # Returns the number of processes in the current process group
        rank = torch.distributed.get_rank()  # Returns the rank of the current process
        util.set_random_seed(0)

    torch.backends.cudnn.benchmark = True
    # torch.backends.cudnn.deterministic = True

    ###### Predictor&Corrector train ######

    #### loading resume state if exists
    if opt["path"].get("resume_state", None):
        # distributed resuming: all load into default GPU
        device_id = torch.cuda.current_device()
        resume_state = torch.load(
            opt["path"]["resume_state"],
            map_location=lambda storage, loc: storage.cuda(device_id),
        )
        option.check_resume(opt, resume_state["iter"])  # check resume options
    else:
        resume_state = None

    #### mkdir and loggers
    if rank <= 0:  # normal training (rank -1) OR distributed training (rank 0-7)
        if resume_state is None:
            # Predictor path
            util.mkdir_and_rename(
                opt["path"]["experiments_root"]
            )  # rename experiment folder if exists
            util.mkdirs(
                (
                    path
                    for key, path in opt["path"].items()
                    if not key == "experiments_root"
                    and "pretrain_model" not in key
                    and "resume" not in key
                )
            )
            os.system("rm ./log")
            os.symlink(os.path.join(opt["path"]["experiments_root"], ".."), "./log")

        # config loggers. Before it, the log will not work
        util.setup_logger(
            "base",
            opt["path"]["log"],
            "train_" + opt["name"],
            level=logging.INFO,
            screen=False,
            tofile=True,
        )
        util.setup_logger(
            "val",
            opt["path"]["log"],
            "val_" + opt["name"],
            level=logging.INFO,
            screen=False,
            tofile=True,
        )
        logger = logging.getLogger("base")
        logger.info(option.dict2str(opt))
        # tensorboard logger
        if opt["use_tb_logger"] and "debug" not in opt["name"]:
            version = float(torch.__version__[0:3])
            if version >= 1.1:  # PyTorch 1.1
                from torch.utils.tensorboard import SummaryWriter
            else:
                logger.info(
                    "You are using PyTorch {}. Tensorboard will use [tensorboardX]".format(
                        version
                    )
                )
                from tensorboardX import SummaryWriter
            tb_logger = SummaryWriter(log_dir="log/{}/tb_logger/".format(opt["name"]))
    else:
        util.setup_logger(
            "base", opt["path"]["log"], "train", level=logging.INFO, screen=False
        )
        logger = logging.getLogger("base")

    torch.backends.cudnn.benchmark = True
    # torch.backends.cudnn.deterministic = True

    #### create train and val dataloader
    dataset_ratio = 200  # enlarge the size of each epoch
    for phase, dataset_opt in opt["datasets"].items():
        if phase == "train":
            train_set = create_dataset(dataset_opt)
            train_size = int(math.ceil(len(train_set) / dataset_opt["batch_size"]))
            total_iters = int(opt["train"]["niter"])
            total_epochs = int(math.ceil(total_iters / train_size))
            if opt["dist"]:
                train_sampler = DistIterSampler(
                    train_set, world_size, rank, dataset_ratio
                )
                total_epochs = int(
                    math.ceil(total_iters / (train_size * dataset_ratio))
                )
            else:
                train_sampler = None
            train_loader = create_dataloader(train_set, dataset_opt, opt, train_sampler)
            if rank <= 0:
                logger.info(
                    "Number of train images: {:,d}, iters: {:,d}".format(
                        len(train_set), train_size
                    )
                )
                logger.info(
                    "Total epochs needed: {:d} for iters {:,d}".format(
                        total_epochs, total_iters
                    )
                )
        elif phase == "val":
            val_set = create_dataset(dataset_opt)
            val_loader = create_dataloader(val_set, dataset_opt, opt, None)
            if rank <= 0:
                logger.info(
                    "Number of val images in [{:s}]: {:d}".format(
                        dataset_opt["name"], len(val_set)
                    )
                )
        else:
            raise NotImplementedError("Phase [{:s}] is not recognized.".format(phase))
    assert train_loader is not None
    assert val_loader is not None

    #### create model
    model = create_model(opt)  # load pretrained model of SFTMD

    #### resume training
    if resume_state:
        logger.info(
            "Resuming training from epoch: {}, iter: {}.".format(
                resume_state["epoch"], resume_state["iter"]
            )
        )

        start_epoch = resume_state["epoch"]
        current_step = resume_state["iter"]
        model.resume_training(resume_state)  # handle optimizers and schedulers
    else:
        current_step = 0
        start_epoch = 0

    prepro = util.SRMDPreprocessing(
        scale=opt["scale"], pca_matrix=pca_matrix, cuda=True, **opt["degradation"]
    )
    kernel_size = opt["degradation"]["ksize"]
    padding = kernel_size // 2
    #### training
    logger.info(
        "Start training from epoch: {:d}, iter: {:d}".format(start_epoch, current_step)
    )
    for epoch in range(start_epoch, total_epochs + 1):
        if opt["dist"]:
            train_sampler.set_epoch(epoch)
        for _, train_data in enumerate(train_loader):
            current_step += 1

            if current_step > total_iters:
                break
            LR_img, ker_map, kernels = prepro(train_data["GT"], True)
            LR_img = (LR_img * 255).round() / 255

            model.feed_data(
                LR_img, GT_img=train_data["GT"], ker_map=ker_map, kernel=kernels
            )
            model.optimize_parameters(current_step)
            model.update_learning_rate(
                current_step, warmup_iter=opt["train"]["warmup_iter"]
            )
            visuals = model.get_current_visuals()

            if current_step % opt["logger"]["print_freq"] == 0:
                logs = model.get_current_log()
                message = "<epoch:{:3d}, iter:{:8,d}, lr:{:.3e}> ".format(
                    epoch, current_step, model.get_current_learning_rate()
                )
                for k, v in logs.items():
                    message += "{:s}: {:.4e} ".format(k, v)
                    # tensorboard logger
                    if opt["use_tb_logger"] and "debug" not in opt["name"]:
                        if rank <= 0:
                            tb_logger.add_scalar(k, v, current_step)
                if rank <= 0:
                    logger.info(message)

            # validation, to produce ker_map_list(fake)
            if current_step % opt["train"]["val_freq"] == 0 and rank <= 0:
                avg_psnr = 0.0
                idx = 0
                for _, val_data in enumerate(val_loader):

                    # LR_img, ker_map = prepro(val_data['GT'])
                    LR_img = val_data["LQ"]
                    lr_img = util.tensor2img(LR_img)  # save LR image for reference

                    # valid Predictor
                    model.feed_data(LR_img, val_data["GT"])
                    model.test()
                    visuals = model.get_current_visuals()

                    # Save images for reference
                    img_name = val_data["LQ_path"][0]
                    img_dir = os.path.join(opt["path"]["val_images"], img_name)
                    # img_dir = os.path.join(opt['path']['val_images'], str(current_step), '_', str(step))
                    util.mkdir(img_dir)
                    save_lr_path = os.path.join(img_dir, "{:s}_LR.png".format(img_name))
                    util.save_img(lr_img, save_lr_path)

                    sr_img = util.tensor2img(visuals["SR"].squeeze())  # uint8
                    gt_img = util.tensor2img(visuals["GT"].squeeze())  # uint8

                    save_img_path = os.path.join(
                        img_dir, "{:s}_{:d}.png".format(img_name, current_step)
                    )

                    kernel = (
                        visuals["ker"]
                        .numpy()
                        .reshape(
                            opt["degradation"]["ksize"], opt["degradation"]["ksize"]
                        )
                    )
                    kernel = 1 / (np.max(kernel) + 1e-4) * 255 * kernel
                    cv2.imwrite(save_img_path, kernel)
                    util.save_img(sr_img, save_img_path)

                    # calculate PSNR
                    crop_size = opt["scale"]
                    gt_img = gt_img / 255.0
                    sr_img = sr_img / 255.0
                    cropped_sr_img = sr_img[crop_size:-crop_size, crop_size:-crop_size]
                    cropped_gt_img = gt_img[crop_size:-crop_size, crop_size:-crop_size]

                    avg_psnr += util.calculate_psnr(
                        cropped_sr_img * 255, cropped_gt_img * 255
                    )
                    idx += 1

                avg_psnr = avg_psnr / idx

                # log
                logger.info("# Validation # PSNR: {:.6f}".format(avg_psnr))
                logger_val = logging.getLogger("val")  # validation logger
                logger_val.info(
                    "<epoch:{:3d}, iter:{:8,d}> psnr: {:.6f}".format(
                        epoch, current_step, avg_psnr
                    )
                )
                # tensorboard logger
                if opt["use_tb_logger"] and "debug" not in opt["name"]:
                    tb_logger.add_scalar("psnr", avg_psnr, current_step)

            #### save models and training states
            if current_step % opt["logger"]["save_checkpoint_freq"] == 0:
                if rank <= 0:
                    logger.info("Saving models and training states.")
                    model.save(current_step)
                    model.save_training_state(epoch, current_step)

    if rank <= 0:
        logger.info("Saving the final model.")
        model.save("latest")
        logger.info("End of Predictor and Corrector training.")
    tb_logger.close()
Example #10
            gt_img = gt_img / 255.0
            sr_img = sr_img / 255.0

            crop_border = opt["crop_border"] if opt["crop_border"] else opt["scale"]
            if crop_border == 0:
                cropped_sr_img = sr_img
                cropped_gt_img = gt_img
            else:
                cropped_sr_img = sr_img[
                    crop_border:-crop_border, crop_border:-crop_border, :
                ]
                cropped_gt_img = gt_img[
                    crop_border:-crop_border, crop_border:-crop_border, :
                ]

            psnr = util.calculate_psnr(cropped_sr_img * 255, cropped_gt_img * 255)
            ssim = util.calculate_ssim(cropped_sr_img * 255, cropped_gt_img * 255)

            test_results["psnr"].append(psnr)
            test_results["ssim"].append(ssim)

            if gt_img.shape[2] == 3:  # RGB image
                sr_img_y = bgr2ycbcr(sr_img, only_y=True)
                gt_img_y = bgr2ycbcr(gt_img, only_y=True)
                if crop_border == 0:
                    cropped_sr_img_y = sr_img_y
                    cropped_gt_img_y = gt_img_y
                else:
                    cropped_sr_img_y = sr_img_y[
                        crop_border:-crop_border, crop_border:-crop_border
                    ]
Example #11
def test(test_loader, net, scale, scene_name):
    train_mode = False
    net.eval()
    count = 0
    PSNR = 0
    SSIM = 0
    PSNR_t = 0
    SSIM_t = 0
    out = []
    for image_num, data in enumerate(test_loader):
        x_input, target = data[0], data[1]
        # print(x_input.shape)
        B, _, _, _, _ = x_input.shape
        with torch.no_grad():
            x_input = Variable(x_input).cuda()
            target = Variable(target).cuda()
            t0 = time.time()
            # prediction = net(x_input)
            # ensemble test (self-ensemble / test-time augmentation)
            if True:
                # x_input = Variable(x_input).cuda()
                x_input_list = [x_input]
                for tf in ['v', 'h', 't']:
                    x_input_list.extend(
                        [test_ensamble(t, tf) for t in x_input_list])
                prediction_list = [net(aug) for aug in x_input_list]
                for i in range(len(prediction_list)):
                    if i > 3:
                        prediction_list[i] = test_ensamble_2(
                            prediction_list[i], 't')
                    if i % 4 > 1:
                        prediction_list[i] = test_ensamble_2(
                            prediction_list[i], 'h')
                    if (i % 4) % 2 == 1:
                        prediction_list[i] = test_ensamble_2(
                            prediction_list[i], 'v')
                prediction_cat = torch.cat(prediction_list, dim=0)
                prediction = prediction_cat.mean(dim=0, keepdim=True)

        torch.cuda.synchronize()
        t1 = time.time()
        print("===> Timer: %.4f sec." % (t1 - t0))
        prediction = prediction.unsqueeze(2)
        count += 1
        prediction = prediction.squeeze(0).permute(1, 2, 3, 0)  # [T,H,W,C]
        prediction = prediction.cpu().numpy(
        )[:, :, :, ::-1]  # tensor -> numpy, rgb -> bgr
        target = target.squeeze(0).permute(1, 2, 3, 0)  # [T,H,W,C]
        target = target.cpu().numpy()[:, :, :, ::
                                      -1]  # tensor -> numpy, rgb -> bgr

        save_img(prediction[0], scene_name, 2)
        # test_Y______________________
        # prediction_Y = bgr2ycbcr(prediction[0])
        # target_Y = bgr2ycbcr(target[0])
        # prediction_Y = prediction_Y * 255
        # target_Y = target_Y * 255
        # test_RGB _______________________________
        prediction_Y = prediction[0] * 255
        target_Y = target[0] * 255
        # ________________________________
        # calculate PSNR and SSIM once per frame and reuse the values
        psnr_frame = calculate_psnr(prediction_Y, target_Y)
        ssim_frame = calculate_ssim(prediction_Y, target_Y)
        print('PSNR: {:.6f} dB, \tSSIM: {:.6f}'.format(psnr_frame, ssim_frame))
        PSNR += psnr_frame
        SSIM += ssim_frame
        out.append(psnr_frame)

        print('===>{} PSNR = {}'.format(scene_name, PSNR))
        print('===>{} SSIM = {}'.format(scene_name, SSIM))
        PSNR_t = PSNR
        SSIM_t = SSIM

    return PSNR_t, SSIM_t
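The self-ensemble in Example #11 relies on test_ensamble / test_ensamble_2 to apply and then undo a vertical flip ('v'), a horizontal flip ('h') and a spatial transpose ('t'). A minimal sketch of such helpers, operating on the last two (spatial) axes of the input tensors, is given below; the transform codes and their exact semantics in the repository are assumptions.

import torch

def test_ensamble(x, mode):
    # Apply a geometric transform over the spatial dimensions (last two axes).
    if mode == 'v':
        return torch.flip(x, dims=[-2])   # vertical flip
    if mode == 'h':
        return torch.flip(x, dims=[-1])   # horizontal flip
    if mode == 't':
        return x.transpose(-2, -1)        # swap H and W
    raise ValueError(f"unknown transform: {mode}")

def test_ensamble_2(x, mode):
    # Undo the transform; flips and the transpose are their own inverses.
    return test_ensamble(x, mode)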