def define_G(opt):
    opt_net = opt['network_G']
    which_model = opt_net['which_model_G']

    # image restoration
    if which_model == 'MSRResNet':
        netG = SRResNet_arch.MSRResNet(in_nc=opt_net['in_nc'], out_nc=opt_net['out_nc'],
                                       nf=opt_net['nf'], nb=opt_net['nb'],
                                       upscale=opt_net['scale'])
    elif which_model == 'RRDBNet':
        netG = RRDBNet_arch.RRDBNet(in_nc=opt_net['in_nc'], out_nc=opt_net['out_nc'],
                                    nf=opt_net['nf'], nb=opt_net['nb'])
    # video restoration
    elif which_model == 'EDVR':
        netG = EDVR_arch.EDVR(nf=opt_net['nf'], nframes=opt_net['nframes'],
                              groups=opt_net['groups'], front_RBs=opt_net['front_RBs'],
                              back_RBs=opt_net['back_RBs'], center=opt_net['center'],
                              predeblur=opt_net['predeblur'], HR_in=opt_net['HR_in'],
                              w_TSA=opt_net['w_TSA'])
    elif which_model == 'EDVR_att_non_local':
        netG = EDVR_arch.EDVR(nf=opt_net['nf'], nframes=opt_net['nframes'],
                              groups=opt_net['groups'], front_RBs=opt_net['front_RBs'],
                              back_RBs=opt_net['back_RBs'], center=opt_net['center'],
                              predeblur=opt_net['predeblur'], HR_in=opt_net['HR_in'],
                              w_TSA=opt_net['w_TSA'], channel_att=True, non_local=False)
    else:
        raise NotImplementedError('Generator model [{:s}] not recognized'.format(which_model))

    return netG
def __init__(self, **conf):
    # default configuration; any attribute can be overridden via **conf
    self.CUDA_VISIBLE_DEVICES = 0
    self.device = 'cpu' if self.CUDA_VISIBLE_DEVICES is None else 'cuda'
    self.ckpt_path = '../experiments/pretrained_models/EDVR_latest.pth'
    self.padding = 'new_info'
    self.batch = 4
    self.split_H = 540
    self.split_W = 960
    self.network_conf = {
        'nf': 64,
        'nframes': 5,
        'groups': 8,
        'front_RBs': 5,
        'back_RBs': 10,
        'predeblur': False,
        'HR_in': False,
        'w_TSA': True,
    }
    for k, v in conf.items():
        setattr(self, k, v)
    os.environ['CUDA_VISIBLE_DEVICES'] = str(self.CUDA_VISIBLE_DEVICES)

    with torch.no_grad():
        self.model = EDVR_arch.EDVR(**self.network_conf)

    # set up the model
    self.model.load_state_dict(torch.load(self.ckpt_path), strict=True)
    self.model.eval()
    self.model = self.model.to(self.device)
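# A minimal usage sketch for the wrapper above; the class name EDVRPredictor is
# hypothetical (the snippet only shows its __init__). Only ckpt_path is overridden,
# every other key keeps the default defined in __init__.
import torch

predictor = EDVRPredictor(ckpt_path='../experiments/pretrained_models/EDVR_latest.pth')

# EDVR consumes a (B, N, C, H, W) window of consecutive LR frames and returns the
# restored centre frame; 5 random 3x180x320 frames are used here as dummy input.
frames = torch.rand(1, predictor.network_conf['nframes'], 3, 180, 320)
with torch.no_grad():
    sr = predictor.model(frames.to(predictor.device))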
def define_G(opt):
    opt_net = opt['network_G']
    which_model = opt_net['which_model_G']

    # image restoration
    if which_model == 'MSRResNet':
        netG = SRResNet_arch.MSRResNet(in_nc=opt_net['in_nc'], out_nc=opt_net['out_nc'],
                                       nf=opt_net['nf'], nb=opt_net['nb'],
                                       upscale=opt_net['scale'])
    elif which_model == 'RRDBNet':
        netG = RRDBNet_arch.RRDBNet(in_nc=opt_net['in_nc'], out_nc=opt_net['out_nc'],
                                    nf=opt_net['nf'], nb=opt_net['nb'])
    # video restoration
    elif which_model == 'EDVR':
        netG = EDVR_arch.EDVR(nf=opt_net['nf'], nframes=opt_net['nframes'],
                              groups=opt_net['groups'], front_RBs=opt_net['front_RBs'],
                              back_RBs=opt_net['back_RBs'], center=opt_net['center'],
                              predeblur=opt_net['predeblur'], HR_in=opt_net['HR_in'],
                              w_TSA=opt_net['w_TSA'], scale=opt['scale'])
    elif which_model == 'DUF':
        if opt_net['layers'] == 16:
            netG = DUF_arch.DUF_16L(scale=opt['scale'], adapt_official=True)
        elif opt_net['layers'] == 28:
            netG = DUF_arch.DUF_28L(scale=opt['scale'], adapt_official=True)
        else:
            netG = DUF_arch.DUF_52L(scale=opt['scale'], adapt_official=True)
    elif which_model == 'TOF':
        netG = TOF_arch.TOFlow(adapt_official=True)
    else:
        raise NotImplementedError('Generator model [{:s}] not recognized'.format(which_model))

    return netG
def define_G(opt):
    opt_net = opt['network_G']
    which_model = opt_net['which_model_G']

    # image restoration
    if which_model == 'MSRResNet':
        netG = SRResNet_arch.MSRResNet(in_nc=opt_net['in_nc'], out_nc=opt_net['out_nc'],
                                       nf=opt_net['nf'], nb=opt_net['nb'],
                                       upscale=opt_net['scale'])
    elif which_model == 'RRDBNet':
        netG = RRDBNet_arch.RRDBNet(in_nc=opt_net['in_nc'], out_nc=opt_net['out_nc'],
                                    nf=opt_net['nf'], nb=opt_net['nb'])
    # video restoration
    elif which_model == 'EDVR':
        import models.archs.EDVR_arch as EDVR_arch
        netG = EDVR_arch.EDVR(nf=opt_net['nf'], nframes=opt_net['nframes'],
                              groups=opt_net['groups'], front_RBs=opt_net['front_RBs'],
                              back_RBs=opt_net['back_RBs'], center=opt_net['center'],
                              predeblur=opt_net['predeblur'], HR_in=opt_net['HR_in'],
                              w_TSA=opt_net['w_TSA'], w_GCB=opt_net['w_GCB'])
    # elif which_model == 'EDVR_woDCN':
    #     import models.archs.EDVR_woDCN_arch as EDVR_arch
    #     netG = EDVR_arch.EDVR(nf=opt_net['nf'], nframes=opt_net['nframes'],
    #                           groups=opt_net['groups'], front_RBs=opt_net['front_RBs'],
    #                           back_RBs=opt_net['back_RBs'], center=opt_net['center'],
    #                           predeblur=opt_net['predeblur'], HR_in=opt_net['HR_in'],
    #                           w_TSA=opt_net['w_TSA'], w_GCB=opt_net['w_GCB'])
    elif which_model == 'MGANet':
        netG = Gen_Guided_UNet(input_size=opt_net['input_size'])
    elif which_model == 'Unet':
        import repo.CycleGAN.networks as unet_networks
        netG = unet_networks.define_G(2 * 3, 1, opt_net['nf'], opt_net['G_type'],
                                      opt_net['norm'], opt_net['dropout'],
                                      opt_net['init_type'], opt_net['init_gain'])
    else:
        raise NotImplementedError('Generator model [{:s}] not recognized'.format(which_model))

    return netG
def init_model(self):
    """Init the model and load it to the device."""
    model = EDVR_arch.EDVR(128, self.N_in, 8, 5, self.back_RBs,
                           predeblur=self.predeblur, HR_in=self.HR_in)

    #### evaluation
    self.crop_border = 0
    self.border_frame = self.N_in // 2  # border frames when evaluating

    # temporal padding mode
    if self.data_mode == 'Vid4' or self.data_mode == 'sharp_bicubic':
        self.padding = 'new_info'
    else:
        self.padding = 'replicate'

    model.load_state_dict(torch.load(self.model_path), strict=True)
    model.eval()
    model = model.to(self.device)
    return model
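# The 'new_info' and 'replicate' strings chosen above control how the frame window is
# padded at clip boundaries. The sketch below mirrors what data_util.index_generation
# is expected to do; it is illustrative, not the repo's exact implementation.
def pick_window(crt_i, max_n, N, padding='replicate'):
    """Return N frame indices centred on crt_i for a clip with max_n frames."""
    n_pad = N // 2
    idx = []
    for i in range(crt_i - n_pad, crt_i + n_pad + 1):
        if i < 0:
            # 'replicate' repeats the first frame; 'new_info' reaches forward so the
            # padded slots contribute unseen frames instead of duplicates.
            i = 0 if padding == 'replicate' else crt_i + n_pad - i
        elif i > max_n - 1:
            i = max_n - 1 if padding == 'replicate' else crt_i - n_pad - (i - (max_n - 1))
        idx.append(i)
    return idx

# e.g. first frame of a 30-frame clip with a 5-frame window:
# replicate -> [0, 0, 0, 1, 2];  new_info -> [4, 3, 0, 1, 2]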
def define_G(opt):
    opt_net = opt['network_G']
    which_model = opt_net['which_model_G']

    # image restoration
    if which_model == 'CNLRN':
        netG = CNLRN_arch.CNLRN(n_colors=opt_net['n_colors'],
                                n_deblur_blocks=opt_net['n_deblur_blocks'],
                                n_nlrgs_body=opt_net['n_nlrgs_body'],
                                n_nlrgs_up1=opt_net['n_nlrgs_up1'],
                                n_nlrgs_up2=opt_net['n_nlrgs_up2'],
                                n_subgroups=opt_net['n_subgroups'],
                                n_rcabs=opt_net['n_rcabs'],
                                n_feats=opt_net['n_feats'],
                                nonlocal_psize=opt_net['nonlocal_psize'],
                                scale=opt_net['scale'])
    elif which_model == 'PreDeblur':
        netG = PreDeblur_arch.PreDeblur(n_colors=opt_net['n_colors'],
                                        n_deblur_blocks=opt_net['n_deblur_blocks'],
                                        n_feats=opt_net['n_feats'])
    elif which_model == 'RRDBNet':
        netG = RRDBNet_arch.RRDBNet(in_nc=opt_net['in_nc'], out_nc=opt_net['out_nc'],
                                    nf=opt_net['nf'], nb=opt_net['nb'])
    # video restoration
    elif which_model == 'EDVR':
        netG = EDVR_arch.EDVR(nf=opt_net['nf'], nframes=opt_net['nframes'],
                              groups=opt_net['groups'], front_RBs=opt_net['front_RBs'],
                              back_RBs=opt_net['back_RBs'], center=opt_net['center'],
                              predeblur=opt_net['predeblur'], HR_in=opt_net['HR_in'],
                              w_TSA=opt_net['w_TSA'])
    else:
        raise NotImplementedError('Generator model [{:s}] not recognized'.format(which_model))

    return netG
def define_G(opt):
    opt_net = opt["network_G"]
    which_model = opt_net["which_model_G"]

    if which_model == "EDVR":
        netG = EDVR_arch.EDVR(
            nf=opt_net["nf"],
            nframes=opt_net["nframes"],
            groups=opt_net["groups"],
            front_RBs=opt_net["front_RBs"],
            back_RBs=opt_net["back_RBs"],
            center=opt_net["center"],
            predeblur=opt_net["predeblur"],
            HR_in=opt_net["HR_in"],
            w_TSA=opt_net["w_TSA"],
        )
    else:
        raise NotImplementedError(
            "Generator model [{:s}] not recognized".format(which_model))

    return netG
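# define_G only reads a handful of keys from opt['network_G'], so a matching options
# dict looks like the following. The values are illustrative, taken from defaults used
# elsewhere in these scripts, not a required configuration.
opt = {
    'network_G': {
        'which_model_G': 'EDVR',
        'nf': 64,          # feature channels
        'nframes': 5,      # input window size
        'groups': 8,       # deformable-conv groups in PCD alignment
        'front_RBs': 5,    # residual blocks before alignment/fusion
        'back_RBs': 10,    # residual blocks in the reconstruction trunk
        'center': None,    # None -> use the middle frame
        'predeblur': False,
        'HR_in': False,
        'w_TSA': True,
    }
}
netG = define_G(opt)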
def define_G(opt):
    opt_net = opt['network_G']
    which_model = opt_net['which_model_G']

    # image restoration
    if which_model == 'MSRResNet':
        netG = SRResNet_arch.MSRResNet(in_nc=opt_net['in_nc'], out_nc=opt_net['out_nc'],
                                       nf=opt_net['nf'], nb=opt_net['nb'],
                                       upscale=opt_net['scale'])
    elif which_model == 'RRDBNet':
        netG = RRDBNet_arch.RRDBNet(in_nc=opt_net['in_nc'], out_nc=opt_net['out_nc'],
                                    nf=opt_net['nf'], nb=opt_net['nb'])
    # video restoration
    elif which_model == 'EDVR':
        netG = EDVR_arch.EDVR(nf=opt_net['nf'], nframes=opt_net['nframes'],
                              groups=opt_net['groups'], front_RBs=opt_net['front_RBs'],
                              back_RBs=opt_net['back_RBs'], center=opt_net['center'],
                              predeblur=opt_net['predeblur'], HR_in=opt_net['HR_in'],
                              w_TSA=opt_net['w_TSA'])
    elif which_model == 'JASRNet':
        netG = JASRNet_arch.JASR(n_Parts=opt_net['n_Parts'],
                                 n_resblocks=opt_net['n_resblocks'],
                                 n_feats=opt_net['n_feats'], scale=opt_net['scale'],
                                 rgb_range=opt_net['rgb_range'],
                                 n_colors=opt_net['n_colors'])
    else:
        raise NotImplementedError('Generator model [{:s}] not recognized'.format(which_model))

    return netG
def main():
    #################
    # configurations
    #################
    stage = 1
    device = torch.device("cuda")
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    logger = logging.getLogger("base")
    # tensorboard
    # tb_logger = SummaryWriter(log_dir="../tb_logger/" + "vinhlong_040719_1212")
    data_mode = "licensePlate_blur_bicubic_EDVRstock"
    flip_test = False
    model_path = "/content/EDVR/EDVRstock.pth"
    nf = 64
    N_in = 5
    predeblur, HR_in = False, False
    back_RBs = 10
    test_dataset_folder = "/content/EDVR/datasets/license_plate5/BI_x4"
    GT_dataset_folder = "/content/EDVR/datasets/license_plate5/GT"
    if stage == 2:
        model_path = ("/content/EDVR/experiments/" +
                      "pretrained_models/EDVR_REDS_deblur_Stage2.pth")
        nf = 128
        predeblur, HR_in = True, True
        back_RBs = 20
        test_dataset_folder = "/content/EDVR/datasets/licensePlate_blur_bicubic"
        GT_dataset_folder = ""
    model = EDVR_arch.EDVR(nf, N_in, 8, 5, back_RBs, predeblur=predeblur, HR_in=HR_in)

    #### evaluation
    crop_border = 0
    border_frame = N_in // 2  # border frames when evaluating
    # temporal padding mode
    if data_mode == "Vid4" or data_mode == "sharp_bicubic":
        padding = "new_info"
    else:
        padding = "replicate"
    save_imgs = True

    # reconfigure the logger handler
    save_folder = "../results/{}".format(data_mode)
    # remove all old handlers to avoid duplicates
    for hdlr in logger.handlers[:]:
        logger.removeHandler(hdlr)
    util.mkdirs(save_folder)
    util.setup_logger("base", save_folder, "test", level=logging.INFO, screen=True, tofile=True)

    #### log info
    logger.info("Data: {} - {}".format(data_mode, test_dataset_folder))
    logger.info("Padding mode: {}".format(padding))
    logger.info("Model path: {}".format(model_path))
    logger.info("Save images: {}".format(save_imgs))
    logger.info("Flip test: {}".format(flip_test))

    #### set up the model
    model.load_state_dict(torch.load(model_path), strict=True)
    model.eval()
    model = model.to(device)

    avg_psnr_l, avg_psnr_center_l, avg_psnr_border_l = [], [], []
    subfolder_name_l = []

    subfolder_l = sorted(glob.glob(osp.join(test_dataset_folder, "*")))
    subfolder_GT_l = sorted(glob.glob(osp.join(GT_dataset_folder, "*")))
    isGT = bool(GT_dataset_folder)

    # for each subfolder
    for subfolder, subfolder_GT in zip(subfolder_l, subfolder_GT_l):
        subfolder_name = osp.basename(subfolder)
        subfolder_name_l.append(subfolder_name)
        save_subfolder = osp.join(save_folder, subfolder_name)

        img_path_l = sorted(glob.glob(osp.join(subfolder, "*")))
        max_idx = len(img_path_l)
        if save_imgs:
            util.mkdirs(save_subfolder)

        #### read LQ and GT images
        imgs_LQ = test_util.read_img_seq(subfolder)
        img_GT_l = []
        if isGT:
            for img_GT_path in sorted(glob.glob(osp.join(subfolder_GT, "*"))):
                img_GT_l.append(test_util.read_img(img_GT_path))

        avg_psnr, avg_psnr_border, avg_psnr_center, N_border, N_center = 0, 0, 0, 0, 0

        # process each image
        for img_idx, img_path in enumerate(img_path_l):
            img_name = osp.splitext(osp.basename(img_path))[0]
            select_idx = test_util.index_generation(img_idx, max_idx, N_in, padding=padding)
            imgs_in = imgs_LQ.index_select(0, torch.LongTensor(select_idx)).unsqueeze(0).to(device)

            if flip_test:
                output = test_util.flipx4_forward(model, imgs_in)
            else:
                output = test_util.single_forward(model, imgs_in)
            output = util.tensor2img(output.squeeze(0))

            if save_imgs:
                cv2.imwrite(osp.join(save_subfolder, "{}.png".format(img_name)), output)

            if isGT:
                # calculate PSNR
                output = output / 255.0
                GT = np.copy(img_GT_l[img_idx])
                # For REDS, evaluate on RGB channels; for Vid4, evaluate on the Y channel
                if data_mode == "Vid4":  # bgr2y, [0, 1]
                    GT = data_util.bgr2ycbcr(GT, only_y=True)
                    output = data_util.bgr2ycbcr(output, only_y=True)
                output, GT = test_util.crop_border([output, GT], crop_border)
                crt_psnr = util.calculate_psnr(output * 255, GT * 255)
                """
                logger.info("{:3d} - {:25} \tPSNR: {:.6f} dB".format(img_idx + 1, img_name, crt_psnr))
                """
                if img_idx >= border_frame and img_idx < max_idx - border_frame:  # center frames
                    avg_psnr_center += crt_psnr
                    N_center += 1
                else:  # border frames
                    avg_psnr_border += crt_psnr
                    N_border += 1
            else:
                logger.info("{:3d} - {:25} is generated".format(img_idx + 1, img_name))

        if isGT:
            avg_psnr = (avg_psnr_center + avg_psnr_border) / (N_center + N_border)
            avg_psnr_center = avg_psnr_center / N_center
            avg_psnr_border = 0 if N_border == 0 else avg_psnr_border / N_border
            avg_psnr_l.append(avg_psnr)
            avg_psnr_center_l.append(avg_psnr_center)
            avg_psnr_border_l.append(avg_psnr_border)

            logger.info("Folder {} - Average PSNR: {:.6f} dB for {} frames; "
                        "Center PSNR: {:.6f} dB for {} frames; "
                        "Border PSNR: {:.6f} dB for {} frames.".format(
                            subfolder_name, avg_psnr, (N_center + N_border),
                            avg_psnr_center, N_center, avg_psnr_border, N_border))

    logger.info("################ Tidy Outputs ################")
    if isGT:
        for subfolder_name, psnr, psnr_center, psnr_border in zip(
                subfolder_name_l, avg_psnr_l, avg_psnr_center_l, avg_psnr_border_l):
            logger.info("Folder {} - Average PSNR: {:.6f} dB. "
                        "Center PSNR: {:.6f} dB. "
                        "Border PSNR: {:.6f} dB.".format(subfolder_name, psnr, psnr_center, psnr_border))

    logger.info("################ Final Results ################")
    logger.info("Data: {} - {}".format(data_mode, test_dataset_folder))
    logger.info("Padding mode: {}".format(padding))
    logger.info("Model path: {}".format(model_path))
    logger.info("Save images: {}".format(save_imgs))
    logger.info("Flip test: {}".format(flip_test))
    logger.info("Is GT: {}".format(isGT))
    if isGT:
        logger.info("Total Average PSNR: {:.6f} dB for {} clips. "
                    "Center PSNR: {:.6f} dB. Border PSNR: {:.6f} dB.".format(
                        sum(avg_psnr_l) / len(avg_psnr_l), len(subfolder_l),
                        sum(avg_psnr_center_l) / len(avg_psnr_center_l),
                        sum(avg_psnr_border_l) / len(avg_psnr_border_l)))
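# crop_border is 0 in the script above, so test_util.crop_border is effectively a
# no-op; for non-zero values the intent is simply to trim a margin from both images
# before computing PSNR. A minimal sketch of that behaviour (assumed, not the repo's
# exact helper):
def crop_border(img_list, border):
    """Trim `border` pixels from every side of each image (no-op when border == 0)."""
    if border == 0:
        return img_list
    return [img[border:-border, border:-border] for img in img_list]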
def define_G(opt):
    opt_net = opt['network_G']
    which_model = opt_net['which_model_G']

    # image restoration
    if which_model == 'MSRResNet':
        netG = SRResNet_arch.MSRResNet(in_nc=opt_net['in_nc'], out_nc=opt_net['out_nc'],
                                       nf=opt_net['nf'], nb=opt_net['nb'],
                                       upscale=opt_net['scale'])
    elif which_model == 'RRDBNet':
        netG = RRDBNet_arch.RRDBNet(in_nc=opt_net['in_nc'], out_nc=opt_net['out_nc'],
                                    nf=opt_net['nf'], nb=opt_net['nb'])
    elif which_model == 'RCAN':
        netG = RCAN_arch.RCAN(in_nc=opt_net['in_nc'], out_nc=opt_net['out_nc'],
                              n_features=opt_net['nf'], n_resgroups=opt_net['ng'],
                              n_resblocks=opt_net['nb'], reduction=opt_net['reduction'],
                              scale=opt_net['scale'], res_scale=opt_net['res_scale'])
    # video restoration
    elif which_model == 'EDVR':
        netG = EDVR_arch.EDVR(nf=opt_net['nf'], nframes=opt_net['nframes'],
                              groups=opt_net['groups'], front_RBs=opt_net['front_RBs'],
                              back_RBs=opt_net['back_RBs'], center=opt_net['center'],
                              predeblur=opt_net['predeblur'], HR_in=opt_net['HR_in'],
                              w_TSA=opt_net['w_TSA'])
    elif which_model == 'EDVR_DN':
        netG = EDVR_arch.EDVR_DN(nf=opt_net['nf'], nframes=opt_net['nframes'],
                                 groups=opt_net['groups'], front_RBs=opt_net['front_RBs'],
                                 back_RBs=opt_net['back_RBs'], center=opt_net['center'],
                                 predeblur=opt_net['predeblur'], HR_in=opt_net['HR_in'],
                                 w_TSA=opt_net['w_TSA'])
    elif which_model == 'EDVR_pyramid':
        netG = EDVR_arch.EDVR_pyramid(nf=opt_net['nf'], nframes=opt_net['nframes'],
                                      groups=opt_net['groups'], front_RBs=opt_net['front_RBs'],
                                      back_RBs=opt_net['back_RBs'], center=opt_net['center'],
                                      predeblur=opt_net['predeblur'], HR_in=opt_net['HR_in'],
                                      w_TSA=opt_net['w_TSA'])
    elif which_model == 'PFNL':
        netG = PFNL_arch.PFNL(nf=opt_net['nf'], nc=opt_net['nc'], nt=opt_net['nt'],
                              r=opt_net['r'], scale=opt_net['scale'])
    else:
        raise NotImplementedError('Generator model [{:s}] not recognized'.format(which_model))

    return netG
def main():
    #################
    # configurations
    #################
    device = torch.device('cuda')
    os.environ['CUDA_VISIBLE_DEVICES'] = '1'
    stage = 1  # 1 or 2, use the two-stage strategy for the REDS dataset
    flip_test = False

    #### model
    data_mode = 'sharp'
    if stage == 1:
        model_path = '../experiments/001_EDVRwoTSA_scratch_lr4e-4_600k_SR4K_LrCAR4S_64_20_5/models/600000_G.pth'
    else:
        model_path = '../experiments/pretrained_models/EDVR_REDS_SR_Stage2.pth'

    N_in = 5  # use N_in images to restore one HR image
    predeblur, HR_in = False, False
    back_RBs = 20
    if stage == 2:
        HR_in = True
        back_RBs = 20
    model = EDVR_arch.EDVR(64, N_in, 8, 5, back_RBs, predeblur=predeblur, HR_in=HR_in, w_TSA=True)

    #### dataset
    if stage == 1:
        test_dataset_folder = '/home/mcc/4khdr/image/540p_test'
    else:
        test_dataset_folder = '../results/REDS-EDVR_REDS_SR_L_flipx4'
        print('You should modify the test_dataset_folder path for stage 2')

    #### evaluation
    crop_border = 0
    border_frame = N_in // 2  # border frames when evaluating
    # temporal padding mode
    if data_mode == 'sharp':
        padding = 'new_info'
    else:
        padding = 'replicate'
    save_imgs = True
    save_folder = '../results/{}'.format(data_mode)
    util.mkdirs(save_folder)
    util.setup_logger('base', save_folder, 'test', level=logging.INFO, screen=True, tofile=True)
    logger = logging.getLogger('base')

    #### log info
    logger.info('Data: {} - {}'.format(data_mode, test_dataset_folder))
    logger.info('Padding mode: {}'.format(padding))
    logger.info('Model path: {}'.format(model_path))
    logger.info('Save images: {}'.format(save_imgs))
    logger.info('Flip test: {}'.format(flip_test))

    #### set up the model
    model.load_state_dict(torch.load(model_path), strict=True)
    model.eval()
    model = model.to(device)

    subfolder_name_l = []
    subfolder_l = sorted(glob.glob(osp.join(test_dataset_folder, '*')))

    # for each subfolder
    for subfolder in subfolder_l:
        subfolder_name = osp.basename(subfolder)
        subfolder_name_l.append(subfolder_name)
        save_subfolder = osp.join(save_folder, subfolder_name)

        img_path_l = sorted(glob.glob(osp.join(subfolder, '*')))
        max_idx = len(img_path_l)
        if save_imgs:
            util.mkdirs(save_subfolder)

        #### read LQ images
        imgs_LQ = data_util.read_img_seq(subfolder)

        avg_psnr, avg_psnr_border, avg_psnr_center, N_border, N_center = 0, 0, 0, 0, 0

        # process each image
        for img_idx, img_path in enumerate(img_path_l):
            img_name = osp.splitext(osp.basename(img_path))[0]
            select_idx = data_util.index_generation(img_idx, max_idx, N_in, padding=padding)
            imgs_in = imgs_LQ.index_select(0, torch.LongTensor(select_idx)).unsqueeze(0).to(device)

            if flip_test:
                output = util.flipx4_forward(model, imgs_in)
            else:
                output = util.single_forward(model, imgs_in)
            output = util.tensor2img(output.squeeze(0))

            if save_imgs:
                cv2.imwrite(osp.join(save_subfolder, '{}.png'.format(img_name)), output,
                            [int(cv2.IMWRITE_PNG_COMPRESSION), 1])
            logger.info('{:3d} - {:25}'.format(img_idx + 1, img_name))
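# When flip_test is enabled these scripts call util.flipx4_forward, i.e. an x4
# self-ensemble: the clip is run as-is and flipped along W, H, and both axes, each
# output is flipped back, and the four results are averaged. A sketch of that idea,
# calling the model directly instead of the repo's util.single_forward wrapper
# (illustrative, not necessarily identical to the repo's helper):
import torch

@torch.no_grad()
def flipx4_forward(model, inp):
    """x4 self-ensemble: average outputs over the original and H/W-flipped inputs."""
    out = model(inp)                                                     # normal
    out = out + torch.flip(model(torch.flip(inp, (-1,))), (-1,))        # flip W
    out = out + torch.flip(model(torch.flip(inp, (-2,))), (-2,))        # flip H
    out = out + torch.flip(model(torch.flip(inp, (-2, -1))), (-2, -1))  # flip H and W
    return out / 4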
def main():
    #################
    # configurations
    #################
    device = torch.device('cuda')
    os.environ['CUDA_VISIBLE_DEVICES'] = '1'
    flip_test = False
    scale = 4
    N_in = 5
    predeblur, HR_in = False, False
    n_feats = 128
    back_RBs = 40
    save_imgs = False

    prog = argparse.ArgumentParser()
    prog.add_argument('--train_mode', '-t', type=str, default='REDS', help='train mode')
    prog.add_argument('--data_mode', '-m', type=str, default=None, help='data mode')
    prog.add_argument('--degradation_mode', '-d', type=str, default='impulse',
                      choices=('impulse', 'bicubic', 'preset'), help='degradation mode')
    prog.add_argument('--sigma_x', '-sx', type=float, default=1, help='sigma_x')
    prog.add_argument('--sigma_y', '-sy', type=float, default=0, help='sigma_y')
    prog.add_argument('--theta', '-th', type=float, default=0, help='theta')
    args = prog.parse_args()

    train_data_mode = args.train_mode
    data_mode = args.data_mode
    if data_mode is None:
        if train_data_mode == 'Vimeo':
            data_mode = 'Vid4'
        elif train_data_mode == 'REDS':
            data_mode = 'REDS'
    degradation_mode = args.degradation_mode  # impulse | bicubic | preset
    sig_x, sig_y, the = args.sigma_x, args.sigma_y, args.theta
    if sig_y == 0:
        sig_y = sig_x

    ############################################################################
    #### model
    if scale == 2:
        if train_data_mode == 'Vimeo':
            model_path = '../experiments/pretrained_models/EDVR_Vimeo90K_SR_M_Scale2_FT.pth'
            # model_path = '../experiments/pretrained_models/EDVR_M_BLIND_V_FT_report.pth'
            # model_path = '../experiments/pretrained_models/2500_G.pth'
        elif train_data_mode == 'REDS':
            model_path = '../experiments/pretrained_models/EDVR_REDS_SR_M_Scale2.pth'
            # model_path = '../experiments/pretrained_models/EDVR_M_BLIND_R_FT_report.pth'
        elif train_data_mode == 'Both':
            model_path = '../experiments/pretrained_models/EDVR_REDS+Vimeo90K_SR_M_Scale2_FT.pth'
        elif train_data_mode == 'MM522':
            model_path = '../experiments/pretrained_models/EDVR_MM522_SR_M_Scale2_FT.pth'
        else:
            raise NotImplementedError
    else:
        if data_mode == 'Vid4':
            model_path = '../experiments/pretrained_models/EDVR_BLIND_Vimeo_SR_L.pth'
            # model_path = '../experiments/pretrained_models/EDVR_Vimeo90K_SR_L.pth'
        elif data_mode == 'REDS':
            model_path = '../experiments/pretrained_models/EDVR_REDS_SR_L.pth'
            # model_path = '../experiments/pretrained_models/EDVR_BLIND_REDS_SR_L.pth'
        else:
            raise NotImplementedError

    model = EDVR_arch.EDVR(n_feats, N_in, 8, 5, back_RBs, predeblur=predeblur, HR_in=HR_in, scale=scale)

    folder_subname = 'preset' if degradation_mode == 'preset' else (
        degradation_mode + '_' + '{:.1f}'.format(sig_x) + '_' +
        '{:.1f}'.format(sig_y) + '_' + '{:.1f}'.format(the))

    #### dataset
    if data_mode == 'Vid4':
        # test_dataset_folder = '../dataset/Vid4/LR_bicubic/X{}'.format(scale)
        test_dataset_folder = '../dataset/Vid4/LR_{}/X{}'.format(folder_subname, scale)
        GT_dataset_folder = '../dataset/Vid4/HR'
    elif data_mode == 'MM522':
        test_dataset_folder = '../dataset/MM522val/LR_bicubic/X{}'.format(scale)
        GT_dataset_folder = '../dataset/MM522val/HR'
    else:
        # test_dataset_folder = '../dataset/REDS4/LR_bicubic/X{}'.format(scale)
        test_dataset_folder = '../dataset/REDS/train/LR_{}/X{}'.format(folder_subname, scale)
        GT_dataset_folder = '../dataset/REDS/train/HR'

    #### evaluation
    crop_border = 0
    border_frame = N_in // 2  # border frames when evaluating
    # temporal padding mode
    padding = 'new_info'

    save_folder = '../results/{}'.format(data_mode)
    util.mkdirs(save_folder)
    util.setup_logger('base', save_folder, 'test', level=logging.INFO, screen=True, tofile=True)
    logger = logging.getLogger('base')

    #### log info
    logger.info('Data: {} - {}'.format(data_mode, test_dataset_folder))
    logger.info('Padding mode: {}'.format(padding))
    logger.info('Model path: {}'.format(model_path))
    logger.info('Save images: {}'.format(save_imgs))
    logger.info('Flip test: {}'.format(flip_test))

    #### set up the model
    model.load_state_dict(torch.load(model_path), strict=True)
    model.eval()
    model = model.to(device)

    avg_psnr_l, avg_psnr_center_l, avg_psnr_border_l = [], [], []
    avg_ssim_l, avg_ssim_center_l, avg_ssim_border_l = [], [], []
    subfolder_name_l = []

    subfolder_l = sorted(glob.glob(osp.join(test_dataset_folder, '*')))
    subfolder_GT_l = sorted(glob.glob(osp.join(GT_dataset_folder, '*')))
    if data_mode == 'REDS':
        subfolder_GT_l = [
            k for k in subfolder_GT_l
            if k.find('000') >= 0 or k.find('011') >= 0 or k.find('015') >= 0 or k.find('020') >= 0
        ]

    # for each subfolder
    for subfolder, subfolder_GT in zip(subfolder_l, subfolder_GT_l):
        subfolder_name = osp.basename(subfolder)
        subfolder_name_l.append(subfolder_name)
        save_subfolder = osp.join(save_folder, subfolder_name)

        img_path_l = sorted(glob.glob(osp.join(subfolder, '*')))
        max_idx = len(img_path_l)
        if save_imgs:
            util.mkdirs(save_subfolder)

        #### read LQ and GT images
        imgs_LQ = data_util.read_img_seq(subfolder)
        img_GT_l = []
        for img_GT_path in sorted(glob.glob(osp.join(subfolder_GT, '*'))):
            img_GT_l.append(data_util.read_img(None, img_GT_path))

        avg_psnr, avg_psnr_border, avg_psnr_center, N_border, N_center = 0, 0, 0, 0, 0
        avg_ssim, avg_ssim_border, avg_ssim_center = 0, 0, 0

        # process each image
        for img_idx, img_path in enumerate(img_path_l):
            img_name = osp.splitext(osp.basename(img_path))[0]
            select_idx = data_util.index_generation(img_idx, max_idx, N_in, padding=padding)
            imgs_in = imgs_LQ.index_select(0, torch.LongTensor(select_idx)).unsqueeze(0).to(device)

            if flip_test:
                output = util.flipx4_forward(model, imgs_in)
            else:
                output = util.single_forward(model, imgs_in)
            output = util.tensor2img(output.squeeze(0))

            if save_imgs:
                cv2.imwrite(osp.join(save_subfolder, '{}.png'.format(img_name)), output)

            # calculate PSNR
            output = output / 255.
            GT = np.copy(img_GT_l[img_idx])
            '''
            output_tensor = torch.from_numpy(np.copy(output[:, :, ::-1])).permute(2, 0, 1)
            GT_tensor = torch.from_numpy(np.copy(GT[:, :, ::-1])).permute(2, 0, 1).type_as(output_tensor)
            torch.save(output_tensor.cpu(), '../results/sr_test.pt')
            torch.save(GT_tensor.cpu(), '../results/hr_test.pt')
            my_psnr = utility.calc_psnr(output_tensor, GT_tensor)
            GT_tensor = GT_tensor.cpu().numpy().transpose(1, 2, 0)
            imageio.imwrite('../results/hr_test.png', GT_tensor)
            print('saved', my_psnr)
            '''
            '''
            # For REDS, evaluate on RGB channels; for Vid4, evaluate on the Y channel
            if data_mode == 'Vid4' or 'sharp_bicubic' or 'MM522':  # bgr2y, [0, 1]
                GT = data_util.bgr2ycbcr(GT, only_y=True)
                output = data_util.bgr2ycbcr(output, only_y=True)
            '''
            output = (output * 255).round().astype('uint8')
            GT = (GT * 255).round().astype('uint8')
            output, GT = util.crop_border([output, GT], crop_border)
            crt_psnr = util.calculate_psnr(output, GT)
            crt_ssim = 0.001  # util.calculate_ssim(output, GT)
            # logger.info('{:3d} - {:16} \tPSNR: {:.6f} dB \tSSIM: {:.6f}'.format(img_idx + 1, img_name, crt_psnr, crt_ssim))

            if img_idx >= border_frame and img_idx < max_idx - border_frame:  # center frames
                avg_psnr_center += crt_psnr
                avg_ssim_center += crt_ssim
                N_center += 1
            else:  # border frames
                avg_psnr_border += crt_psnr
                avg_ssim_border += crt_ssim
                N_border += 1

        avg_psnr = (avg_psnr_center + avg_psnr_border) / (N_center + N_border)
        avg_psnr_center = avg_psnr_center / N_center
        avg_psnr_border = 0 if N_border == 0 else avg_psnr_border / N_border
        avg_psnr_l.append(avg_psnr)
        avg_psnr_center_l.append(avg_psnr_center)
        avg_psnr_border_l.append(avg_psnr_border)

        avg_ssim = (avg_ssim_center + avg_ssim_border) / (N_center + N_border)
        avg_ssim_center = avg_ssim_center / N_center
        avg_ssim_border = 0 if N_border == 0 else avg_ssim_border / N_border
        avg_ssim_l.append(avg_ssim)
        avg_ssim_center_l.append(avg_ssim_center)
        avg_ssim_border_l.append(avg_ssim_border)

        logger.info('Folder {} - Average PSNR: {:.6f} dB for {} frames; '
                    'Center PSNR: {:.6f} dB for {} frames; '
                    'Border PSNR: {:.6f} dB for {} frames.'.format(
                        subfolder_name, avg_psnr, (N_center + N_border),
                        avg_psnr_center, N_center, avg_psnr_border, N_border))
        logger.info('Folder {} - Average SSIM: {:.6f} for {} frames; '
                    'Center SSIM: {:.6f} for {} frames; '
                    'Border SSIM: {:.6f} for {} frames.'.format(
                        subfolder_name, avg_ssim, (N_center + N_border),
                        avg_ssim_center, N_center, avg_ssim_border, N_border))

    '''
    logger.info('################ Tidy Outputs ################')
    for subfolder_name, psnr, psnr_center, psnr_border in zip(subfolder_name_l, avg_psnr_l,
                                                              avg_psnr_center_l, avg_psnr_border_l):
        logger.info('Folder {} - Average PSNR: {:.6f} dB. '
                    'Center PSNR: {:.6f} dB. '
                    'Border PSNR: {:.6f} dB.'.format(subfolder_name, psnr, psnr_center, psnr_border))
    for subfolder_name, ssim, ssim_center, ssim_border in zip(subfolder_name_l, avg_ssim_l,
                                                              avg_ssim_center_l, avg_ssim_border_l):
        logger.info('Folder {} - Average SSIM: {:.6f}. '
                    'Center SSIM: {:.6f}. '
                    'Border SSIM: {:.6f}.'.format(subfolder_name, ssim, ssim_center, ssim_border))
    '''

    logger.info('################ Final Results ################')
    logger.info('Data: {} - {}'.format(data_mode, test_dataset_folder))
    logger.info('Padding mode: {}'.format(padding))
    logger.info('Model path: {}'.format(model_path))
    logger.info('Save images: {}'.format(save_imgs))
    logger.info('Flip test: {}'.format(flip_test))
    logger.info('Total Average PSNR: {:.6f} dB for {} clips. '
                'Center PSNR: {:.6f} dB. Border PSNR: {:.6f} dB.'.format(
                    sum(avg_psnr_l) / len(avg_psnr_l), len(subfolder_l),
                    sum(avg_psnr_center_l) / len(avg_psnr_center_l),
                    sum(avg_psnr_border_l) / len(avg_psnr_border_l)))
    logger.info('Total Average SSIM: {:.6f} for {} clips. '
                'Center SSIM: {:.6f}. Border SSIM: {:.6f}.'.format(
                    sum(avg_ssim_l) / len(avg_ssim_l), len(subfolder_l),
                    sum(avg_ssim_center_l) / len(avg_ssim_center_l),
                    sum(avg_ssim_border_l) / len(avg_ssim_border_l)))
    print('\n\n\n')
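# util.calculate_psnr above is fed uint8 images in [0, 255]; the quantity it reports is
# the standard MSE-based PSNR. For reference, a minimal equivalent definition (assumed
# to match the repo's helper up to edge-case handling):
import numpy as np

def calculate_psnr(img1, img2):
    """PSNR in dB for images in [0, 255]; identical images give +inf."""
    mse = np.mean((img1.astype(np.float64) - img2.astype(np.float64)) ** 2)
    if mse == 0:
        return float('inf')
    return 20 * np.log10(255.0 / np.sqrt(mse))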
def main():
    #################
    # configurations
    #################
    device = torch.device('cuda')
    os.environ['CUDA_VISIBLE_DEVICES'] = '6'

    test_set = 'AI4K_val'  # Vid4 | YouKu10 | REDS4 | AI4K_val | zhibo | AI4K_val_bic
    test_name = 'PCD_Vis_Test_35_ResNet_alpha_beta_decoder_3x3_IN_encoder_8HW_A01xxx_900000_AI4K_5000'  # 'AI4K_val_Denoise_A02_420000'
    data_mode = 'sharp_bicubic'  # sharp_bicubic | blur_bicubic
    N_in = 5

    # load test set
    if test_set == 'Vid4':
        test_dataset_folder = '../datasets/Vid4/BIx4'
        GT_dataset_folder = '../datasets/Vid4/GT'
    elif test_set == 'YouKu10':
        test_dataset_folder = '../datasets/YouKu10/LR'
        GT_dataset_folder = '../datasets/YouKu10/HR'
    elif test_set == 'YouKu_val':
        test_dataset_folder = '/data0/yhliu/DATA/YouKuVid/valid/valid_lr_bmp'
        GT_dataset_folder = '/data0/yhliu/DATA/YouKuVid/valid/valid_hr_bmp'
    elif test_set == 'REDS4':
        test_dataset_folder = '../datasets/REDS4/{}'.format(data_mode)
        GT_dataset_folder = '../datasets/REDS4/GT'
    elif test_set == 'AI4K_val':
        test_dataset_folder = '/home/yhliu/AI4K/contest2/val2_LR_png/'
        GT_dataset_folder = '/home/yhliu/AI4K/contest1/val1_HR_png/'
    elif test_set == 'AI4K_val_bic':
        test_dataset_folder = '/home/yhliu/AI4K/contest1/val1_LR_png_bic/'
        GT_dataset_folder = '/home/yhliu/AI4K/contest1/val1_HR_png_bic/'
    elif test_set == 'zhibo':
        test_dataset_folder = '/data1/yhliu/SR_ZHIBO_VIDEO/Test_video_LR/'
        GT_dataset_folder = '/data1/yhliu/SR_ZHIBO_VIDEO/Test_video_HR/'

    flip_test = False

    # model_path = '../experiments/pretrained_models/EDVR_Vimeo90K_SR_L.pth'
    # model_path = '../experiments/A01b/models/250000_G.pth'
    # model_path = '../experiments/A02_predenoise/models/415000_G.pth'
    model_path = '../experiments/A37_color_EDVR_35_220000_A01_5in_64f_10b_128_pretrain_A01xxx_900000_fix_before_pcd/models/5000_G.pth'

    predeblur, HR_in = False, False
    back_RBs = 10
    if data_mode == 'blur_bicubic':
        predeblur = True
    if data_mode == 'blur' or data_mode == 'blur_comp':
        predeblur, HR_in = True, True

    model = EDVR_arch.EDVR(64, N_in, 8, 5, back_RBs, predeblur=predeblur, HR_in=HR_in)
    # model = my_EDVR_arch.MYEDVR(64, N_in, 8, 5, back_RBs, predeblur=predeblur, HR_in=HR_in)
    # model = my_EDVR_arch.MYEDVR_RES(64, N_in, 8, 5, back_RBs, predeblur=predeblur, HR_in=HR_in)

    #### evaluation
    crop_border = 0
    border_frame = N_in // 2  # border frames when evaluating
    # temporal padding mode
    if data_mode == 'Vid4' or data_mode == 'sharp_bicubic':
        padding = 'new_info'
    else:
        padding = 'replicate'
    save_imgs = True  # True | False

    save_folder = '../results/{}'.format(test_name)
    if test_set == 'zhibo':
        save_folder = '/data1/yhliu/SR_ZHIBO_VIDEO/SR_png_sample_150'
    util.mkdirs(save_folder)
    util.setup_logger('base', save_folder, 'test', level=logging.INFO, screen=True, tofile=True)
    logger = logging.getLogger('base')

    #### log info
    logger.info('Data: {} - {}'.format(data_mode, test_dataset_folder))
    logger.info('Padding mode: {}'.format(padding))
    logger.info('Model path: {}'.format(model_path))
    logger.info('Save images: {}'.format(save_imgs))
    logger.info('Flip test: {}'.format(flip_test))

    #### set up the model
    model.load_state_dict(torch.load(model_path), strict=True)
    model.eval()
    model = model.to(device)
    model = nn.DataParallel(model)

    avg_psnr_l, avg_psnr_center_l, avg_psnr_border_l = [], [], []
    subfolder_name_l = []

    subfolder_l = sorted(glob.glob(osp.join(test_dataset_folder, '*')))
    subfolder_GT_l = sorted(glob.glob(osp.join(GT_dataset_folder, '*')))
    print(subfolder_l)
    print(subfolder_GT_l)
    # exit()

    # for each subfolder
    for subfolder, subfolder_GT in zip(subfolder_l, subfolder_GT_l):
        subfolder_name = osp.basename(subfolder)
        subfolder_name_l.append(subfolder_name)
        save_subfolder = osp.join(save_folder, subfolder_name)

        img_path_l = sorted(glob.glob(osp.join(subfolder, '*')))
        print(img_path_l)
        max_idx = len(img_path_l)
        if save_imgs:
            util.mkdirs(save_subfolder)

        #### read LQ and GT images
        imgs_LQ = data_util.read_img_seq(subfolder)
        img_GT_l = []
        for img_GT_path in sorted(glob.glob(osp.join(subfolder_GT, '*'))):
            # print(img_GT_path)
            img_GT_l.append(data_util.read_img(None, img_GT_path))
        # print(img_GT_l[0].shape)

        avg_psnr, avg_psnr_border, avg_psnr_center, N_border, N_center = 0, 0, 0, 0, 0

        # process each image
        for img_idx, img_path in enumerate(img_path_l):
            img_name = osp.splitext(osp.basename(img_path))[0]
            select_idx = data_util.index_generation(img_idx, max_idx, N_in, padding=padding)
            imgs_in = imgs_LQ.index_select(0, torch.LongTensor(select_idx)).unsqueeze(0).cpu()  # .to(device)
            print(imgs_in.size())

            if flip_test:
                output = util.flipx4_forward(model, imgs_in)
            else:
                start_time = time.time()
                output = util.single_forward(model, imgs_in)
                end_time = time.time()
                print('Forward One image:', end_time - start_time)
            output = util.tensor2img(output.squeeze(0))

            if save_imgs:
                cv2.imwrite(osp.join(save_subfolder, '{}.png'.format(img_name)), output)

            # calculate PSNR
            output = output / 255.
            GT = np.copy(img_GT_l[img_idx])
            # For REDS, evaluate on RGB channels; for Vid4, evaluate on the Y channel
            '''
            if data_mode == 'Vid4':  # bgr2y, [0, 1]
                GT = data_util.bgr2ycbcr(GT, only_y=True)
                output = data_util.bgr2ycbcr(output, only_y=True)
            '''
            output, GT = util.crop_border([output, GT], crop_border)
            crt_psnr = util.calculate_psnr(output * 255, GT * 255)
            logger.info('{:3d} - {:25} \tPSNR: {:.6f} dB'.format(img_idx + 1, img_name, crt_psnr))

            if img_idx >= border_frame and img_idx < max_idx - border_frame:  # center frames
                avg_psnr_center += crt_psnr
                N_center += 1
            else:  # border frames
                avg_psnr_border += crt_psnr
                N_border += 1

        avg_psnr = (avg_psnr_center + avg_psnr_border) / (N_center + N_border)
        avg_psnr_center = avg_psnr_center / N_center
        avg_psnr_border = 0 if N_border == 0 else avg_psnr_border / N_border
        avg_psnr_l.append(avg_psnr)
        avg_psnr_center_l.append(avg_psnr_center)
        avg_psnr_border_l.append(avg_psnr_border)

        logger.info('Folder {} - Average PSNR: {:.6f} dB for {} frames; '
                    'Center PSNR: {:.6f} dB for {} frames; '
                    'Border PSNR: {:.6f} dB for {} frames.'.format(
                        subfolder_name, avg_psnr, (N_center + N_border),
                        avg_psnr_center, N_center, avg_psnr_border, N_border))

    logger.info('################ Tidy Outputs ################')
    for subfolder_name, psnr, psnr_center, psnr_border in zip(
            subfolder_name_l, avg_psnr_l, avg_psnr_center_l, avg_psnr_border_l):
        logger.info('Folder {} - Average PSNR: {:.6f} dB. '
                    'Center PSNR: {:.6f} dB. '
                    'Border PSNR: {:.6f} dB.'.format(subfolder_name, psnr, psnr_center, psnr_border))

    logger.info('################ Final Results ################')
    logger.info('Data: {} - {}'.format(data_mode, test_dataset_folder))
    logger.info('Padding mode: {}'.format(padding))
    logger.info('Model path: {}'.format(model_path))
    logger.info('Save images: {}'.format(save_imgs))
    logger.info('Flip test: {}'.format(flip_test))
    logger.info('Total Average PSNR: {:.6f} dB for {} clips. '
                'Center PSNR: {:.6f} dB. Border PSNR: {:.6f} dB.'.format(
                    sum(avg_psnr_l) / len(avg_psnr_l), len(subfolder_l),
                    sum(avg_psnr_center_l) / len(avg_psnr_center_l),
                    sum(avg_psnr_border_l) / len(avg_psnr_border_l)))
def define_G(opt):
    opt_net = opt['network_G']
    which_model = opt_net['which_model_G']

    # image restoration
    if which_model == 'MSRResNet':
        netG = SRResNet_arch.MSRResNet(in_nc=opt_net['in_nc'], out_nc=opt_net['out_nc'],
                                       nf=opt_net['nf'], nb=opt_net['nb'],
                                       upscale=opt_net['scale'])
    elif which_model == 'RRDBNet':
        netG = RRDBNet_arch.RRDBNet(in_nc=opt_net['in_nc'], out_nc=opt_net['out_nc'],
                                    nf=opt_net['nf'], nb=opt_net['nb'])
    # video restoration
    elif which_model == 'EDVR':
        netG = EDVR_arch.EDVR(nf=opt_net['nf'], nframes=opt_net['nframes'],
                              groups=opt_net['groups'], front_RBs=opt_net['front_RBs'],
                              back_RBs=opt_net['back_RBs'], center=opt_net['center'],
                              predeblur=opt_net['predeblur'], HR_in=opt_net['HR_in'],
                              w_TSA=opt_net['w_TSA'])
    elif which_model == 'EDVR2X':
        netG = EDVR_arch.EDVR2X(nf=opt_net['nf'], nframes=opt_net['nframes'],
                                groups=opt_net['groups'], front_RBs=opt_net['front_RBs'],
                                back_RBs=opt_net['back_RBs'], center=opt_net['center'],
                                predeblur=opt_net['predeblur'], HR_in=opt_net['HR_in'],
                                w_TSA=opt_net['w_TSA'])
    elif which_model == 'EDVRImg':
        netG = EDVR_arch.EDVRImage(nf=opt_net['nf'], front_RBs=opt_net['front_RBs'],
                                   back_RBs=opt_net['back_RBs'],
                                   down_scale=opt_net['down_scale'])
    elif which_model == 'EDVR3D':
        netG = EDVR_arch.EDVR3D(nf=opt_net['nf'], front_RBs=opt_net['front_RBs'],
                                back_RBs=opt_net['back_RBs'],
                                down_scale=opt_net['down_scale'])
    elif which_model == 'UPEDVR':
        netG = EDVR_arch.UPEDVR(nf=opt_net['nf'], nframes=opt_net['nframes'],
                                groups=opt_net['groups'], front_RBs=opt_net['front_RBs'],
                                back_RBs=opt_net['back_RBs'], center=opt_net['center'],
                                w_TSA=opt_net['w_TSA'], down_scale=opt_net['down_scale'],
                                align_target=opt_net['align_target'],
                                ret_valid=opt_net['ret_valid'])
    elif which_model == 'UPContEDVR':
        netG = EDVR_arch.UPControlEDVR(nf=opt_net['nf'], nframes=opt_net['nframes'],
                                       groups=opt_net['groups'], front_RBs=opt_net['front_RBs'],
                                       back_RBs=opt_net['back_RBs'], center=opt_net['center'],
                                       w_TSA=opt_net['w_TSA'], down_scale=opt_net['down_scale'],
                                       align_target=opt_net['align_target'],
                                       ret_valid=opt_net['ret_valid'],
                                       multi_scale_cont=opt_net['multi_scale_cont'])
    elif which_model == 'FlowUPContEDVR':
        netG = EDVR_arch.FlowUPControlEDVR(nf=opt_net['nf'], nframes=opt_net['nframes'],
                                           groups=opt_net['groups'], front_RBs=opt_net['front_RBs'],
                                           back_RBs=opt_net['back_RBs'], center=opt_net['center'],
                                           w_TSA=opt_net['w_TSA'], down_scale=opt_net['down_scale'],
                                           align_target=opt_net['align_target'],
                                           ret_valid=opt_net['ret_valid'],
                                           multi_scale_cont=opt_net['multi_scale_cont'])
    # video SR for multiple target frames
    elif which_model == 'MultiEDVR':
        netG = EDVR_arch.MultiEDVR(nf=opt_net['nf'], nframes=opt_net['nframes'],
                                   groups=opt_net['groups'], front_RBs=opt_net['front_RBs'],
                                   back_RBs=opt_net['back_RBs'], center=opt_net['center'],
                                   predeblur=opt_net['predeblur'], HR_in=opt_net['HR_in'],
                                   w_TSA=opt_net['w_TSA'])
    # arbitrary-magnification video super-resolution
    elif which_model == 'MetaEDVR':
        netG = EDVR_arch.MetaEDVR(nf=opt_net['nf'], nframes=opt_net['nframes'],
                                  groups=opt_net['groups'], front_RBs=opt_net['front_RBs'],
                                  back_RBs=opt_net['back_RBs'], center=opt_net['center'],
                                  predeblur=opt_net['predeblur'], HR_in=opt_net['HR_in'],
                                  w_TSA=opt_net['w_TSA'], fix_edvr=opt_net['fix_edvr'])
    else:
        raise NotImplementedError('Generator model [{:s}] not recognized'.format(which_model))

    return netG
if data_mode == 'Vid4':
    N_in = 7  # use N_in images to restore one HR image
else:
    N_in = 5
predeblur, HR_in = False, False
back_RBs = 40
if data_mode == 'blur_bicubic':
    predeblur = True
if data_mode == 'blur' or data_mode == 'blur_comp':
    predeblur, HR_in = True, True
if stage == 2:
    HR_in = True
    back_RBs = 20
model = EDVR_arch.EDVR(128, N_in, 8, 5, back_RBs, predeblur=predeblur, HR_in=HR_in)

#### dataset
if data_mode == 'Vid4':
    test_dataset_folder = '../datasets/Vid4/BIx4'
    GT_dataset_folder = '../datasets/Vid4/GT'
else:
    if stage == 1:
        test_dataset_folder = '../datasets/REDS4/{}'.format(data_mode)
    else:
        test_dataset_folder = '../results/REDS-EDVR_REDS_SR_L_flipx4'
        print('You should modify the test_dataset_folder path for stage 2')
    GT_dataset_folder = '../datasets/REDS4/GT'

#### evaluation
crop_border = 0
def main():
    #################
    # configurations
    #################
    device = torch.device('cuda')
    os.environ['CUDA_VISIBLE_DEVICES'] = '1'
    data_mode = 'ai4khdr_test'
    flip_test = False

    ############################################################################
    #### model
    if data_mode == 'ai4khdr_test':
        model_path = '../experiments/002_EDVR_lr4e-4_600k_AI4KHDR/models/4000_G.pth'
    else:
        raise NotImplementedError
    N_in = 5
    front_RBs = 5
    back_RBs = 10
    predeblur, HR_in = False, False
    model = EDVR_arch.EDVR(64, N_in, 8, front_RBs, back_RBs, predeblur=predeblur, HR_in=HR_in)

    ############################################################################
    #### dataset
    if data_mode == 'ai4khdr_test':
        test_dataset_folder = '/workspace/nas_mengdongwei/dataset/AI4KHDR/test/540p_frames'
    else:
        raise NotImplementedError

    ############################################################################
    #### evaluation
    crop_border = 0
    border_frame = N_in // 2  # border frames when evaluating
    # temporal padding mode
    if data_mode == 'ai4khdr_test':
        padding = 'new_info'
    else:
        padding = 'replicate'
    save_imgs = True
    save_folder = '../results/{}_{}'.format(data_mode, util.get_timestamp())
    util.mkdirs(save_folder)
    util.setup_logger('base', save_folder, 'test', level=logging.INFO, screen=True, tofile=True)
    logger = logging.getLogger('base')

    #### log info
    logger.info('Data: {} - {}'.format(data_mode, test_dataset_folder))
    logger.info('Padding mode: {}'.format(padding))
    logger.info('Model path: {}'.format(model_path))
    logger.info('Save images: {}'.format(save_imgs))
    logger.info('Flip test: {}'.format(flip_test))

    #### set up the model
    model.load_state_dict(torch.load(model_path), strict=False)
    model.eval()
    model = model.to(device)

    subfolder_name_l = []
    subfolder_l = sorted(glob.glob(osp.join(test_dataset_folder, '*')))

    # for each subfolder
    for subfolder in subfolder_l:
        subfolder_name = osp.basename(subfolder)
        subfolder_name_l.append(subfolder_name)
        save_subfolder = osp.join(save_folder, subfolder_name)

        img_path_l = sorted(glob.glob(osp.join(subfolder, '*')))
        max_idx = len(img_path_l)
        if save_imgs:
            util.mkdirs(save_subfolder)

        #### read LQ images
        imgs_LQ = data_util.read_img_seq(subfolder)

        # process each image
        for img_idx, img_path in enumerate(img_path_l):
            img_name = osp.splitext(osp.basename(img_path))[0]
            select_idx = data_util.index_generation(img_idx, max_idx, N_in, padding=padding)
            imgs_in = imgs_LQ.index_select(0, torch.LongTensor(select_idx)).unsqueeze(0).to(device)

            if flip_test:
                output = util.flipx4_forward(model, imgs_in)
            else:
                output = util.single_forward(model, imgs_in)
            output = util.tensor2img(output.squeeze(0))

            if save_imgs:
                cv2.imwrite(osp.join(save_subfolder, '{}.png'.format(img_name)), output)
        logger.info('Folder {}'.format(subfolder_name))

    logger.info('################ Final Results ################')
    logger.info('Data: {} - {}'.format(data_mode, test_dataset_folder))
    logger.info('Padding mode: {}'.format(padding))
    logger.info('Model path: {}'.format(model_path))
    logger.info('Save images: {}'.format(save_imgs))
    logger.info('Flip test: {}'.format(flip_test))
def main():
    #################
    # configurations
    #################
    device = torch.device('cuda')
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    data_mode = 'HDR'  # Vid4 | sharp_bicubic | blur_bicubic | blur | blur_comp
    # Vid4: SR
    # REDS4: sharp_bicubic (SR-clean), blur_bicubic (SR-blur);
    #        blur (deblur-clean), blur_comp (deblur-compression).
    stage = 1  # 1 or 2, use the two-stage strategy for the REDS dataset
    flip_test = True

    ############################################################################
    #### model
    if data_mode == 'Vid4':
        if stage == 1:
            model_path = '../experiments/pretrained_models/EDVR_Vimeo90K_SR_L.pth'
        else:
            raise ValueError('Vid4 does not support stage 2.')
    elif data_mode == 'sharp_bicubic':
        if stage == 1:
            model_path = '../experiments/pretrained_models/EDVR_REDS_SR_L.pth'
        else:
            model_path = '../experiments/pretrained_models/EDVR_REDS_SR_Stage2.pth'
    elif data_mode == 'blur_bicubic':
        if stage == 1:
            model_path = '../experiments/pretrained_models/EDVR_REDS_SRblur_L.pth'
        else:
            model_path = '../experiments/pretrained_models/EDVR_REDS_SRblur_Stage2.pth'
    elif data_mode == 'blur':
        if stage == 1:
            model_path = '../experiments/pretrained_models/EDVR_REDS_deblur_L.pth'
        else:
            model_path = '../experiments/pretrained_models/EDVR_REDS_deblur_Stage2.pth'
    elif data_mode == 'blur_comp':
        if stage == 1:
            model_path = '../experiments/pretrained_models/EDVR_REDS_deblurcomp_L.pth'
        else:
            model_path = '../experiments/pretrained_models/EDVR_REDS_deblurcomp_Stage2.pth'
    elif data_mode == 'HDR':
        # model_path = '../experiments/001_EDVR_scratch_lr4e-4_600k_HDR_LrCAR4S/models/20000_G.pth'
        # model_path = '../experiments/005_EDVRwoTSA_scratch_lr4e-4_600k_HDR_LrCAR4S/models/490000_G.pth'
        model_path = '../experiments/pretrained_models/50000_G.pth'
    else:
        raise NotImplementedError

    if data_mode == 'Vid4':
        N_in = 7  # use N_in images to restore one HR image
    else:
        N_in = 5

    predeblur, HR_in = False, False
    back_RBs = 10
    if data_mode == 'blur_bicubic':
        predeblur = True
    if data_mode == 'blur' or data_mode == 'blur_comp':
        predeblur, HR_in = True, True
    if data_mode == 'HDR':
        predeblur = True
    if stage == 2:
        HR_in = True
        back_RBs = 20
    model = EDVR_arch.EDVR(64, N_in, 8, 5, back_RBs, predeblur=predeblur, HR_in=HR_in, w_TSA=True)

    #### dataset
    GT_dataset_folder = None
    if data_mode == 'Vid4':
        test_dataset_folder = '../datasets/Vid4/BIx4'
        GT_dataset_folder = '../datasets/Vid4/GT'
    elif data_mode == 'HDR':
        test_dataset_folder = '../datasets/HDR/valid/new_method/540p'
        GT_dataset_folder = '../datasets/HDR/valid/new_method/4k'
        # test_dataset_folder = '../datasets/HDR/valid/sequences_540'
        # GT_dataset_folder = '../datasets/HDR/valid/sequences_4k'
    else:
        if stage == 1:
            test_dataset_folder = '../datasets/REDS4/{}'.format(data_mode)
        else:
            test_dataset_folder = '../results/REDS-EDVR_REDS_SR_L_flipx4'
            print('You should modify the test_dataset_folder path for stage 2')
        GT_dataset_folder = '../datasets/REDS4/GT'

    #### evaluation
    crop_border = 0
    border_frame = N_in // 2  # border frames when evaluating
    # temporal padding mode
    if data_mode == 'Vid4' or data_mode == 'sharp_bicubic':
        padding = 'new_info'
    else:
        padding = 'replicate'
    save_imgs = True

    save_folder = '../results/{}_50000'.format(data_mode)
    util.mkdirs(save_folder)
    util.setup_logger('base', save_folder, 'test', level=logging.INFO, screen=True, tofile=True)
    logger = logging.getLogger('base')

    #### log info
    logger.info('Data: {} - {}'.format(data_mode, test_dataset_folder))
    logger.info('Padding mode: {}'.format(padding))
    logger.info('Model path: {}'.format(model_path))
    logger.info('Save images: {}'.format(save_imgs))
    logger.info('Flip test: {}'.format(flip_test))

    #### set up the model
    model.load_state_dict(torch.load(model_path), strict=True)
    model.eval()
    model = model.to(device)

    avg_psnr_l, avg_psnr_center_l, avg_psnr_border_l = [], [], []
    subfolder_name_l = []

    subfolder_l = sorted(glob.glob(osp.join(test_dataset_folder, '*')))
    subfolder_GT_l = sorted(glob.glob(osp.join(GT_dataset_folder, '*')))

    # for each subfolder
    for subfolder, subfolder_GT in zip(subfolder_l, subfolder_GT_l):
        # print(subfolder, subfolder_GT)
        if '10675978' not in subfolder:
            print('pass')
            continue
        subfolder_name = osp.basename(subfolder)
        subfolder_name_l.append(subfolder_name)
        save_subfolder = osp.join(save_folder, subfolder_name)

        img_path_l = sorted(glob.glob(osp.join(subfolder, '*')))
        max_idx = len(img_path_l)
        if save_imgs:
            util.mkdirs(save_subfolder)

        #### read LQ and GT images
        imgs_LQ = data_util.read_img_seq(subfolder)
        img_GT_l = []
        for img_GT_path in sorted(glob.glob(osp.join(subfolder_GT, '*'))):
            img_GT_l.append(data_util.read_img(None, img_GT_path))

        avg_psnr, avg_psnr_border, avg_psnr_center, N_border, N_center = 0, 0, 0, 0, 0

        # process each image
        for img_idx, img_path in enumerate(img_path_l):
            img_name = osp.splitext(osp.basename(img_path))[0]
            select_idx = data_util.index_generation(img_idx, max_idx, N_in, padding=padding)
            imgs_in = imgs_LQ.index_select(0, torch.LongTensor(select_idx)).unsqueeze(0).to(device)

            if flip_test:
                output = util.flipx4_forward(model, imgs_in)
            else:
                output = util.single_forward(model, imgs_in)
            output = util.tensor2img(output.squeeze(0))

            if save_imgs:
                cv2.imwrite(osp.join(save_subfolder, '{}.png'.format(img_name)), output)

            # calculate PSNR
            # output = output / 255.
            # GT = np.copy(img_GT_l[img_idx])
            # # For REDS, evaluate on RGB channels; for Vid4, evaluate on the Y channel
            # if data_mode == 'Vid4':  # bgr2y, [0, 1]
            #     GT = data_util.bgr2ycbcr(GT, only_y=True)
            #     output = data_util.bgr2ycbcr(output, only_y=True)
            # output, GT = util.crop_border([output, GT], crop_border)
            # crt_psnr = util.calculate_psnr(output * 255, GT * 255)
            # # crt_ssim = util.calculate_ssim(output * 255, GT * 255)
            logger.info('{:3d} - {:25} '.format(img_idx + 1, img_name))
def main(): #################### # arguments parser # #################### # [format] dataset(vid4, REDS4) N(number of frames) parser = argparse.ArgumentParser() parser.add_argument('dataset') parser.add_argument('n_frames') parser.add_argument('stage') args = parser.parse_args() data_mode = str(args.dataset) N_in = int(args.n_frames) stage = int(args.stage) #if args.command == 'start': # start(int(args.params[0])) #elif args.command == 'stop': # stop(args.params[0], int(args.params[1])) #elif args.command == 'stop_all': # stop_all(args.params[0]) ################# # configurations ################# device = torch.device('cuda') os.environ['CUDA_VISIBLE_DEVICES'] = '0' #data_mode = 'Vid4' # Vid4 | sharp_bicubic | blur_bicubic | blur | blur_comp # Vid4: SR # REDS4: sharp_bicubic (SR-clean), blur_bicubic (SR-blur); # blur (deblur-clean), blur_comp (deblur-compression). #stage = 1 # 1 or 2, use two stage strategy for REDS dataset. flip_test = False ############################################################################ #### model if data_mode == 'Vid4': if stage == 1: model_path = '../experiments/pretrained_models/EDVR_Vimeo90K_SR_L.pth' else: raise ValueError('Vid4 does not support stage 2.') elif data_mode == 'sharp_bicubic': if stage == 1: model_path = '../experiments/pretrained_models/EDVR_REDS_SR_L.pth' else: model_path = '../experiments/pretrained_models/EDVR_REDS_SR_Stage2.pth' elif data_mode == 'blur_bicubic': if stage == 1: model_path = '../experiments/pretrained_models/EDVR_REDS_SRblur_L.pth' else: model_path = '../experiments/pretrained_models/EDVR_REDS_SRblur_Stage2.pth' elif data_mode == 'blur': if stage == 1: model_path = '../experiments/pretrained_models/EDVR_REDS_deblur_L.pth' else: model_path = '../experiments/pretrained_models/EDVR_REDS_deblur_Stage2.pth' elif data_mode == 'blur_comp': if stage == 1: model_path = '../experiments/pretrained_models/EDVR_REDS_deblurcomp_L.pth' else: model_path = '../experiments/pretrained_models/EDVR_REDS_deblurcomp_Stage2.pth' else: raise NotImplementedError predeblur, HR_in = False, False back_RBs = 40 if data_mode == 'blur_bicubic': predeblur = True if data_mode == 'blur' or data_mode == 'blur_comp': predeblur, HR_in = True, True if stage == 2: HR_in = True back_RBs = 20 #### dataset if data_mode == 'Vid4': N_model_default = 7 test_dataset_folder = '../datasets/Vid4/BIx4' GT_dataset_folder = '../datasets/Vid4/GT' else: N_model_default = 5 if stage == 1: test_dataset_folder = '../datasets/REDS4/{}'.format(data_mode) else: test_dataset_folder = '../results/REDS-EDVR_REDS_SR_L_flipx4' print('You should modify the test_dataset_folder path for stage 2') GT_dataset_folder = '../datasets/REDS4/GT' raw_model = EDVR_arch.EDVR(128, N_model_default, 8, 5, back_RBs, predeblur=predeblur, HR_in=HR_in) model = EDVR_arch.EDVR(128, N_in, 8, 5, back_RBs, predeblur=predeblur, HR_in=HR_in) #### evaluation crop_border = 0 border_frame = N_in // 2 # border frames when evaluate # temporal padding mode if data_mode == 'Vid4' or data_mode == 'sharp_bicubic': padding = 'new_info' else: padding = 'replicate' save_imgs = True data_mode_t = copy.deepcopy(data_mode) if stage == 1 and data_mode_t != 'Vid4': data_mode = 'REDS-EDVR_REDS_SR_L_flipx4' save_folder = '../results/{}'.format(data_mode) data_mode = copy.deepcopy(data_mode_t) util.mkdirs(save_folder) util.setup_logger('base', save_folder, 'test', level=logging.INFO, screen=True, tofile=True) logger = logging.getLogger('base') #### log info logger.info('Data: {} - {}'.format(data_mode, test_dataset_folder)) 
logger.info('Padding mode: {}'.format(padding)) logger.info('Model path: {}'.format(model_path)) logger.info('Save images: {}'.format(save_imgs)) logger.info('Flip test: {}'.format(flip_test)) #### set up the models print([a for a in dir(model) if not callable(getattr(model, a))]) # not a.startswith('__') and #model.load_state_dict(torch.load(model_path), strict=True) raw_model.load_state_dict(torch.load(model_path), strict=True) # model.load_state_dict(torch.load(model_path), strict=True) #### change model so it can work with less input model.nf = raw_model.nf model.center = N_in // 2 # if center is None else center model.is_predeblur = raw_model.is_predeblur model.HR_in = raw_model.HR_in model.w_TSA = raw_model.w_TSA #ResidualBlock_noBN_f = functools.partial(arch_util.ResidualBlock_noBN, nf=nf) #### extract features (for each frame) if model.is_predeblur: model.pre_deblur = raw_model.pre_deblur #Predeblur_ResNet_Pyramid(nf=nf, HR_in=self.HR_in) model.conv_1x1 = raw_model.conv_1x1 #nn.Conv2d(nf, nf, 1, 1, bias=True) else: if model.HR_in: model.conv_first_1 = raw_model.conv_first_1 #nn.Conv2d(3, nf, 3, 1, 1, bias=True) model.conv_first_2 = raw_model.conv_first_2 #nn.Conv2d(nf, nf, 3, 2, 1, bias=True) model.conv_first_3 = raw_model.conv_first_3 #nn.Conv2d(nf, nf, 3, 2, 1, bias=True) else: model.conv_first = raw_model.conv_first # nn.Conv2d(3, nf, 3, 1, 1, bias=True) model.feature_extraction = raw_model.feature_extraction # arch_util.make_layer(ResidualBlock_noBN_f, front_RBs) model.fea_L2_conv1 = raw_model.fea_L2_conv1 #nn.Conv2d(nf, nf, 3, 2, 1, bias=True) model.fea_L2_conv2 = raw_model.fea_L2_conv2 #nn.Conv2d(nf, nf, 3, 1, 1, bias=True) model.fea_L3_conv1 = raw_model.fea_L3_conv1 #nn.Conv2d(nf, nf, 3, 2, 1, bias=True) model.fea_L3_conv2 = raw_model.fea_L3_conv2 #nn.Conv2d(nf, nf, 3, 1, 1, bias=True) model.pcd_align = raw_model.pcd_align #PCD_Align(nf=nf, groups=groups) ######## Resize TSA model.tsa_fusion.center = model.center # temporal attention (before fusion conv) model.tsa_fusion.tAtt_1 = raw_model.tsa_fusion.tAtt_1 model.tsa_fusion.tAtt_2 = raw_model.tsa_fusion.tAtt_2 # fusion conv: using 1x1 to save parameters and computation #print(raw_model.tsa_fusion.fea_fusion.weight.shape) #print(raw_model.tsa_fusion.fea_fusion.weight.shape) #print(raw_model.tsa_fusion.fea_fusion.weight[127][639].shape) #print("MAIN SHAPE(FEA): ", raw_model.tsa_fusion.fea_fusion.weight.shape) model.tsa_fusion.fea_fusion = copy.deepcopy( raw_model.tsa_fusion.fea_fusion) model.tsa_fusion.fea_fusion.weight = copy.deepcopy( torch.nn.Parameter(raw_model.tsa_fusion.fea_fusion.weight[:, 0:N_in * 128, :, :])) #[:][] #nn.Conv2d(nframes * nf, nf, 1, 1, bias=True) #model.tsa_fusion.fea_fusion.bias = raw_model.tsa_fusion.fea_fusion.bias # spatial attention (after fusion conv) model.tsa_fusion.sAtt_1 = copy.deepcopy(raw_model.tsa_fusion.sAtt_1) model.tsa_fusion.sAtt_1.weight = copy.deepcopy( torch.nn.Parameter(raw_model.tsa_fusion.sAtt_1.weight[:, 0:N_in * 128, :, :])) #[:][] #nn.Conv2d(nframes * nf, nf, 1, 1, bias=True) #model.tsa_fusion.sAtt_1.bias = raw_model.tsa_fusion.sAtt_1.bias #print(N_in * 128) #print(raw_model.tsa_fusion.fea_fusion.weight[:, 0:N_in * 128, :, :].shape) print("MODEL TSA SHAPE: ", model.tsa_fusion.fea_fusion.weight.shape) model.tsa_fusion.maxpool = raw_model.tsa_fusion.maxpool model.tsa_fusion.avgpool = raw_model.tsa_fusion.avgpool model.tsa_fusion.sAtt_2 = raw_model.tsa_fusion.sAtt_2 model.tsa_fusion.sAtt_3 = raw_model.tsa_fusion.sAtt_3 model.tsa_fusion.sAtt_4 = raw_model.tsa_fusion.sAtt_4 
model.tsa_fusion.sAtt_5 = raw_model.tsa_fusion.sAtt_5 model.tsa_fusion.sAtt_L1 = raw_model.tsa_fusion.sAtt_L1 model.tsa_fusion.sAtt_L2 = raw_model.tsa_fusion.sAtt_L2 model.tsa_fusion.sAtt_L3 = raw_model.tsa_fusion.sAtt_L3 model.tsa_fusion.sAtt_add_1 = raw_model.tsa_fusion.sAtt_add_1 model.tsa_fusion.sAtt_add_2 = raw_model.tsa_fusion.sAtt_add_2 model.tsa_fusion.lrelu = raw_model.tsa_fusion.lrelu #if model.w_TSA: # model.tsa_fusion = raw_model.tsa_fusion[:][:128 * N_in][:][:] #TSA_Fusion(nf=nf, nframes=nframes, center=self.center) #else: # model.tsa_fusion = raw_model.tsa_fusion[:][:128 * N_in][:][:] #nn.Conv2d(nframes * nf, nf, 1, 1, bias=True) # print(self.tsa_fusion) #### reconstruction model.recon_trunk = raw_model.recon_trunk # arch_util.make_layer(ResidualBlock_noBN_f, back_RBs) #### upsampling model.upconv1 = raw_model.upconv1 #nn.Conv2d(nf, nf * 4, 3, 1, 1, bias=True) model.upconv2 = raw_model.upconv2 #nn.Conv2d(nf, 64 * 4, 3, 1, 1, bias=True) model.pixel_shuffle = raw_model.pixel_shuffle # nn.PixelShuffle(2) model.HRconv = raw_model.HRconv model.conv_last = raw_model.conv_last #### activation function model.lrelu = raw_model.lrelu ##################################################### model.eval() model = model.to(device) avg_ssim_l, avg_ssim_center_l, avg_ssim_border_l = [], [], [] subfolder_name_l = [] subfolder_l = sorted(glob.glob(osp.join(test_dataset_folder, '*'))) subfolder_GT_l = sorted(glob.glob(osp.join(GT_dataset_folder, '*'))) # for each subfolder for subfolder, subfolder_GT in zip(subfolder_l, subfolder_GT_l): subfolder_name = osp.basename(subfolder) subfolder_name_l.append(subfolder_name) save_subfolder = osp.join(save_folder, subfolder_name) img_path_l = sorted(glob.glob(osp.join(subfolder, '*'))) max_idx = len(img_path_l) print("MAX_IDX: ", max_idx) if save_imgs: util.mkdirs(save_subfolder) #### read LQ and GT images imgs_LQ = data_util.read_img_seq(subfolder) img_GT_l = [] for img_GT_path in sorted(glob.glob(osp.join(subfolder_GT, '*'))): img_GT_l.append(data_util.read_img(None, img_GT_path)) avg_ssim, avg_ssim_border, avg_ssim_center, N_border, N_center = 0, 0, 0, 0, 0 # process each image for img_idx, img_path in enumerate(img_path_l): img_name = osp.splitext(osp.basename(img_path))[0] if data_mode == "blur": select_idx = data_util.glarefree_index_generation( img_idx, max_idx, N_in, padding=padding) else: select_idx = data_util.index_generation( img_idx, max_idx, N_in, padding=padding) # HERE GOTCHA print("SELECT IDX: ", select_idx) imgs_in = imgs_LQ.index_select( 0, torch.LongTensor(select_idx)).unsqueeze(0).to(device) if flip_test: output = util.flipx4_forward(model, imgs_in) else: print("IMGS_IN SHAPE: ", imgs_in.shape) # check this output = util.single_forward(model, imgs_in) # error here 1 output = util.tensor2img(output.squeeze(0)) if save_imgs: cv2.imwrite( osp.join(save_subfolder, '{}.png'.format(img_name)), output) # calculate SSIM output = output / 255. 
GT = np.copy(img_GT_l[img_idx]) # For REDS, evaluate on RGB channels; for Vid4, evaluate on the Y channel if data_mode == 'Vid4': # bgr2y, [0, 1] GT = data_util.bgr2ycbcr(GT, only_y=True) output = data_util.bgr2ycbcr(output, only_y=True) output, GT = util.crop_border([output, GT], crop_border) crt_ssim = util.calculate_ssim(output * 255, GT * 255) logger.info('{:3d} - {:25} \tSSIM: {:.6f}'.format( img_idx + 1, img_name, crt_ssim)) if img_idx >= border_frame and img_idx < max_idx - border_frame: # center frames avg_ssim_center += crt_ssim N_center += 1 else: # border frames avg_ssim_border += crt_ssim N_border += 1 avg_ssim = (avg_ssim_center + avg_ssim_border) / (N_center + N_border) avg_ssim_center = avg_ssim_center / N_center avg_ssim_border = 0 if N_border == 0 else avg_ssim_border / N_border avg_ssim_l.append(avg_ssim) avg_ssim_center_l.append(avg_ssim_center) avg_ssim_border_l.append(avg_ssim_border) logger.info('Folder {} - Average SSIM: {:.6f} for {} frames; ' 'Center SSIM: {:.6f} for {} frames; ' 'Border SSIM: {:.6f} for {} frames.'.format( subfolder_name, avg_ssim, (N_center + N_border), avg_ssim_center, N_center, avg_ssim_border, N_border)) logger.info('################ Tidy Outputs ################') for subfolder_name, ssim, ssim_center, ssim_border in zip( subfolder_name_l, avg_ssim_l, avg_ssim_center_l, avg_ssim_border_l): logger.info('Folder {} - Average SSIM: {:.6f}. ' 'Center SSIM: {:.6f}. ' 'Border SSIM: {:.6f}.'.format(subfolder_name, ssim, ssim_center, ssim_border)) logger.info('################ Final Results ################') logger.info('Data: {} - {}'.format(data_mode, test_dataset_folder)) logger.info('Padding mode: {}'.format(padding)) logger.info('Model path: {}'.format(model_path)) logger.info('Save images: {}'.format(save_imgs)) logger.info('Flip test: {}'.format(flip_test)) logger.info('Total Average SSIM: {:.6f} for {} clips. ' 'Center SSIM: {:.6f}. Border SSIM: {:.6f}.'.format( sum(avg_ssim_l) / len(avg_ssim_l), len(subfolder_l), sum(avg_ssim_center_l) / len(avg_ssim_center_l), sum(avg_ssim_border_l) / len(avg_ssim_border_l)))
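The script above rebuilds a pretrained EDVR for a smaller temporal window by copying every layer verbatim and only slicing the two TSA convolutions whose input width depends on nframes * nf. Below is a minimal sketch of that slicing idea with made-up sizes (nf, nframes_full, n_in are illustrative, not taken from the checkpoint above):

import copy
import torch
import torch.nn as nn

# Hypothetical sizes: a fusion conv trained for nframes_full frames of nf channels each
nf, nframes_full, n_in = 64, 5, 3

full_fusion = nn.Conv2d(nframes_full * nf, nf, 1, 1, bias=True)   # stands in for the pretrained layer
small_fusion = copy.deepcopy(full_fusion)
# keep only the input channels belonging to the first n_in frames
small_fusion.weight = nn.Parameter(full_fusion.weight[:, :n_in * nf].clone())
small_fusion.in_channels = n_in * nf  # keep the module metadata consistent

x = torch.randn(1, n_in * nf, 32, 32)
print(small_fusion(x).shape)  # torch.Size([1, 64, 32, 32])

Note that this keeps the weights of the first n_in frames only; whether that is the right subset depends on which frame indices the reduced window actually feeds into the fusion layer.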
def main(args):
    print("===> Loading datasets")
    data_set = DatasetLoader(args.data_lr,
                             args.data_hr,
                             size_w=args.size_w,
                             size_h=args.size_h,
                             scale=args.scale,
                             n_frames=args.n_frames,
                             interval_list=args.interval_list,
                             border_mode=args.border_mode,
                             random_reverse=args.random_reverse)
    train_loader = DataLoader(data_set,
                              batch_size=args.batch_size,
                              num_workers=args.workers,
                              shuffle=True,
                              pin_memory=False,
                              drop_last=True)

    #### random seed
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    cudnn.benchmark = True
    # cudnn.deterministic = True

    print("===> Building model")
    #### create model
    model = EDVR_arch.EDVR(nf=args.nf,
                           nframes=args.n_frames,
                           groups=args.groups,
                           front_RBs=args.front_RBs,
                           back_RBs=args.back_RBs,
                           center=args.center,
                           predeblur=args.predeblur,
                           HR_in=args.HR_in,
                           w_TSA=args.w_TSA)
    criterion = CharbonnierLoss()

    print("===> Setting GPU")
    n_gpus = args.gpus if args.gpus != 0 else torch.cuda.device_count()
    device_ids = list(range(n_gpus))
    model = DataParallel(model, device_ids=device_ids)
    model = model.cuda()
    criterion = criterion.cuda()
    # print(model)

    start_epoch = args.start_epoch
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isdir(args.resume):
            # pick the latest .pth checkpoint in the directory
            pth_list = sorted(glob(os.path.join(args.resume, '*.pth')))
            if len(pth_list) > 0:
                args.resume = pth_list[-1]
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            start_epoch = checkpoint['epoch'] + 1
            state_dict = checkpoint['state_dict']
            new_state_dict = OrderedDict()
            for k, v in state_dict.items():
                namekey = 'module.' + k  # add the `module.` prefix expected by DataParallel
                new_state_dict[namekey] = v
            model.load_state_dict(new_state_dict)
            # prefer the lr stored in the checkpoint over the launch argument, if present
            args.lr = checkpoint.get('lr', args.lr)
            # an explicit start_epoch argument overrides the epoch stored in the checkpoint
            start_epoch = args.start_epoch if args.start_epoch != 0 else start_epoch

    # a positive use_current_lr replaces the learning rate
    args.lr = args.use_current_lr if args.use_current_lr > 0 else args.lr
    optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay,
                                 betas=(args.beta1, args.beta2),
                                 eps=1e-8)

    #### training
    print("===> Training")
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(optimizer, epoch)
        if args.use_tqdm == 1:
            losses, psnrs = one_epoch_train_tqdm(
                model, optimizer, criterion, len(data_set), train_loader, epoch,
                args.epochs, args.batch_size, optimizer.param_groups[0]["lr"])
        else:
            losses, psnrs = one_epoch_train_logger(
                model, optimizer, criterion, len(data_set), train_loader, epoch,
                args.epochs, args.batch_size, optimizer.param_groups[0]["lr"])

        # save model
        # if epoch % 9 != 0:
        #     continue
        model_out_path = os.path.join(
            args.checkpoint,
            "model_epoch_%04d_edvr_loss_%.3f_psnr_%.3f.pth" %
            (epoch, losses.avg, psnrs.avg))
        if not os.path.exists(args.checkpoint):
            os.makedirs(args.checkpoint)
        torch.save(
            {
                'state_dict': model.module.state_dict(),
                "epoch": epoch,
                'lr': optimizer.param_groups[0]["lr"]
            }, model_out_path)
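The resume branch above has to reconcile checkpoints saved from model.module.state_dict() with a live model wrapped in DataParallel, which is why it prepends 'module.' to every key. A small sketch of both directions (helper names are mine, not part of the codebase):

from collections import OrderedDict

def add_module_prefix(state_dict):
    # plain state_dict -> loadable by a DataParallel-wrapped model
    return OrderedDict(('module.' + k, v) for k, v in state_dict.items())

def strip_module_prefix(state_dict):
    # DataParallel state_dict -> loadable by a plain (unwrapped) model
    return OrderedDict((k[len('module.'):] if k.startswith('module.') else k, v)
                       for k, v in state_dict.items())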
def val(model_name, current_step, arch='EDVR'): ################# # configurations ################# device = torch.device('cuda') #os.environ['CUDA_VISIBLE_DEVICES'] = '1,2,3,4' test_set = 'REDS4' # Vid4 | YouKu10 | REDS4 | AI4K_val data_mode = 'sharp_bicubic' # sharp_bicubic | blur_bicubic N_in = 5 # load test set if test_set == 'Vid4': test_dataset_folder = '../datasets/Vid4/BIx4' GT_dataset_folder = '../datasets/Vid4/GT' elif test_set == 'YouKu10': test_dataset_folder = '../datasets/YouKu10/LR' GT_dataset_folder = '../datasets/YouKu10/HR' elif test_set == 'YouKu_val': test_dataset_folder = '/data0/yhliu/DATA/YouKuVid/valid/valid_lr_bmp' GT_dataset_folder = '/data0/yhliu/DATA/YouKuVid/valid/valid_hr_bmp' elif test_set == 'REDS4': test_dataset_folder = '../datasets/REDS4/{}'.format(data_mode) GT_dataset_folder = '../datasets/REDS4/GT' elif test_set == 'AI4K_val': test_dataset_folder = '/data0/yhliu/AI4K/contest1/val1_LR_png/' GT_dataset_folder = '/data0/yhliu/AI4K/contest1/val1_HR_png/' elif test_set == 'AI4K_val_small': test_dataset_folder = '/home/yhliu/AI4K/contest1/val1_LR_png_small/' GT_dataset_folder = '/home/yhliu/AI4K/contest1/val1_HR_png_small/' flip_test = False #model_path = '../experiments/pretrained_models/EDVR_Vimeo90K_SR_L.pth' model_path = os.path.join('../experiments/', model_name, 'models/{}_G.pth'.format(current_step)) predeblur, HR_in = False, False back_RBs = 10 if data_mode == 'blur_bicubic': predeblur = True if data_mode == 'blur' or data_mode == 'blur_comp': predeblur, HR_in = True, True if arch == 'EDVR': model = EDVR_arch.EDVR(64, N_in, 8, 5, back_RBs, predeblur=predeblur, HR_in=HR_in) elif arch == 'MY_EDVR': model = my_EDVR_arch.MYEDVR(64, N_in, 8, 5, back_RBs, predeblur=predeblur, HR_in=HR_in) #### evaluation crop_border = 0 border_frame = N_in // 2 # border frames when evaluate # temporal padding mode if data_mode == 'Vid4' or data_mode == 'sharp_bicubic': padding = 'new_info' else: padding = 'replicate' save_imgs = False save_folder = '../validation/{}'.format(test_set) util.mkdirs(save_folder) util.setup_logger('base', save_folder, 'test', level=logging.INFO, screen=True, tofile=True) logger = logging.getLogger('base') #### log info logger.info('Data: {} - {}'.format(data_mode, test_dataset_folder)) logger.info('Padding mode: {}'.format(padding)) logger.info('Model path: {}'.format(model_path)) logger.info('Save images: {}'.format(save_imgs)) logger.info('Flip test: {}'.format(flip_test)) #### set up the models model.load_state_dict(torch.load(model_path), strict=True) model.eval() model = model.to(device) model = nn.DataParallel(model, device_ids=[0, 1, 2, 3]) avg_psnr_l, avg_psnr_center_l, avg_psnr_border_l = [], [], [] subfolder_name_l = [] subfolder_l = sorted(glob.glob(osp.join(test_dataset_folder, '*'))) subfolder_GT_l = sorted(glob.glob(osp.join(GT_dataset_folder, '*'))) #print(subfolder_l) #print(subfolder_GT_l) #exit() # for each subfolder for subfolder, subfolder_GT in zip(subfolder_l, subfolder_GT_l): subfolder_name = osp.basename(subfolder) subfolder_name_l.append(subfolder_name) save_subfolder = osp.join(save_folder, subfolder_name) img_path_l = sorted(glob.glob(osp.join(subfolder, '*'))) #print(img_path_l) max_idx = len(img_path_l) if save_imgs: util.mkdirs(save_subfolder) #### read LQ and GT images imgs_LQ = data_util.read_img_seq(subfolder) img_GT_l = [] for img_GT_path in sorted(glob.glob(osp.join(subfolder_GT, '*'))): #print(img_GT_path) img_GT_l.append(data_util.read_img(None, img_GT_path)) #print(img_GT_l[0].shape) avg_psnr, 
avg_psnr_border, avg_psnr_center, N_border, N_center = 0, 0, 0, 0, 0 # process each image for img_idx, img_path in enumerate(img_path_l): img_name = osp.splitext(osp.basename(img_path))[0] select_idx = data_util.index_generation(img_idx, max_idx, N_in, padding=padding) imgs_in = imgs_LQ.index_select( 0, torch.LongTensor(select_idx)).unsqueeze(0).to(device) #print(imgs_in.size()) if flip_test: output = util.flipx4_forward(model, imgs_in) else: output = util.single_forward(model, imgs_in) output = util.tensor2img(output.squeeze(0)) if save_imgs: cv2.imwrite( osp.join(save_subfolder, '{}.png'.format(img_name)), output) # calculate PSNR output = output / 255. GT = np.copy(img_GT_l[img_idx]) # For REDS, evaluate on RGB channels; for Vid4, evaluate on the Y channel ''' if data_mode == 'Vid4': # bgr2y, [0, 1] GT = data_util.bgr2ycbcr(GT, only_y=True) output = data_util.bgr2ycbcr(output, only_y=True) ''' output, GT = util.crop_border([output, GT], crop_border) crt_psnr = util.calculate_psnr(output * 255, GT * 255) #logger.info('{:3d} - {:25} \tPSNR: {:.6f} dB'.format(img_idx + 1, img_name, crt_psnr)) if img_idx >= border_frame and img_idx < max_idx - border_frame: # center frames avg_psnr_center += crt_psnr N_center += 1 else: # border frames avg_psnr_border += crt_psnr N_border += 1 avg_psnr = (avg_psnr_center + avg_psnr_border) / (N_center + N_border) avg_psnr_center = avg_psnr_center / N_center avg_psnr_border = 0 if N_border == 0 else avg_psnr_border / N_border avg_psnr_l.append(avg_psnr) avg_psnr_center_l.append(avg_psnr_center) avg_psnr_border_l.append(avg_psnr_border) logger.info('Folder {} - Average PSNR: {:.6f} dB for {} frames; ' 'Center PSNR: {:.6f} dB for {} frames; ' 'Border PSNR: {:.6f} dB for {} frames.'.format( subfolder_name, avg_psnr, (N_center + N_border), avg_psnr_center, N_center, avg_psnr_border, N_border)) logger.info('################ Tidy Outputs ################') for subfolder_name, psnr, psnr_center, psnr_border in zip( subfolder_name_l, avg_psnr_l, avg_psnr_center_l, avg_psnr_border_l): logger.info('Folder {} - Average PSNR: {:.6f} dB. ' 'Center PSNR: {:.6f} dB. ' 'Border PSNR: {:.6f} dB.'.format(subfolder_name, psnr, psnr_center, psnr_border)) logger.info('################ Final Results ################') logger.info('Data: {} - {}'.format(data_mode, test_dataset_folder)) logger.info('Padding mode: {}'.format(padding)) logger.info('Model path: {}'.format(model_path)) logger.info('Save images: {}'.format(save_imgs)) logger.info('Flip test: {}'.format(flip_test)) logger.info('Total Average PSNR: {:.6f} dB for {} clips. ' 'Center PSNR: {:.6f} dB. Border PSNR: {:.6f} dB.'.format( sum(avg_psnr_l) / len(avg_psnr_l), len(subfolder_l), sum(avg_psnr_center_l) / len(avg_psnr_center_l), sum(avg_psnr_border_l) / len(avg_psnr_border_l))) return sum(avg_psnr_l) / len(avg_psnr_l)
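val() selects the temporal window with data_util.index_generation, using 'new_info' padding for bicubic SR and 'replicate' otherwise. The sketch below is an assumption about what those two modes return near clip borders (simplified, not the repository implementation):

def index_generation_sketch(crt_i, max_n, N, padding='replicate'):
    # crt_i: current frame index, max_n: number of frames, N: window size
    n_pad = N // 2
    idxs = []
    for i in range(crt_i - n_pad, crt_i + n_pad + 1):
        if i < 0:
            # before the first frame: repeat frame 0, or borrow extra future frames
            idxs.append(0 if padding == 'replicate' else crt_i + n_pad - i)
        elif i > max_n - 1:
            # past the last frame: repeat the last frame, or borrow extra past frames
            idxs.append(max_n - 1 if padding == 'replicate'
                        else crt_i - n_pad - (i - (max_n - 1)))
        else:
            idxs.append(i)
    return idxs

print(index_generation_sketch(0, 100, 5, padding='replicate'))  # [0, 0, 0, 1, 2]
print(index_generation_sketch(0, 100, 5, padding='new_info'))   # [4, 3, 0, 1, 2]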
def main(): ################# # configurations ################# device = torch.device("cuda") os.environ["CUDA_VISIBLE_DEVICES"] = "0" data_mode = ("licensePlate_blur_bicubic" ) # Vid4 | sharp_bicubic | blur_bicubic | blur | blur_comp # Vid4: SR # REDS4: sharp_bicubic (SR-clean), blur_bicubic (SR-blur); # blur (deblur-clean), blur_comp (deblur-compression). stage = 1 # 1 or 2, use two stage strategy for REDS dataset. flip_test = False ############################################################################ #### model if data_mode == "Vid4": if stage == 1: model_path = "../experiments/pretrained_models/EDVR_Vimeo90K_SR_L.pth" else: raise ValueError("Vid4 does not support stage 2.") elif data_mode == "sharp_bicubic": if stage == 1: model_path = "../experiments/pretrained_models/EDVR_REDS_SR_L.pth" else: model_path = "../experiments/pretrained_models/EDVR_REDS_SR_Stage2.pth" elif data_mode == "blur_bicubic": if stage == 1: model_path = "../experiments/pretrained_models/EDVR_REDS_SRblur_L.pth" else: model_path = "../experiments/pretrained_models/EDVR_REDS_SRblur_Stage2.pth" elif data_mode == "blur": if stage == 1: model_path = "../experiments/pretrained_models/EDVR_REDS_deblur_L.pth" else: model_path = "../experiments/pretrained_models/EDVR_REDS_deblur_Stage2.pth" elif data_mode == "blur_comp": if stage == 1: model_path = "../experiments/pretrained_models/EDVR_REDS_deblurcomp_L.pth" else: model_path = ( "../experiments/pretrained_models/EDVR_REDS_deblurcomp_Stage2.pth" ) elif data_mode == "licensePlate_blur_bicubic": model_path = ("/workspace/video_sr/EDVR/experiments/" + "pretrained_models/EDVR_licensePlate_SRblur_L.pth") else: raise NotImplementedError if data_mode == "Vid4": N_in = 7 # use N_in images to restore one HR image else: N_in = 5 predeblur, HR_in = False, False back_RBs = 40 if (data_mode == "blur_bicubic") or (data_mode == "licensePlate_blur_bicubic"): predeblur = True elif data_mode == "blur" or data_mode == "blur_comp": predeblur, HR_in = True, True if stage == 2: HR_in = True back_RBs = 20 model = EDVR_arch.EDVR(128, N_in, 8, 5, back_RBs, predeblur=predeblur, HR_in=HR_in) #### dataset if data_mode == "Vid4": test_dataset_folder = "../datasets/Vid4/BIx4" GT_dataset_folder = "../datasets/Vid4/GT" elif data_mode == "licensePlate_blur_bicubic": test_dataset_folder = "../datasets/license_plate2/BIx4" GT_dataset_folder = "../datasets/license_plate2/GT" else: if stage == 1: test_dataset_folder = "../datasets/REDS4/{}".format(data_mode) else: test_dataset_folder = "../results/REDS-EDVR_REDS_SR_L_flipx4" print("You should modify the test_dataset_folder path for stage 2") GT_dataset_folder = "../datasets/REDS4/GT" #### evaluation crop_border = 0 border_frame = N_in // 2 # border frames when evaluate # temporal padding mode if data_mode == "Vid4" or data_mode == "sharp_bicubic": padding = "new_info" else: padding = "replicate" save_imgs = True save_folder = "../results/{}".format(data_mode) util.mkdirs(save_folder) util.setup_logger("base", save_folder, "test", level=logging.INFO, screen=True, tofile=True) logger = logging.getLogger("base") #### log info logger.info("Data: {} - {}".format(data_mode, test_dataset_folder)) logger.info("Padding mode: {}".format(padding)) logger.info("Model path: {}".format(model_path)) logger.info("Save images: {}".format(save_imgs)) logger.info("Flip test: {}".format(flip_test)) #### set up the models model.load_state_dict(torch.load(model_path), strict=True) model.eval() model = model.to(device) avg_psnr_l, avg_psnr_center_l, avg_psnr_border_l = [], 
[], [] subfolder_name_l = [] subfolder_l = sorted(glob.glob(osp.join(test_dataset_folder, "*"))) subfolder_GT_l = sorted(glob.glob(osp.join(GT_dataset_folder, "*"))) # for each subfolder for subfolder, subfolder_GT in zip(subfolder_l, subfolder_GT_l): subfolder_name = osp.basename(subfolder) subfolder_name_l.append(subfolder_name) save_subfolder = osp.join(save_folder, subfolder_name) img_path_l = sorted(glob.glob(osp.join(subfolder, "*"))) max_idx = len(img_path_l) if save_imgs: util.mkdirs(save_subfolder) #### read LQ and GT images imgs_LQ = test_util.read_img_seq(subfolder) img_GT_l = [] for img_GT_path in sorted(glob.glob(osp.join(subfolder_GT, "*"))): img_GT_l.append(test_util.read_img(img_GT_path)) avg_psnr, avg_psnr_border, avg_psnr_center, N_border, N_center = 0, 0, 0, 0, 0 # process each image for img_idx, img_path in enumerate(img_path_l): img_name = osp.splitext(osp.basename(img_path))[0] select_idx = test_util.index_generation(img_idx, max_idx, N_in, padding=padding) imgs_in = (imgs_LQ.index_select( 0, torch.LongTensor(select_idx)).unsqueeze(0).to(device)) if flip_test: output = test_util.flipx4_forward(model, imgs_in) else: output = test_util.single_forward(model, imgs_in) output = util.tensor2img(output.squeeze(0)) if save_imgs: cv2.imwrite( osp.join(save_subfolder, "{}.png".format(img_name)), output) # calculate PSNR output = output / 255.0 GT = np.copy(img_GT_l[img_idx]) # For REDS, evaluate on RGB channels; for Vid4, evaluate on the Y channel if data_mode == "Vid4": # bgr2y, [0, 1] GT = data_util.bgr2ycbcr(GT, only_y=True) output = data_util.bgr2ycbcr(output, only_y=True) output, GT = test_util.crop_border([output, GT], crop_border) crt_psnr = util.calculate_psnr(output * 255, GT * 255) logger.info("{:3d} - {:25} \tPSNR: {:.6f} dB".format( img_idx + 1, img_name, crt_psnr)) if (img_idx >= border_frame and img_idx < max_idx - border_frame): # center frames avg_psnr_center += crt_psnr N_center += 1 else: # border frames avg_psnr_border += crt_psnr N_border += 1 avg_psnr = (avg_psnr_center + avg_psnr_border) / (N_center + N_border) avg_psnr_center = avg_psnr_center / N_center avg_psnr_border = 0 if N_border == 0 else avg_psnr_border / N_border avg_psnr_l.append(avg_psnr) avg_psnr_center_l.append(avg_psnr_center) avg_psnr_border_l.append(avg_psnr_border) logger.info("Folder {} - Average PSNR: {:.6f} dB for {} frames; " "Center PSNR: {:.6f} dB for {} frames; " "Border PSNR: {:.6f} dB for {} frames.".format( subfolder_name, avg_psnr, (N_center + N_border), avg_psnr_center, N_center, avg_psnr_border, N_border, )) logger.info("################ Tidy Outputs ################") for subfolder_name, psnr, psnr_center, psnr_border in zip( subfolder_name_l, avg_psnr_l, avg_psnr_center_l, avg_psnr_border_l): logger.info("Folder {} - Average PSNR: {:.6f} dB. " "Center PSNR: {:.6f} dB. " "Border PSNR: {:.6f} dB.".format(subfolder_name, psnr, psnr_center, psnr_border)) logger.info("################ Final Results ################") logger.info("Data: {} - {}".format(data_mode, test_dataset_folder)) logger.info("Padding mode: {}".format(padding)) logger.info("Model path: {}".format(model_path)) logger.info("Save images: {}".format(save_imgs)) logger.info("Flip test: {}".format(flip_test)) logger.info("Total Average PSNR: {:.6f} dB for {} clips. " "Center PSNR: {:.6f} dB. Border PSNR: {:.6f} dB.".format( sum(avg_psnr_l) / len(avg_psnr_l), len(subfolder_l), sum(avg_psnr_center_l) / len(avg_psnr_center_l), sum(avg_psnr_border_l) / len(avg_psnr_border_l), ))
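When flip_test is enabled, the flipx4 self-ensemble replaces the single forward pass. A hedged sketch of what such an x4 ensemble is assumed to do (average the outputs of the original, W-flipped, H-flipped, and HW-flipped inputs after undoing each flip):

import torch

def flipx4_forward_sketch(model, inp):
    # inp: (B, N, C, H, W) video tensor; flips act on the last two (spatial) dims
    outs = []
    for dims in ([], [-1], [-2], [-2, -1]):
        x = torch.flip(inp, dims) if dims else inp
        with torch.no_grad():
            y = model(x)
        outs.append(torch.flip(y, dims) if dims else y)
    return sum(outs) / 4.0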
def create_test_png(model_path, device, gpu_id, opt, subfolder_l, save_folder, save_imgs, frame_notation, N_in, PAD, flip_test, end, total_run_time, logger, padding): model = EDVR_arch.EDVR(nf=opt['network_G']['nf'], nframes=opt['network_G']['nframes'], groups=opt['network_G']['groups'], front_RBs=opt['network_G']['front_RBs'], back_RBs=opt['network_G']['back_RBs'], predeblur=opt['network_G']['predeblur'], HR_in=opt['network_G']['HR_in'], w_TSA=opt['network_G']['w_TSA']) model.load_state_dict(torch.load(model_path), strict=True) model.eval() model = model.to(device) #if (torch.cuda.is_available()): model = model.cuda(gpu_id) for subfolder in subfolder_l: input_subfolder = os.path.split(subfolder)[1] # subfolder_GT = os.path.join(GT_dataset_folder,input_subfolder) #if not os.path.exists(subfolder_GT): # continue print("Evaluate Folders: ", input_subfolder) subfolder_name = osp.basename(subfolder) #subfolder_name_l.append(subfolder_name) save_subfolder = osp.join(save_folder, subfolder_name) img_path_l = sorted(glob.glob(osp.join(subfolder, '*'))) max_idx = len(img_path_l) if save_imgs: util.mkdirs(save_subfolder) #### read LQ and GT images imgs_LQ = data_util.read_img_seq(subfolder) # Num x 3 x H x W #img_GT_l = [] #for img_GT_path in sorted(glob.glob(osp.join(subfolder_GT, '*'))): # img_GT_l.append(data_util.read_img(None, img_GT_path)) #avg_psnr, avg_psnr_border, avg_psnr_center, N_border, N_center = 0, 0, 0, 0, 0 # process each image for img_idx, img_path in enumerate(img_path_l): img_name = osp.splitext(osp.basename(img_path))[0] # todo here handle screen change select_idx, log1, log2, nota = data_util.index_generation_process_screen_change_withlog_fixbug(input_subfolder, frame_notation, img_idx, max_idx, N_in, padding=padding) if not log1 == None: logger.info('screen change') logger.info(nota) logger.info(log1) logger.info(log2) imgs_in = imgs_LQ.index_select(0, torch.LongTensor(select_idx)).unsqueeze(0).cuda(gpu_id) # 960 x 540 # here we split the input images 960x540 into 9 320x180 patch gtWidth = 3840 gtHeight = 2160 intWidth_ori = imgs_in.shape[4] # 960 intHeight_ori = imgs_in.shape[3] # 540 split_lengthY = 180 split_lengthX = 320 scale = 4 intPaddingRight_ = int(float(intWidth_ori) / split_lengthX + 1) * split_lengthX - intWidth_ori intPaddingBottom_ = int(float(intHeight_ori) / split_lengthY + 1) * split_lengthY - intHeight_ori intPaddingRight_ = 0 if intPaddingRight_ == split_lengthX else intPaddingRight_ intPaddingBottom_ = 0 if intPaddingBottom_ == split_lengthY else intPaddingBottom_ pader0 = torch.nn.ReplicationPad2d([0, intPaddingRight_, 0, intPaddingBottom_]) print("Init pad right/bottom " + str(intPaddingRight_) + " / " + str(intPaddingBottom_)) intPaddingRight = PAD # 32# 64# 128# 256 intPaddingLeft = PAD # 32#64 #128# 256 intPaddingTop = PAD # 32#64 #128#256 intPaddingBottom = PAD # 32#64 # 128# 256 pader = torch.nn.ReplicationPad2d([intPaddingLeft, intPaddingRight, intPaddingTop, intPaddingBottom]) imgs_in = torch.squeeze(imgs_in, 0)# N C H W imgs_in = pader0(imgs_in) # N C 540 960 imgs_in = pader(imgs_in) # N C 604 1024 assert (split_lengthY == int(split_lengthY) and split_lengthX == int(split_lengthX)) split_lengthY = int(split_lengthY) split_lengthX = int(split_lengthX) split_numY = int(float(intHeight_ori) / split_lengthY ) split_numX = int(float(intWidth_ori) / split_lengthX) splitsY = range(0, split_numY) splitsX = range(0, split_numX) intWidth = split_lengthX intWidth_pad = intWidth + intPaddingLeft + intPaddingRight intHeight = split_lengthY intHeight_pad = 
intHeight + intPaddingTop + intPaddingBottom # print("split " + str(split_numY) + ' , ' + str(split_numX)) y_all = np.zeros((gtHeight, gtWidth, 3), dtype="float32") # HWC for split_j, split_i in itertools.product(splitsY, splitsX): # print(str(split_j) + ", \t " + str(split_i)) X0 = imgs_in[:, :, split_j * split_lengthY:(split_j + 1) * split_lengthY + intPaddingBottom + intPaddingTop, split_i * split_lengthX:(split_i + 1) * split_lengthX + intPaddingRight + intPaddingLeft] # y_ = torch.FloatTensor() X0 = torch.unsqueeze(X0, 0) # N C H W -> 1 N C H W #X0 = X0.cuda(gpu_id) if flip_test: output = util.flipx4_forward(model, X0) else: output = util.single_forward(model, X0) output_depadded = output[0, :, intPaddingTop * scale :(intPaddingTop+intHeight) * scale, intPaddingLeft * scale: (intPaddingLeft+intWidth)*scale] output_depadded = output_depadded.squeeze(0) output = util.tensor2img(output_depadded) y_all[split_j * split_lengthY * scale :(split_j + 1) * split_lengthY * scale, split_i * split_lengthX * scale :(split_i + 1) * split_lengthX * scale, :] = \ np.round(output).astype(np.uint8) # plt.figure(0) # plt.title("pic") # plt.imshow(y_all) if save_imgs: cv2.imwrite(osp.join(save_subfolder, '{}.png'.format(img_name)), y_all) print("*****************current image process time \t " + str( time.time() - end) + "s ******************") total_run_time.update(time.time() - end, 1) # calculate PSNR #y_all = y_all / 255. #GT = np.copy(img_GT_l[img_idx]) # For REDS, evaluate on RGB channels; for Vid4, evaluate on the Y channel #if data_mode == 'Vid4': # bgr2y, [0, 1] # GT = data_util.bgr2ycbcr(GT, only_y=True) # y_all = data_util.bgr2ycbcr(y_all, only_y=True) #y_all, GT = util.crop_border([y_all, GT], crop_border) #crt_psnr = util.calculate_psnr(y_all * 255, GT * 255) #logger.info('{:3d} - {:25} \tPSNR: {:.6f} dB'.format(img_idx + 1, img_name, crt_psnr)) logger.info('{} : {:3d} - {:25} \t'.format(input_subfolder, img_idx + 1, img_name))
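create_test_png never feeds a full frame to the network: it pads the 960x540 input, cuts it into 320x180 tiles with PAD pixels of replication padding on every side, super-resolves each tile, and pastes the de-padded x4 results into a 3840x2160 canvas. The arithmetic, with an assumed PAD of 32 (the script leaves the actual value to the caller):

scale, PAD = 4, 32
split_lengthX, split_lengthY = 320, 180

lr_tile = (split_lengthY + 2 * PAD, split_lengthX + 2 * PAD)   # (244, 384) goes into the model
hr_tile = (lr_tile[0] * scale, lr_tile[1] * scale)             # (976, 1536) raw model output
hr_kept = (split_lengthY * scale, split_lengthX * scale)       # (720, 1280) kept after de-padding
print(lr_tile, hr_tile, hr_kept)
# A 960x540 frame yields (540 // 180) * (960 // 320) = 9 tiles, and the kept
# crops tile the 3840x2160 output exactly.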
def main(): ################# # configurations ################# device = torch.device('cuda') os.environ['CUDA_VISIBLE_DEVICES'] = '0' data_mode = 'SDR_4bit' stage = 1 # 1 or 2, use two stage strategy for REDS dataset. flip_test = False ############################################################################ #### model if data_mode == 'SDR_4bit': if stage == 1: model_path = '../experiments/pretrained_models/EDVR_REDS_deblurcomp_L.pth' else: model_path = '../experiments/pretrained_models/EDVR_REDS_deblurcomp_Stage2.pth' else: raise NotImplementedError # use N_in images to restore one high bitdepth image N_in = 5 # predeblur: predeblur for blurry input # HR_in: downsample high resolution input predeblur, HR_in = False, False back_RBs = 40 predeblur = True HR_in = True if data_mode == 'SDR_4bit': # predeblur, HR_in = False, True pass if stage == 2: HR_in = True back_RBs = 20 # EDVR(num_feature_map, num_input_frames, deformable_groups?, front_RBs, # back_RBs, predeblur, HR_in) model = EDVR_arch.EDVR(128, N_in, 8, 5, back_RBs, predeblur=predeblur, HR_in=HR_in) #### dataset if stage == 1: test_dataset_folder = '../datasets/{}'.format(data_mode) else: test_dataset_folder = '../' print('You should modify the test_dataset_folder path for stage 2') GT_dataset_folder = '../datasets/SDR_10bit/' #### evaluation crop_border = 0 border_frame = N_in // 2 # border frames when evaluate # temporal padding mode padding = 'replicate' save_imgs = True save_folder = '../results/{}'.format(data_mode) util.mkdirs(save_folder) util.setup_logger('base', save_folder, 'test', level=logging.INFO, screen=True, tofile=True) logger = logging.getLogger('base') #### log info logger.info('Data: {} - {}'.format(data_mode, test_dataset_folder)) logger.info('Padding mode: {}'.format(padding)) logger.info('Model path: {}'.format(model_path)) logger.info('Save images: {}'.format(save_imgs)) logger.info('Flip test: {}'.format(flip_test)) #### set up the models model.load_state_dict(torch.load(model_path), strict=True) model.eval() model = model.to(device) avg_psnr_l, avg_psnr_center_l, avg_psnr_border_l = [], [], [] subfolder_name_l = [] subfolder_l = sorted(glob.glob(osp.join(test_dataset_folder, '*'))) subfolder_GT_l = sorted(glob.glob(osp.join(GT_dataset_folder, '*'))) # for each subfolder for subfolder, subfolder_GT in zip(subfolder_l, subfolder_GT_l): subfolder_name = osp.basename(subfolder) subfolder_name_l.append(subfolder_name) save_subfolder = osp.join(save_folder, subfolder_name) img_path_l = sorted(glob.glob(osp.join(subfolder, '*'))) max_idx = len(img_path_l) if save_imgs: util.mkdirs(save_subfolder) #### read LBD and GT images #### resize to avoid cuda out of memory, 2160x3840->720x1280 imgs_LBD = data_util.read_img_seq(subfolder, scale=65535., zoomout=(1280, 720)) img_GT_l = [] for img_GT_path in sorted(glob.glob(osp.join(subfolder_GT, '*'))): img_GT_l.append( data_util.read_img(None, img_GT_path, scale=65535., zoomout=True)) avg_psnr, avg_psnr_border, avg_psnr_center, N_border, N_center = 0, 0, 0, 0, 0 # process each image for img_idx, img_path in enumerate(img_path_l): img_name = osp.splitext(osp.basename(img_path))[0] # generate frame index select_idx = data_util.index_generation(img_idx, max_idx, N_in, padding=padding) imgs_in = imgs_LBD.index_select( 0, torch.LongTensor(select_idx)).unsqueeze(0).to(device) if flip_test: # self ensemble with fipping input at four different directions output = util.flipx4_forward(model, imgs_in) else: output = util.single_forward(model, imgs_in) output = 
util.tensor2img(output.squeeze(0), out_type=np.uint16) if save_imgs: cv2.imwrite( osp.join(save_subfolder, '{}.png'.format(img_name)), output) # calculate PSNR # output = output / 255. output = output / 65535. GT = np.copy(img_GT_l[img_idx]) # For REDS, evaluate on RGB channels; for Vid4, evaluate on the Y channel if data_mode == 'Vid4': # bgr2y, [0, 1] GT = data_util.bgr2ycbcr(GT, only_y=True) output = data_util.bgr2ycbcr(output, only_y=True) output, GT = util.crop_border([output, GT], crop_border) crt_psnr = util.calculate_psnr(output * 65535, GT * 65535) logger.info('{:3d} - {:25} \tPSNR: {:.6f} dB'.format( img_idx + 1, img_name, crt_psnr)) if img_idx >= border_frame and img_idx < max_idx - border_frame: # center frames avg_psnr_center += crt_psnr N_center += 1 else: # border frames avg_psnr_border += crt_psnr N_border += 1 avg_psnr = (avg_psnr_center + avg_psnr_border) / (N_center + N_border) avg_psnr_center = avg_psnr_center / N_center avg_psnr_border = 0 if N_border == 0 else avg_psnr_border / N_border avg_psnr_l.append(avg_psnr) avg_psnr_center_l.append(avg_psnr_center) avg_psnr_border_l.append(avg_psnr_border) logger.info('Folder {} - Average PSNR: {:.6f} dB for {} frames; ' 'Center PSNR: {:.6f} dB for {} frames; ' 'Border PSNR: {:.6f} dB for {} frames.'.format( subfolder_name, avg_psnr, (N_center + N_border), avg_psnr_center, N_center, avg_psnr_border, N_border)) logger.info('################ Tidy Outputs ################') for subfolder_name, psnr, psnr_center, psnr_border in zip( subfolder_name_l, avg_psnr_l, avg_psnr_center_l, avg_psnr_border_l): logger.info('Folder {} - Average PSNR: {:.6f} dB. ' 'Center PSNR: {:.6f} dB. ' 'Border PSNR: {:.6f} dB.'.format(subfolder_name, psnr, psnr_center, psnr_border)) logger.info('################ Final Results ################') logger.info('Data: {} - {}'.format(data_mode, test_dataset_folder)) logger.info('Padding mode: {}'.format(padding)) logger.info('Model path: {}'.format(model_path)) logger.info('Save images: {}'.format(save_imgs)) logger.info('Flip test: {}'.format(flip_test)) logger.info('Total Average PSNR: {:.6f} dB for {} clips. ' 'Center PSNR: {:.6f} dB. Border PSNR: {:.6f} dB.'.format( sum(avg_psnr_l) / len(avg_psnr_l), len(subfolder_l), sum(avg_psnr_center_l) / len(avg_psnr_center_l), sum(avg_psnr_border_l) / len(avg_psnr_border_l)))
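This variant works on high-bit-depth data: frames are read with scale=65535., outputs are written with out_type=np.uint16, and PSNR is computed on values normalised by 65535. A small sketch of the 16-bit PNG round trip assumed here (the file name is illustrative):

import cv2
import numpy as np

img01 = np.random.rand(64, 64, 3).astype(np.float32)   # stand-in for a model output in [0, 1]
cv2.imwrite('out_16bit.png', np.round(img01 * 65535.0).astype(np.uint16))

back = cv2.imread('out_16bit.png', cv2.IMREAD_UNCHANGED).astype(np.float32) / 65535.0
print(back.dtype, back.min(), back.max())  # float32, values back in [0, 1]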
def main(opts): ################## configurations ################# device = torch.device('cuda') os.environ['CUDA_VISIBLE_DEVICES'] = opts.gpus cache_all_imgs = opts.cache > 0 n_gpus = len(opts.gpus.split(',')) flip_test, save_imgs = False, False scale = 4 N_in, nf = 5, 64 back_RBs = 10 w_TSA = False predeblur, HR_in = False, False crop_border = 0 border_frame = N_in // 2 padding = 'new_info' ################## model files #################### model_dir = opts.model_dir if osp.isfile(model_dir): model_names = [osp.basename(model_dir)] model_dir = osp.dirname(model_dir) elif osp.isdir(model_dir): model_names = [ x for x in os.listdir(model_dir) if str.isdigit(x.split('_')[0]) ] model_names = sorted(model_names, key=lambda x: int(x.split("_")[0])) else: raise IOError('Invalid model_dir: {}'.format(model_dir)) ################## dataset ######################## test_subs = sorted(os.listdir(opts.test_dir)) gt_subs = os.listdir(opts.gt_dir) valid_test_subs = [sub in gt_subs for sub in test_subs] assert (all(valid_test_subs)), 'Invalid sub folders exists in {}'.format( opts.test_dir) scale = float(os.path.basename(os.path.dirname(opts.test_dir))[1:]) if cache_all_imgs: print('Cacheing all testing images ...') all_imgs = {} for sub in test_subs: print('Reading sub-folder: {} ...'.format(sub)) test_sub_dir = osp.join(opts.test_dir, sub) gt_sub_dir = osp.join(opts.gt_dir, sub) all_imgs[sub] = {'test': [], 'gt': []} im_names = sorted(os.listdir(test_sub_dir)) for i, name in enumerate(im_names): test_im_path = osp.join(test_sub_dir, name) gt_im_path = osp.join(gt_sub_dir, name) test_im = cv2.imread(test_im_path, cv2.IMREAD_UNCHANGED)[:, :, (2, 1, 0)] test_im = test_im.astype(np.float32).transpose( (2, 0, 1)) / 255. all_imgs[sub]['test'].append(test_im) gt_im = cv2.imread(gt_im_path, cv2.IMREAD_UNCHANGED).astype(np.float32) all_imgs[sub]['gt'].append(gt_im) all_psnrs = [] for model_name in model_names: model_path = osp.join(model_dir, model_name) exp_name = model_name.split('_')[0] if 'meta' in opts.mode.lower(): model = EDVR_arch.MetaEDVR(nf=nf, nframes=N_in, groups=8, front_RBs=5, center=None, back_RBs=back_RBs, predeblur=predeblur, HR_in=HR_in, w_TSA=w_TSA) elif opts.mode.lower() == 'edvr': model = EDVR_arch.EDVR(nf=nf, nframes=N_in, groups=8, front_RBs=5, center=None, back_RBs=back_RBs, predeblur=predeblur, HR_in=HR_in, w_TSA=w_TSA) elif opts.mode.lower() == 'upedvr': model = EDVR_arch.UPEDVR(nf=nf, nframes=N_in, groups=8, front_RBs=5, center=None, back_RBs=10, w_TSA=w_TSA, down_scale=True, align_target=True, ret_valid=True) elif opts.mode.lower() == 'upcont1': model = EDVR_arch.UPControlEDVR(nf=nf, nframes=N_in, groups=8, front_RBs=5, center=None, back_RBs=10, w_TSA=w_TSA, down_scale=True, align_target=True, ret_valid=True, multi_scale_cont=False) elif opts.mode.lower() == 'upcont3': model = EDVR_arch.UPControlEDVR(nf=nf, nframes=N_in, groups=8, front_RBs=5, center=None, back_RBs=10, w_TSA=w_TSA, down_scale=True, align_target=True, ret_valid=True, multi_scale_cont=True) elif opts.mode.lower() == 'upcont2': model = EDVR_arch.UPControlEDVR(nf=nf, nframes=N_in, groups=8, front_RBs=5, center=None, back_RBs=10, w_TSA=w_TSA, down_scale=True, align_target=True, ret_valid=True, multi_scale_cont=True) else: raise TypeError('Unknown model mode: {}'.format(opts.mode)) save_folder = osp.join(opts.save_dir, exp_name) util.mkdirs(save_folder) util.setup_logger(exp_name, save_folder, 'test', level=logging.INFO, screen=True, tofile=True) logger = logging.getLogger(exp_name) #### log info logger.info('Data: 
{}'.format(opts.test_dir)) logger.info('Padding mode: {}'.format(padding)) logger.info('Model path: {}'.format(model_path)) logger.info('Save images: {}'.format(save_imgs)) logger.info('Flip test: {}'.format(flip_test)) #### set up the models model.load_state_dict(torch.load(model_path), strict=True) model.eval() if n_gpus > 1: model = nn.DataParallel(model) model = model.to(device) avg_psnrs, avg_psnr_centers, avg_psnr_borders = [], [], [] avg_ssims, avg_ssim_centers, avg_ssim_borders = [], [], [] evaled_subs = [] # for each subfolder for sub in test_subs: evaled_subs.append(sub) test_sub_dir = osp.join(opts.test_dir, sub) gt_sub_dir = osp.join(opts.gt_dir, sub) img_names = sorted(os.listdir(test_sub_dir)) max_idx = len(img_names) if save_imgs: save_subfolder = osp.join(save_folder, sub) util.mkdirs(save_subfolder) #### get LQ and GT images if not cache_all_imgs: img_LQs, img_GTs = [], [] for i, name in enumerate(img_names): test_im_path = osp.join(test_sub_dir, name) gt_im_path = osp.join(gt_sub_dir, name) test_im = cv2.imread(test_im_path, cv2.IMREAD_UNCHANGED)[:, :, (2, 1, 0)] test_im = test_im.astype(np.float32).transpose( (2, 0, 1)) / 255. gt_im = cv2.imread(gt_im_path, cv2.IMREAD_UNCHANGED).astype(np.float32) img_LQs.append(test_im) img_GTs.append(gt_im) else: img_LQs = all_imgs[sub]['test'] img_GTs = all_imgs[sub]['gt'] avg_psnr, avg_psnr_border, avg_psnr_center, N_border, N_center = 0, 0, 0, 0, 0 avg_ssim, avg_ssim_border, avg_ssim_center = 0, 0, 0 # process each image for i in range(0, max_idx, n_gpus): end = min(i + n_gpus, max_idx) select_idxs = [ data_util.index_generation(j, max_idx, N_in, padding=padding) for j in range(i, end) ] imgs = [] for select_idx in select_idxs: im = torch.from_numpy( np.stack([img_LQs[k] for k in select_idx])) imgs.append(im) if (i + n_gpus) > max_idx: for _ in range(max_idx, i + n_gpus): imgs.append(torch.zeros_like(im)) imgs = torch.stack(imgs, 0).to(device) if flip_test: output = util.flipx4_forward(model, imgs) else: if 'meta' in opts.mode.lower(): output = util.meta_single_forward( model, imgs, scale, n_gpus) if 'up' in opts.mode.lower(): output = util.up_single_forward(model, imgs, scale) else: output = util.single_forward(model, imgs) output = [ util.tensor2img(x).astype(np.float32) for x in output ] if save_imgs: for ii in range(i, end): cv2.imwrite( osp.join(save_subfolder, '{}.png'.format(img_names[ii])), output[ii - i].astype(np.uint8)) # calculate PSNR GT = np.copy(img_GTs[i:end]) output = util.crop_border(output, crop_border) GT = util.crop_border(GT, crop_border) for m in range(i, end): crt_psnr = util.calculate_psnr(output[m - i], GT[m - i]) crt_ssim = util.calculate_ssim(output[m - i], GT[m - i]) logger.info( '{:3d} - {:25} \tPSNR: {:.6f} dB SSIM: {:.6}'. 
format(m + 1, img_names[m], crt_psnr, crt_ssim)) if m >= border_frame and m < max_idx - border_frame: # center frames avg_psnr_center += crt_psnr avg_ssim_center += crt_ssim N_center += 1 else: # border frames avg_psnr_border += crt_psnr avg_ssim_border += crt_ssim N_border += 1 avg_psnr = (avg_psnr_center + avg_psnr_border) / (N_center + N_border) avg_psnr_center = avg_psnr_center / N_center avg_psnr_border = 0 if N_border == 0 else avg_psnr_border / N_border avg_ssim = (avg_ssim_center + avg_ssim_border) / (N_center + N_border) avg_ssim_center = avg_ssim_center / N_center avg_ssim_border = 0 if N_border == 0 else avg_ssim_border / N_border avg_psnrs.append(avg_psnr) avg_psnr_centers.append(avg_psnr_center) avg_psnr_borders.append(avg_psnr_border) avg_ssims.append(avg_ssim) avg_ssim_centers.append(avg_ssim_center) avg_ssim_borders.append(avg_ssim_border) logger.info('Folder {} - Average PSNR: {:.6f} dB for {} frames; ' 'Center PSNR: {:.6f} dB for {} frames; ' 'Border PSNR: {:.6f} dB for {} frames.'.format( sub, avg_psnr, (N_center + N_border), avg_psnr_center, N_center, avg_psnr_border, N_border)) logger.info('Folder {} - Average SSIM: {:.6f} for {} frames; ' 'Center SSIM: {:.6f} for {} frames; ' 'Border SSIM: {:.6f} for {} frames.'.format( sub, avg_ssim, (N_center + N_border), avg_ssim_center, N_center, avg_ssim_border, N_border)) logger.info('################ Tidy Outputs ################') for sub_name, psnr, psnr_center, psnr_border, ssim, ssim_center, ssim_border in zip( evaled_subs, avg_psnrs, avg_psnr_centers, avg_psnr_borders, avg_ssims, avg_ssim_centers, avg_ssim_borders): logger.info( 'Folder {} - Average PSNR: {:.6f} dB. ' 'Center PSNR: {:.6f} dB. Border PSNR: {:.6f} dB.'.format( sub_name, psnr, psnr_center, psnr_border)) logger.info('Folder {} - Average SSIM: {:.6f} ' 'Center SSIM: {:.6f} Border SSIM: {:.6f} '.format( sub_name, ssim, ssim_center, ssim_border)) logger.info('################ Final Results ################') logger.info('Data: {}'.format(opts.test_dir)) logger.info('Padding mode: {}'.format(padding)) logger.info('Model path: {}'.format(model_path)) logger.info('Save images: {}'.format(save_imgs)) logger.info('Flip test: {}'.format(flip_test)) logger.info('Total Average PSNR: {:.6f} dB for {} clips. ' 'Center PSNR: {:.6f} dB. Border PSNR: {:.6f} dB.'.format( sum(avg_psnrs) / len(avg_psnrs), len(test_subs), sum(avg_psnr_centers) / len(avg_psnr_centers), sum(avg_psnr_borders) / len(avg_psnr_borders))) logger.info('Total Average SSIM: {:.6f} for {} clips. ' 'Center SSIM: {:.6f} Border SSIM: {:.6f} '.format( sum(avg_ssims) / len(avg_ssims), len(test_subs), sum(avg_ssim_centers) / len(avg_ssim_centers), sum(avg_ssim_borders) / len(avg_ssim_borders)))
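Per-frame quality above comes from util.calculate_psnr and util.calculate_ssim on arrays in the [0, 255] range. For reference, a sketch of the PSNR definition these logs are assumed to use (SSIM is considerably more involved and omitted here):

import numpy as np

def calculate_psnr_sketch(img1, img2, max_val=255.0):
    # 10 * log10(MAX^2 / MSE) over same-sized float arrays
    mse = np.mean((img1.astype(np.float64) - img2.astype(np.float64)) ** 2)
    if mse == 0:
        return float('inf')
    return 10.0 * np.log10(max_val ** 2 / mse)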
def main(): ################# # configurations ################# device = torch.device('cuda') os.environ['CUDA_VISIBLE_DEVICES'] = '1' data_mode = 'Vid4' # Vid4 | sharp_bicubic | blur_bicubic | blur | blur_comp # Vid4: SR # REDS4: sharp_bicubic (SR-clean), blur_bicubic (SR-blur); # blur (deblur-clean), blur_comp (deblur-compression). stage = 1 # 1 or 2, use two stage strategy for REDS dataset. flip_test = False ############################################################################ #### model if data_mode == 'Vid4': if stage == 1: #model_path = '../experiments/pretrained_models/EDVR_REDS_SR_M.pth' model_path = '../experiments/002_EDVR_lr4e-4_600k_AI4KHDR/models/4000_G.pth' else: raise ValueError('Vid4 does not support stage 2.') elif data_mode == 'sharp_bicubic': if stage == 1: model_path = '../experiments/pretrained_models/EDVR_REDS_SR_L.pth' else: model_path = '../experiments/pretrained_models/EDVR_REDS_SR_Stage2.pth' elif data_mode == 'blur_bicubic': if stage == 1: model_path = '../experiments/pretrained_models/EDVR_REDS_SRblur_L.pth' else: model_path = '../experiments/pretrained_models/EDVR_REDS_SRblur_Stage2.pth' elif data_mode == 'blur': if stage == 1: model_path = '../experiments/pretrained_models/EDVR_REDS_deblur_L.pth' else: model_path = '../experiments/pretrained_models/EDVR_REDS_deblur_Stage2.pth' elif data_mode == 'blur_comp': if stage == 1: model_path = '../experiments/pretrained_models/EDVR_REDS_deblurcomp_L.pth' else: model_path = '../experiments/pretrained_models/EDVR_REDS_deblurcomp_Stage2.pth' else: raise NotImplementedError if data_mode == 'Vid4': N_in = 5 # use N_in images to restore one HR image else: N_in = 5 predeblur, HR_in = False, False back_RBs = 10 if data_mode == 'blur_bicubic': predeblur = True if data_mode == 'blur' or data_mode == 'blur_comp': predeblur, HR_in = True, True if stage == 2: HR_in = True back_RBs = 20 model = EDVR_arch.EDVR(64, 5, 8, 5, 10, predeblur=predeblur, HR_in=HR_in) #### dataset if data_mode == 'Vid4': test_dataset_folder = '/workspace/nas_mengdongwei/dataset/AI4KHDR/valid/540p_frames' GT_dataset_folder = '/workspace/nas_mengdongwei/dataset/AI4KHDR/valid/4k_frames' #test_dataset_folder = '../datasets/Vid4/BIx4' #GT_dataset_folder = '../datasets/Vid4/GT' else: if stage == 1: test_dataset_folder = '../datasets/REDS4/{}'.format(data_mode) else: test_dataset_folder = '../results/REDS-EDVR_REDS_SR_L_flipx4' print('You should modify the test_dataset_folder path for stage 2') GT_dataset_folder = '../datasets/REDS4/GT' #### evaluation crop_border = 0 border_frame = N_in // 2 # border frames when evaluate # temporal padding mode if data_mode == 'Vid4' or data_mode == 'sharp_bicubic': padding = 'new_info' else: padding = 'replicate' save_imgs = True save_folder = '../results/{}'.format(data_mode) util.mkdirs(save_folder) util.setup_logger('base', save_folder, 'test', level=logging.INFO, screen=True, tofile=True) logger = logging.getLogger('base') #### log info logger.info('Data: {} - {}'.format(data_mode, test_dataset_folder)) logger.info('Padding mode: {}'.format(padding)) logger.info('Model path: {}'.format(model_path)) logger.info('Save images: {}'.format(save_imgs)) logger.info('Flip test: {}'.format(flip_test)) #### set up the models model.load_state_dict(torch.load(model_path), strict=False) model.eval() model = model.to(device) avg_psnr_l, avg_psnr_center_l, avg_psnr_border_l = [], [], [] subfolder_name_l = [] subfolder_l = sorted(glob.glob(osp.join(test_dataset_folder, '*'))) subfolder_GT_l = 
sorted(glob.glob(osp.join(GT_dataset_folder, '*'))) # for each subfolder for subfolder, subfolder_GT in zip(subfolder_l, subfolder_GT_l): subfolder_name = osp.basename(subfolder) subfolder_name_l.append(subfolder_name) save_subfolder = osp.join(save_folder, subfolder_name) img_path_l = sorted(glob.glob(osp.join(subfolder, '*'))) max_idx = len(img_path_l) if save_imgs: util.mkdirs(save_subfolder) #### read LQ and GT images imgs_LQ = data_util.read_img_seq(subfolder) img_GT_l = [] for img_GT_path in sorted(glob.glob(osp.join(subfolder_GT, '*'))): img_GT_l.append(data_util.read_img(None, img_GT_path)) avg_psnr, avg_psnr_border, avg_psnr_center, N_border, N_center = 0, 0, 0, 0, 0 # process each image for img_idx, img_path in enumerate(img_path_l): img_name = osp.splitext(osp.basename(img_path))[0] select_idx = data_util.index_generation(img_idx, max_idx, N_in, padding=padding) imgs_in = imgs_LQ.index_select( 0, torch.LongTensor(select_idx)).unsqueeze(0).to(device) if flip_test: output = util.flipx4_forward(model, imgs_in) else: output = util.single_forward(model, imgs_in) output = util.tensor2img(output.squeeze(0)) if save_imgs: cv2.imwrite( osp.join(save_subfolder, '{}.png'.format(img_name)), output) # calculate PSNR output = output / 255. GT = np.copy(img_GT_l[img_idx]) # For REDS, evaluate on RGB channels; for Vid4, evaluate on the Y channel if data_mode == 'Vid4': # bgr2y, [0, 1] GT = data_util.bgr2ycbcr(GT, only_y=True) output = data_util.bgr2ycbcr(output, only_y=True) output, GT = util.crop_border([output, GT], crop_border) crt_psnr = util.calculate_psnr(output * 255, GT * 255) #logger.info('{:3d} - {:25} \tPSNR: {:.6f} dB'.format(img_idx + 1, img_name, crt_psnr)) if img_idx >= border_frame and img_idx < max_idx - border_frame: # center frames avg_psnr_center += crt_psnr N_center += 1 else: # border frames avg_psnr_border += crt_psnr N_border += 1 avg_psnr = (avg_psnr_center + avg_psnr_border) / (N_center + N_border) avg_psnr_center = avg_psnr_center / N_center avg_psnr_border = 0 if N_border == 0 else avg_psnr_border / N_border avg_psnr_l.append(avg_psnr) avg_psnr_center_l.append(avg_psnr_center) avg_psnr_border_l.append(avg_psnr_border) logger.info('Folder {} - Average PSNR: {:.6f} dB for {} frames; ' 'Center PSNR: {:.6f} dB for {} frames; ' 'Border PSNR: {:.6f} dB for {} frames.'.format( subfolder_name, avg_psnr, (N_center + N_border), avg_psnr_center, N_center, avg_psnr_border, N_border)) logger.info('################ Tidy Outputs ################') for subfolder_name, psnr, psnr_center, psnr_border in zip( subfolder_name_l, avg_psnr_l, avg_psnr_center_l, avg_psnr_border_l): logger.info('Folder {} - Average PSNR: {:.6f} dB. ' 'Center PSNR: {:.6f} dB. ' 'Border PSNR: {:.6f} dB.'.format(subfolder_name, psnr, psnr_center, psnr_border)) logger.info('################ Final Results ################') logger.info('Data: {} - {}'.format(data_mode, test_dataset_folder)) logger.info('Padding mode: {}'.format(padding)) logger.info('Model path: {}'.format(model_path)) logger.info('Save images: {}'.format(save_imgs)) logger.info('Flip test: {}'.format(flip_test)) logger.info('Total Average PSNR: {:.6f} dB for {} clips. ' 'Center PSNR: {:.6f} dB. Border PSNR: {:.6f} dB.'.format( sum(avg_psnr_l) / len(avg_psnr_l), len(subfolder_l), sum(avg_psnr_center_l) / len(avg_psnr_center_l), sum(avg_psnr_border_l) / len(avg_psnr_border_l)))
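For Vid4 the comparison is done on the luma channel only, via data_util.bgr2ycbcr(..., only_y=True). A sketch of that conversion as it is assumed to behave for float BGR images in [0, 1] (BT.601 "analog" YCbCr, so Y lands in [16/255, 235/255]):

import numpy as np

def bgr2y_sketch(img):
    # img: float HxWx3 BGR image with values in [0, 1]
    b, g, r = img[..., 0], img[..., 1], img[..., 2]
    return (24.966 * b + 128.553 * g + 65.481 * r + 16.0) / 255.0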
def define_G(opt): opt_net = opt['network_G'] which_model = opt_net['which_model_G'] # image restoration if which_model == 'MSRResNet': netG = SRResNet_arch.MSRResNet(in_nc=opt_net['in_nc'], out_nc=opt_net['out_nc'], nf=opt_net['nf'], nb=opt_net['nb'], upscale=opt_net['scale']) elif which_model == 'RRDBNet': netG = RRDBNet_arch.RRDBNet(in_nc=opt_net['in_nc'], out_nc=opt_net['out_nc'], nf=opt_net['nf'], nb=opt_net['nb']) # video restoration elif which_model == 'EDVR': netG = EDVR_arch.EDVR(nf=opt_net['nf'], nframes=opt_net['nframes'], groups=opt_net['groups'], front_RBs=opt_net['front_RBs'], back_RBs=opt_net['back_RBs'], center=opt_net['center'], predeblur=opt_net['predeblur'], HR_in=opt_net['HR_in'], w_TSA=opt_net['w_TSA']) elif which_model == 'MY_EDVR_FusionDenoise': netG = my_EDVR_arch.MYEDVR_FusionDenoise( nf=opt_net['nf'], nframes=opt_net['nframes'], groups=opt_net['groups'], front_RBs=opt_net['front_RBs'], back_RBs=opt_net['back_RBs'], center=opt_net['center'], predeblur=opt_net['predeblur'], HR_in=opt_net['HR_in'], w_TSA=opt_net['w_TSA']) elif which_model == 'MY_EDVR_RES': netG = my_EDVR_arch.MYEDVR_RES(nf=opt_net['nf'], nframes=opt_net['nframes'], groups=opt_net['groups'], front_RBs=opt_net['front_RBs'], back_RBs=opt_net['back_RBs'], center=opt_net['center'], predeblur=opt_net['predeblur'], HR_in=opt_net['HR_in'], w_TSA=opt_net['w_TSA']) elif which_model == 'MY_EDVR_PreEnhance': netG = my_EDVR_arch.MYEDVR_PreEnhance(nf=opt_net['nf'], nframes=opt_net['nframes'], groups=opt_net['groups'], front_RBs=opt_net['front_RBs'], back_RBs=opt_net['back_RBs'], center=opt_net['center'], predeblur=opt_net['predeblur'], HR_in=opt_net['HR_in'], w_TSA=opt_net['w_TSA']) elif which_model == 'Recurr_ResBlocks': netG = Recurr_arch.Recurr_ResBlocks( nf=opt_net['nf'], N_RBs=opt_net['N_RBs'], N_flow_lv=opt_net['N_flow_lv'], pretrain_flow=opt_net['pretrain_flow']) else: raise NotImplementedError( 'Generator model [{:s}] not recognized'.format(which_model)) return netG
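This define_G variant only reads the 'network_G' sub-dict of the options. A sketch of a configuration that would reach the EDVR branch; the values are illustrative, not taken from any released YAML:

opt_example = {
    'network_G': {
        'which_model_G': 'EDVR',
        'nf': 64, 'nframes': 5, 'groups': 8,
        'front_RBs': 5, 'back_RBs': 10, 'center': None,
        'predeblur': False, 'HR_in': False, 'w_TSA': True,
    }
}
# netG = define_G(opt_example)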
def main(): #################### # arguments parser # #################### # [format] dataset(vid4, REDS4) N(number of frames) # data_mode = str(args.dataset) # N_in = int(args.n_frames) # metrics = str(args.metrics) # output_format = str(args.output_format) ################# # configurations ################# device = torch.device('cuda') os.environ['CUDA_VISIBLE_DEVICES'] = '0' #data_mode = 'Vid4' # Vid4 | sharp_bicubic | blur_bicubic | blur | blur_comp # Vid4: SR # REDS4: sharp_bicubic (SR-clean), blur_bicubic (SR-blur); # blur (deblur-clean), blur_comp (deblur-compression). # STAGE Vid4 # Collecting results for Vid4 model_path = '../experiments/pretrained_models/EDVR_Vimeo90K_SR_L.pth' stage = 1 # 1 or 2, use two stage strategy for REDS dataset. flip_test = False predeblur, HR_in = False, False back_RBs = 40 N_model_default = 7 data_mode = 'Vid4' # vid4_dir_map = {"calendar": 0, "city": 1, "foliage": 2, "walk": 3} vid4_results = {"calendar": {}, "city": {}, "foliage": {}, "walk": {}} #vid4_results = 4 * [[]] for N_in in range(1, N_model_default + 1): raw_model = EDVR_arch.EDVR(128, N_model_default, 8, 5, back_RBs, predeblur=predeblur, HR_in=HR_in) model = EDVR_arch.EDVR(128, N_in, 8, 5, back_RBs, predeblur=predeblur, HR_in=HR_in) test_dataset_folder = '../datasets/Vid4/BIx4' GT_dataset_folder = '../datasets/Vid4/GT' aposterior_GT_dataset_folder = '../datasets/Vid4/GT_7' crop_border = 0 border_frame = N_in // 2 # border frames when evaluate padding = 'new_info' save_imgs = False raw_model.load_state_dict(torch.load(model_path), strict=True) model.nf = raw_model.nf model.center = N_in // 2 # if center is None else center model.is_predeblur = raw_model.is_predeblur model.HR_in = raw_model.HR_in model.w_TSA = raw_model.w_TSA if model.is_predeblur: model.pre_deblur = raw_model.pre_deblur # Predeblur_ResNet_Pyramid(nf=nf, HR_in=self.HR_in) model.conv_1x1 = raw_model.conv_1x1 # nn.Conv2d(nf, nf, 1, 1, bias=True) else: if model.HR_in: model.conv_first_1 = raw_model.conv_first_1 # nn.Conv2d(3, nf, 3, 1, 1, bias=True) model.conv_first_2 = raw_model.conv_first_2 # nn.Conv2d(nf, nf, 3, 2, 1, bias=True) model.conv_first_3 = raw_model.conv_first_3 # nn.Conv2d(nf, nf, 3, 2, 1, bias=True) else: model.conv_first = raw_model.conv_first # nn.Conv2d(3, nf, 3, 1, 1, bias=True) model.feature_extraction = raw_model.feature_extraction # arch_util.make_layer(ResidualBlock_noBN_f, front_RBs) model.fea_L2_conv1 = raw_model.fea_L2_conv1 # nn.Conv2d(nf, nf, 3, 2, 1, bias=True) model.fea_L2_conv2 = raw_model.fea_L2_conv2 # nn.Conv2d(nf, nf, 3, 1, 1, bias=True) model.fea_L3_conv1 = raw_model.fea_L3_conv1 # nn.Conv2d(nf, nf, 3, 2, 1, bias=True) model.fea_L3_conv2 = raw_model.fea_L3_conv2 # nn.Conv2d(nf, nf, 3, 1, 1, bias=True) model.pcd_align = raw_model.pcd_align # PCD_Align(nf=nf, groups=groups) model.tsa_fusion.center = model.center model.tsa_fusion.tAtt_1 = raw_model.tsa_fusion.tAtt_1 model.tsa_fusion.tAtt_2 = raw_model.tsa_fusion.tAtt_2 model.tsa_fusion.fea_fusion = copy.deepcopy(raw_model.tsa_fusion.fea_fusion) model.tsa_fusion.fea_fusion.weight = copy.deepcopy(torch.nn.Parameter(raw_model.tsa_fusion.fea_fusion.weight[:, 0:N_in * 128, :, :])) model.tsa_fusion.sAtt_1 = copy.deepcopy(raw_model.tsa_fusion.sAtt_1) model.tsa_fusion.sAtt_1.weight = copy.deepcopy(torch.nn.Parameter(raw_model.tsa_fusion.sAtt_1.weight[:, 0:N_in * 128, :, :])) model.tsa_fusion.maxpool = raw_model.tsa_fusion.maxpool model.tsa_fusion.avgpool = raw_model.tsa_fusion.avgpool model.tsa_fusion.sAtt_2 = raw_model.tsa_fusion.sAtt_2 
model.tsa_fusion.sAtt_3 = raw_model.tsa_fusion.sAtt_3 model.tsa_fusion.sAtt_4 = raw_model.tsa_fusion.sAtt_4 model.tsa_fusion.sAtt_5 = raw_model.tsa_fusion.sAtt_5 model.tsa_fusion.sAtt_L1 = raw_model.tsa_fusion.sAtt_L1 model.tsa_fusion.sAtt_L2 = raw_model.tsa_fusion.sAtt_L2 model.tsa_fusion.sAtt_L3 = raw_model.tsa_fusion.sAtt_L3 model.tsa_fusion.sAtt_add_1 = raw_model.tsa_fusion.sAtt_add_1 model.tsa_fusion.sAtt_add_2 = raw_model.tsa_fusion.sAtt_add_2 model.tsa_fusion.lrelu = raw_model.tsa_fusion.lrelu model.recon_trunk = raw_model.recon_trunk model.upconv1 = raw_model.upconv1 model.upconv2 = raw_model.upconv2 model.pixel_shuffle = raw_model.pixel_shuffle model.HRconv = raw_model.HRconv model.conv_last = raw_model.conv_last model.lrelu = raw_model.lrelu ##################################################### model.eval() model = model.to(device) #avg_psnr_l, avg_psnr_center_l, avg_psnr_border_l = [], [], [] subfolder_name_l = [] subfolder_l = sorted(glob.glob(osp.join(test_dataset_folder, '*'))) subfolder_GT_l = sorted(glob.glob(osp.join(GT_dataset_folder, '*'))) subfolder_GT_a_l = sorted(glob.glob(osp.join(aposterior_GT_dataset_folder, "*"))) # for each subfolder for subfolder, subfolder_GT, subfolder_GT_a in zip(subfolder_l, subfolder_GT_l, subfolder_GT_a_l): subfolder_name = osp.basename(subfolder) subfolder_name_l.append(subfolder_name) img_path_l = sorted(glob.glob(osp.join(subfolder, '*'))) max_idx = len(img_path_l) print("MAX_IDX: ", max_idx) #### read LQ and GT images imgs_LQ = data_util.read_img_seq(subfolder) img_GT_l = [] for img_GT_path in sorted(glob.glob(osp.join(subfolder_GT, '*'))): img_GT_l.append(data_util.read_img(None, img_GT_path)) img_GT_a = [] for img_GT_a_path in sorted(glob.glob(osp.join(subfolder_GT_a, '*'))): img_GT_a.append(data_util.read_img(None, img_GT_a_path)) #avg_psnr, avg_psnr_border, avg_psnr_center, N_border, N_center = 0, 0, 0, 0, 0 # process each image for img_idx, img_path in enumerate(img_path_l): img_name = osp.splitext(osp.basename(img_path))[0] select_idx = data_util.index_generation(img_idx, max_idx, N_in, padding=padding) imgs_in = imgs_LQ.index_select(0, torch.LongTensor(select_idx)).unsqueeze(0).to(device) if flip_test: output = util.flipx4_forward(model, imgs_in) else: print("IMGS_IN SHAPE: ", imgs_in.shape) output = util.single_forward(model, imgs_in) output = util.tensor2img(output.squeeze(0)) # calculate PSNR output = output / 255. 
GT = np.copy(img_GT_l[img_idx]) # For REDS, evaluate on RGB channels; for Vid4, evaluate on the Y channel #if data_mode == 'Vid4': # bgr2y, [0, 1] GT = data_util.bgr2ycbcr(GT, only_y=True) output = data_util.bgr2ycbcr(output, only_y=True) GT_a = np.copy(img_GT_a[img_idx]) GT_a = data_util.bgr2ycbcr(GT_a, only_y=True) output_a = copy.deepcopy(output) output, GT = util.crop_border([output, GT], crop_border) crt_psnr = util.calculate_psnr(output * 255, GT * 255) crt_ssim = util.calculate_ssim(output * 255, GT * 255) output_a, GT_a = util.crop_border([output_a, GT_a], crop_border) crt_aposterior = util.calculate_ssim(output_a * 255, GT_a * 255) # CHANGE t = vid4_results[subfolder_name].get(str(img_name)) if t != None: vid4_results[subfolder_name][img_name].add_psnr(crt_psnr) vid4_results[subfolder_name][img_name].add_gt_ssim(crt_ssim) vid4_results[subfolder_name][img_name].add_aposterior_ssim(crt_aposterior) else: vid4_results[subfolder_name].update({img_name: metrics_file(img_name)}) vid4_results[subfolder_name][img_name].add_psnr(crt_psnr) vid4_results[subfolder_name][img_name].add_gt_ssim(crt_ssim) vid4_results[subfolder_name][img_name].add_aposterior_ssim(crt_aposterior) ############################################################################ #### model #### writing vid4 results util.mkdirs('../results/calendar') util.mkdirs('../results/city') util.mkdirs('../results/foliage') util.mkdirs('../results/walk') save_folder = '../results/' for i, dir_name in enumerate(["calendar", "city", "foliage", "walk"]): save_subfolder = osp.join(save_folder, dir_name) for j, value in vid4_results[dir_name].items(): # cur_result = json.dumps(_) with open(osp.join(save_subfolder, '{}.json'.format(value.name)), 'w') as outfile: json.dump(value.__dict__, outfile, ensure_ascii=False, indent=4) #json.dump(cur_result, outfile) #cv2.imwrite(osp.join(save_subfolder, '{}.png'.format(img_name)), output) ################################################################################### # STAGE REDS reds4_results = {"000": {}, "011": {}, "015": {}, "020": {}} data_mode = 'sharp_bicubic' N_model_default = 5 for N_in in range(1, N_model_default + 1): for stage in range(1,3): flip_test = False if data_mode == 'sharp_bicubic': if stage == 1: model_path = '../experiments/pretrained_models/EDVR_REDS_SR_L.pth' else: model_path = '../experiments/pretrained_models/EDVR_REDS_SR_Stage2.pth' elif data_mode == 'blur_bicubic': if stage == 1: model_path = '../experiments/pretrained_models/EDVR_REDS_SRblur_L.pth' else: model_path = '../experiments/pretrained_models/EDVR_REDS_SRblur_Stage2.pth' elif data_mode == 'blur': if stage == 1: model_path = '../experiments/pretrained_models/EDVR_REDS_deblur_L.pth' else: model_path = '../experiments/pretrained_models/EDVR_REDS_deblur_Stage2.pth' elif data_mode == 'blur_comp': if stage == 1: model_path = '../experiments/pretrained_models/EDVR_REDS_deblurcomp_L.pth' else: model_path = '../experiments/pretrained_models/EDVR_REDS_deblurcomp_Stage2.pth' else: raise NotImplementedError predeblur, HR_in = False, False back_RBs = 40 if data_mode == 'blur_bicubic': predeblur = True if data_mode == 'blur' or data_mode == 'blur_comp': predeblur, HR_in = True, True if stage == 2: HR_in = True back_RBs = 20 if stage == 1: test_dataset_folder = '../datasets/REDS4/{}'.format(data_mode) else: test_dataset_folder = '../results/REDS-EDVR_REDS_SR_L_flipx4' print('You should modify the test_dataset_folder path for stage 2') GT_dataset_folder = '../datasets/REDS4/GT' raw_model = EDVR_arch.EDVR(128, 
N_model_default, 8, 5, back_RBs, predeblur=predeblur, HR_in=HR_in) model = EDVR_arch.EDVR(128, N_in, 8, 5, back_RBs, predeblur=predeblur, HR_in=HR_in) crop_border = 0 border_frame = N_in // 2 # border frames when evaluate # temporal padding mode if data_mode == 'Vid4' or data_mode == 'sharp_bicubic': padding = 'new_info' else: padding = 'replicate' save_imgs = True data_mode_t = copy.deepcopy(data_mode) if stage == 1 and data_mode_t != 'Vid4': data_mode = 'REDS-EDVR_REDS_SR_L_flipx4' save_folder = '../results/{}'.format(data_mode) data_mode = copy.deepcopy(data_mode_t) util.mkdirs(save_folder) util.setup_logger('base', save_folder, 'test', level=logging.INFO, screen=True, tofile=True) aposterior_GT_dataset_folder = '../datasets/REDS4/GT_5' crop_border = 0 border_frame = N_in // 2 # border frames when evaluate raw_model.load_state_dict(torch.load(model_path), strict=True) model.nf = raw_model.nf model.center = N_in // 2 # if center is None else center model.is_predeblur = raw_model.is_predeblur model.HR_in = raw_model.HR_in model.w_TSA = raw_model.w_TSA if model.is_predeblur: model.pre_deblur = raw_model.pre_deblur # Predeblur_ResNet_Pyramid(nf=nf, HR_in=self.HR_in) model.conv_1x1 = raw_model.conv_1x1 # nn.Conv2d(nf, nf, 1, 1, bias=True) else: if model.HR_in: model.conv_first_1 = raw_model.conv_first_1 # nn.Conv2d(3, nf, 3, 1, 1, bias=True) model.conv_first_2 = raw_model.conv_first_2 # nn.Conv2d(nf, nf, 3, 2, 1, bias=True) model.conv_first_3 = raw_model.conv_first_3 # nn.Conv2d(nf, nf, 3, 2, 1, bias=True) else: model.conv_first = raw_model.conv_first # nn.Conv2d(3, nf, 3, 1, 1, bias=True) model.feature_extraction = raw_model.feature_extraction # arch_util.make_layer(ResidualBlock_noBN_f, front_RBs) model.fea_L2_conv1 = raw_model.fea_L2_conv1 # nn.Conv2d(nf, nf, 3, 2, 1, bias=True) model.fea_L2_conv2 = raw_model.fea_L2_conv2 # nn.Conv2d(nf, nf, 3, 1, 1, bias=True) model.fea_L3_conv1 = raw_model.fea_L3_conv1 # nn.Conv2d(nf, nf, 3, 2, 1, bias=True) model.fea_L3_conv2 = raw_model.fea_L3_conv2 # nn.Conv2d(nf, nf, 3, 1, 1, bias=True) model.pcd_align = raw_model.pcd_align # PCD_Align(nf=nf, groups=groups) model.tsa_fusion.center = model.center model.tsa_fusion.tAtt_1 = raw_model.tsa_fusion.tAtt_1 model.tsa_fusion.tAtt_2 = raw_model.tsa_fusion.tAtt_2 model.tsa_fusion.fea_fusion = copy.deepcopy(raw_model.tsa_fusion.fea_fusion) model.tsa_fusion.fea_fusion.weight = copy.deepcopy(torch.nn.Parameter(raw_model.tsa_fusion.fea_fusion.weight[:, 0:N_in * 128, :, :])) model.tsa_fusion.sAtt_1 = copy.deepcopy(raw_model.tsa_fusion.sAtt_1) model.tsa_fusion.sAtt_1.weight = copy.deepcopy(torch.nn.Parameter(raw_model.tsa_fusion.sAtt_1.weight[:, 0:N_in * 128, :, :])) model.tsa_fusion.maxpool = raw_model.tsa_fusion.maxpool model.tsa_fusion.avgpool = raw_model.tsa_fusion.avgpool model.tsa_fusion.sAtt_2 = raw_model.tsa_fusion.sAtt_2 model.tsa_fusion.sAtt_3 = raw_model.tsa_fusion.sAtt_3 model.tsa_fusion.sAtt_4 = raw_model.tsa_fusion.sAtt_4 model.tsa_fusion.sAtt_5 = raw_model.tsa_fusion.sAtt_5 model.tsa_fusion.sAtt_L1 = raw_model.tsa_fusion.sAtt_L1 model.tsa_fusion.sAtt_L2 = raw_model.tsa_fusion.sAtt_L2 model.tsa_fusion.sAtt_L3 = raw_model.tsa_fusion.sAtt_L3 model.tsa_fusion.sAtt_add_1 = raw_model.tsa_fusion.sAtt_add_1 model.tsa_fusion.sAtt_add_2 = raw_model.tsa_fusion.sAtt_add_2 model.tsa_fusion.lrelu = raw_model.tsa_fusion.lrelu model.recon_trunk = raw_model.recon_trunk model.upconv1 = raw_model.upconv1 model.upconv2 = raw_model.upconv2 model.pixel_shuffle = raw_model.pixel_shuffle model.HRconv = raw_model.HRconv 
model.conv_last = raw_model.conv_last model.lrelu = raw_model.lrelu ##################################################### model.eval() model = model.to(device) #avg_psnr_l, avg_psnr_center_l, avg_psnr_border_l = [], [], [] subfolder_name_l = [] subfolder_l = sorted(glob.glob(osp.join(test_dataset_folder, '*'))) subfolder_GT_l = sorted(glob.glob(osp.join(GT_dataset_folder, '*'))) subfolder_GT_a_l = sorted(glob.glob(osp.join(aposterior_GT_dataset_folder, "*"))) # for each subfolder for subfolder, subfolder_GT, subfolder_GT_a in zip(subfolder_l, subfolder_GT_l, subfolder_GT_a_l): subfolder_name = osp.basename(subfolder) subfolder_name_l.append(subfolder_name) save_subfolder = osp.join(save_folder, subfolder_name) img_path_l = sorted(glob.glob(osp.join(subfolder, '*'))) max_idx = len(img_path_l) print("MAX_IDX: ", max_idx) print("SAVE FOLDER::::::", save_folder) if save_imgs: util.mkdirs(save_subfolder) #### read LQ and GT images imgs_LQ = data_util.read_img_seq(subfolder) img_GT_l = [] for img_GT_path in sorted(glob.glob(osp.join(subfolder_GT, '*'))): img_GT_l.append(data_util.read_img(None, img_GT_path)) img_GT_a = [] for img_GT_a_path in sorted(glob.glob(osp.join(subfolder_GT_a, '*'))): img_GT_a.append(data_util.read_img(None, img_GT_a_path)) #avg_psnr, avg_psnr_border, avg_psnr_center, N_border, N_center = 0, 0, 0, 0, 0 # process each image for img_idx, img_path in enumerate(img_path_l): img_name = osp.splitext(osp.basename(img_path))[0] select_idx = data_util.index_generation(img_idx, max_idx, N_in, padding=padding) imgs_in = imgs_LQ.index_select(0, torch.LongTensor(select_idx)).unsqueeze(0).to(device) if flip_test: output = util.flipx4_forward(model, imgs_in) else: print("IMGS_IN SHAPE: ", imgs_in.shape) output = util.single_forward(model, imgs_in) output = util.tensor2img(output.squeeze(0)) if save_imgs and stage == 1: cv2.imwrite(osp.join(save_subfolder, '{}.png'.format(img_name)), output) # calculate PSNR if stage == 2: output = output / 255. 
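# The per-frame PSNR/SSIM values computed below are stored in reds4_results, keyed by
# clip ("000", "011", "015", "020") and frame name; since reds4_results is created once
# before the N_in / stage sweep, each metrics_file collects one value per configuration
# of that sweep before the results are written out as JSON.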
GT = np.copy(img_GT_l[img_idx]) # For REDS, evaluate on RGB channels; for Vid4, evaluate on the Y channel #if data_mode == 'Vid4': # bgr2y, [0, 1] GT_a = np.copy(img_GT_a[img_idx]) output_a = copy.deepcopy(output) output, GT = util.crop_border([output, GT], crop_border) crt_psnr = util.calculate_psnr(output * 255, GT * 255) crt_ssim = util.calculate_ssim(output * 255, GT * 255) output_a, GT_a = util.crop_border([output_a, GT_a], crop_border) crt_aposterior = util.calculate_ssim(output_a * 255, GT_a * 255) # CHANGE t = reds4_results[subfolder_name].get(str(img_name)) if t != None: reds4_results[subfolder_name][img_name].add_psnr(crt_psnr) reds4_results[subfolder_name][img_name].add_gt_ssim(crt_ssim) reds4_results[subfolder_name][img_name].add_aposterior_ssim(crt_aposterior) else: reds4_results[subfolder_name].update({img_name: metrics_file(img_name)}) reds4_results[subfolder_name][img_name].add_psnr(crt_psnr) reds4_results[subfolder_name][img_name].add_gt_ssim(crt_ssim) reds4_results[subfolder_name][img_name].add_aposterior_ssim(crt_aposterior) ############################################################################ #### model #### writing reds4 results util.mkdirs('../results/000') util.mkdirs('../results/011') util.mkdirs('../results/015') util.mkdirs('../results/020') save_folder = '../results/' for i, dir_name in enumerate(["000", "011", "015", "020"]): # + save_subfolder = osp.join(save_folder, dir_name) for j, value in reds4_results[dir_name].items(): # cur_result = json.dumps(value.__dict__) with open(osp.join(save_subfolder, '{}.json'.format(value.name)), 'w') as outfile: json.dump(value.__dict__, outfile, ensure_ascii=False, indent=4)
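# The block above reuses a 5-frame pretrained EDVR with a smaller temporal window by
# slicing the input channels of the TSA fusion convolutions (fea_fusion and sAtt_1)
# down to N_in * nf. The helper below is a minimal, self-contained sketch of that idea
# for a generic nn.Conv2d; the function name and arguments are illustrative and not
# part of the EDVR code base.
import torch
import torch.nn as nn


def narrow_fusion_conv(src_conv: nn.Conv2d, nf: int, n_frames_dst: int) -> nn.Conv2d:
    """Copy src_conv, keeping only the input-channel weights for the first n_frames_dst frames."""
    dst_conv = nn.Conv2d(n_frames_dst * nf, src_conv.out_channels,
                         kernel_size=src_conv.kernel_size, stride=src_conv.stride,
                         padding=src_conv.padding, bias=src_conv.bias is not None)
    with torch.no_grad():
        # slice the pretrained weight along the input-channel axis
        dst_conv.weight.copy_(src_conv.weight[:, :n_frames_dst * nf, :, :])
        if src_conv.bias is not None:
            dst_conv.bias.copy_(src_conv.bias)
    return dst_conv


# e.g. model.tsa_fusion.fea_fusion = narrow_fusion_conv(raw_model.tsa_fusion.fea_fusion,
#                                                       nf=128, n_frames_dst=N_in)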
def main(): ################# # configurations ################# device = torch.device('cuda') os.environ['CUDA_VISIBLE_DEVICES'] = '0' data_mode = 'Vid4' # Vid4 | sharp_bicubic | blur_bicubic | blur | blur_comp # Vid4: SR # REDS4: sharp_bicubic (SR-clean), blur_bicubic (SR-blur); # blur (deblur-clean), blur_comp (deblur-compression). stage = 1 # 1 or 2, use two stage strategy for REDS dataset. flip_test = False ############################################################################ #### model if data_mode == 'Vid4': if stage == 1: model_path = '../experiments/pretrained_models/cinepak_small2.pth' else: raise ValueError('Vid4 does not support stage 2.') elif data_mode == 'sharp_bicubic': if stage == 1: model_path = '../experiments/pretrained_models/EDVR_REDS_SR_L.pth' else: model_path = '../experiments/pretrained_models/EDVR_REDS_SR_Stage2.pth' elif data_mode == 'blur_bicubic': if stage == 1: model_path = '../experiments/pretrained_models/EDVR_REDS_SRblur_L.pth' else: model_path = '../experiments/pretrained_models/EDVR_REDS_SRblur_Stage2.pth' elif data_mode == 'blur': if stage == 1: model_path = '../experiments/pretrained_models/EDVR_REDS_deblur_L.pth' else: model_path = '../experiments/pretrained_models/EDVR_REDS_deblur_Stage2.pth' elif data_mode == 'blur_comp': if stage == 1: model_path = '../experiments/pretrained_models/EDVR_REDS_deblurcomp_L.pth' else: model_path = '../experiments/pretrained_models/EDVR_REDS_deblurcomp_Stage2.pth' else: raise NotImplementedError if data_mode == 'Vid4': N_in = 7 # use N_in images to restore one HR image else: N_in = 5 predeblur, HR_in = False, False back_RBs = 10 if data_mode == 'blur_bicubic': predeblur = True if data_mode == 'blur' or data_mode == 'blur_comp': predeblur, HR_in = True, True if stage == 2: HR_in = True back_RBs = 20 model = EDVR_arch.EDVR(64, N_in, 8, 5, back_RBs, predeblur=predeblur, HR_in=HR_in) #### dataset if data_mode == 'Vid4': test_dataset_folder = '../datasets/Vid4/BIx4' GT_dataset_folder = '../datasets/Vid4/GT' else: if stage == 1: test_dataset_folder = '../datasets/REDS4/{}'.format(data_mode) else: test_dataset_folder = '../results/REDS-EDVR_REDS_SR_L_flipx4' print('You should modify the test_dataset_folder path for stage 2') GT_dataset_folder = '../datasets/REDS4/GT' #### evaluation crop_border = 0 border_frame = N_in // 2 # border frames when evaluate # temporal padding mode if data_mode == 'Vid4' or data_mode == 'sharp_bicubic': padding = 'new_info' else: padding = 'replicate' save_imgs = True save_folder = '../results/{}'.format(data_mode) util.mkdirs(save_folder) util.setup_logger('base', save_folder, 'test', level=logging.INFO, screen=True, tofile=True) logger = logging.getLogger('base') #### log info logger.info('Data: {} - {}'.format(data_mode, test_dataset_folder)) logger.info('Padding mode: {}'.format(padding)) logger.info('Model path: {}'.format(model_path)) logger.info('Save images: {}'.format(save_imgs)) logger.info('Flip test: {}'.format(flip_test)) #### set up the models model.load_state_dict(torch.load(model_path), strict=True) model.eval() model = model.to(device) avg_psnr_l, avg_psnr_center_l, avg_psnr_border_l = [], [], [] subfolder_name_l = [] subfolder_l = sorted(glob.glob(osp.join(test_dataset_folder, '*'))) # for each subfolder for subfolder in subfolder_l: print('Processing video {:s}'.format(subfolder)) subfolder_name = osp.basename(subfolder) subfolder_name_l.append(subfolder_name) save_subfolder = osp.join(save_folder, subfolder_name) img_path_l = sorted(glob.glob(osp.join(subfolder, '*'))) 
max_idx = len(img_path_l) if save_imgs: util.mkdirs(save_subfolder) #### read LQ images imgs_LQ = data_util.read_img_seq(subfolder) # process each image for img_idx, img_path in enumerate(img_path_l): img_name = osp.splitext(osp.basename(img_path))[0] print('\tProcessing frame {:s}'.format(img_name)) select_idx = data_util.index_generation(img_idx, max_idx, N_in, padding=padding) imgs_in = imgs_LQ.index_select( 0, torch.LongTensor(select_idx)).unsqueeze(0).to(device) output = util.single_forward(model, imgs_in) output = util.tensor2img(output.squeeze(0)) cv2.imwrite(osp.join(save_subfolder, '{}.png'.format(img_name)), output)
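# The temporal window fed to the model is chosen by data_util.index_generation: for
# frame crt_i out of max_n frames it returns N indices centred on crt_i, filling
# indices that fall outside the clip according to the padding mode. The function below
# is a simplified sketch of that behaviour, covering only the two modes used in these
# scripts ('replicate' and 'new_info'); the version in data_util supports more modes
# and may differ in detail.
def window_indices(crt_i, max_n, N, padding='replicate'):
    last = max_n - 1
    half = N // 2
    out = []
    for i in range(crt_i - half, crt_i + half + 1):
        if i < 0:
            # left of the clip: clamp to frame 0, or borrow extra frames from the right
            out.append(0 if padding == 'replicate' else crt_i + half - i)
        elif i > last:
            # right of the clip: clamp to the last frame, or borrow extra frames from the left
            out.append(last if padding == 'replicate' else crt_i - half - (i - last))
        else:
            out.append(i)
    return out


# window_indices(0, 30, 5, 'replicate') -> [0, 0, 0, 1, 2]
# window_indices(0, 30, 5, 'new_info')  -> [4, 3, 0, 1, 2]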
def main(): ################# # configurations ################# parser = argparse.ArgumentParser() parser.add_argument("--input_path", type=str, required=True) # parser.add_argument("--gt_path", type=str, required=True) parser.add_argument("--output_path", type=str, required=True) parser.add_argument("--model_path", type=str, required=True) parser.add_argument("--gpu_id", type=str, required=True) parser.add_argument("--gpu_number", type=str, required=True) parser.add_argument("--gpu_index", type=str, required=True) parser.add_argument("--screen_notation", type=str, required=True) parser.add_argument('--opt', type=str, required=True, help='Path to option YAML file.') args = parser.parse_args() opt = option.parse(args.opt, is_train=False) gpu_number = int(args.gpu_number) gpu_index = int(args.gpu_index) PAD = 32 total_run_time = AverageMeter() # print("GPU ", torch.cuda.device_count()) device = torch.device('cuda') # os.environ['CUDA_VISIBLE_DEVICES'] = '0' os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_id) print('export CUDA_VISIBLE_DEVICES=' + str(args.gpu_id)) data_mode = 'sharp_bicubic' # Vid4 | sharp_bicubic | blur_bicubic | blur | blur_comp # Vid4: SR # REDS4: sharp_bicubic (SR-clean), blur_bicubic (SR-blur); # blur (deblur-clean), blur_comp (deblur-compression). stage = 1 # 1 or 2, use two stage strategy for REDS dataset. flip_test = False # Input_folder = "/DATA7_DB7/data/4khdr/data/Dataset/train_sharp_bicubic" # GT_folder = "/DATA7_DB7/data/4khdr/data/Dataset/train_4k" # Result_folder = "/DATA7_DB7/data/4khdr/data/Results" Input_folder = args.input_path # GT_folder = args.gt_path Result_folder = args.output_path Model_path = args.model_path # create results folder if not os.path.exists(Result_folder): os.makedirs(Result_folder, exist_ok=True) ############################################################################ #### model # if data_mode == 'Vid4': # if stage == 1: # model_path = '../experiments/pretrained_models/EDVR_Vimeo90K_SR_L.pth' # else: # raise ValueError('Vid4 does not support stage 2.') # elif data_mode == 'sharp_bicubic': # if stage == 1: # # model_path = '../experiments/pretrained_models/EDVR_REDS_SR_L.pth' # else: # model_path = '../experiments/pretrained_models/EDVR_REDS_SR_Stage2.pth' # elif data_mode == 'blur_bicubic': # if stage == 1: # model_path = '../experiments/pretrained_models/EDVR_REDS_SRblur_L.pth' # else: # model_path = '../experiments/pretrained_models/EDVR_REDS_SRblur_Stage2.pth' # elif data_mode == 'blur': # if stage == 1: # model_path = '../experiments/pretrained_models/EDVR_REDS_deblur_L.pth' # else: # model_path = '../experiments/pretrained_models/EDVR_REDS_deblur_Stage2.pth' # elif data_mode == 'blur_comp': # if stage == 1: # model_path = '../experiments/pretrained_models/EDVR_REDS_deblurcomp_L.pth' # else: # model_path = '../experiments/pretrained_models/EDVR_REDS_deblurcomp_Stage2.pth' # else: # raise NotImplementedError model_path = Model_path if data_mode == 'Vid4': N_in = 7 # use N_in images to restore one HR image else: N_in = 5 predeblur, HR_in = False, False back_RBs = 40 if data_mode == 'blur_bicubic': predeblur = True if data_mode == 'blur' or data_mode == 'blur_comp': predeblur, HR_in = True, True if stage == 2: HR_in = True back_RBs = 20 model = EDVR_arch.EDVR(nf=opt['network_G']['nf'], nframes=opt['network_G']['nframes'], groups=opt['network_G']['groups'], front_RBs=opt['network_G']['front_RBs'], back_RBs=opt['network_G']['back_RBs'], predeblur=opt['network_G']['predeblur'], HR_in=opt['network_G']['HR_in'], 
w_TSA=opt['network_G']['w_TSA']) # model = EDVR_arch.EDVR(128, N_in, 8, 5, back_RBs, predeblur=predeblur, HR_in=HR_in) #### dataset if data_mode == 'Vid4': test_dataset_folder = '../datasets/Vid4/BIx4' GT_dataset_folder = '../datasets/Vid4/GT' else: if stage == 1: # test_dataset_folder = '../datasets/REDS4/{}'.format(data_mode) # test_dataset_folder = '/DATA/wangshen_data/REDS/val_sharp_bicubic/X4' test_dataset_folder = Input_folder else: test_dataset_folder = '../results/REDS-EDVR_REDS_SR_L_flipx4' print('You should modify the test_dataset_folder path for stage 2') # GT_dataset_folder = '../datasets/REDS4/GT' # GT_dataset_folder = '/DATA/wangshen_data/REDS/val_sharp' # GT_dataset_folder = GT_folder #### evaluation crop_border = 0 border_frame = N_in // 2 # border frames when evaluate # temporal padding mode if data_mode == 'Vid4' or data_mode == 'sharp_bicubic': padding = 'new_info' else: padding = 'replicate' save_imgs = True # save_folder = '../results/{}'.format(data_mode) # save_folder = '/DATA/wangshen_data/REDS/results/{}'.format(data_mode) save_folder = os.path.join(Result_folder, data_mode) util.mkdirs(save_folder) util.setup_logger('base', save_folder, 'test', level=logging.INFO, screen=True, tofile=True) logger = logging.getLogger('base') #### log info logger.info('Data: {} - {}'.format(data_mode, test_dataset_folder)) logger.info('Padding mode: {}'.format(padding)) logger.info('Model path: {}'.format(model_path)) logger.info('Save images: {}'.format(save_imgs)) logger.info('Flip test: {}'.format(flip_test)) #### set up the models model.load_state_dict(torch.load(model_path), strict=True) model.eval() model = model.to(device) avg_psnr_l, avg_psnr_center_l, avg_psnr_border_l = [], [], [] subfolder_name_l = [] subfolder_l = sorted(glob.glob(osp.join(test_dataset_folder, '*'))) # subfolder_GT_l = sorted(glob.glob(osp.join(GT_dataset_folder, '*'))) # for each subfolder # for subfolder, subfolder_GT in zip(subfolder_l, subfolder_GT_l): end = time.time() # load screen change notation import json with open(args.screen_notation) as f: frame_notation = json.load(f) subfolder_n = len(subfolder_l) subfolder_l = subfolder_l[int(subfolder_n * gpu_index / gpu_number):int(subfolder_n * (gpu_index + 1) / gpu_number)] for subfolder in subfolder_l: input_subfolder = os.path.split(subfolder)[1] # subfolder_GT = os.path.join(GT_dataset_folder,input_subfolder) #if not os.path.exists(subfolder_GT): # continue print("Evaluate Folders: ", input_subfolder) subfolder_name = osp.basename(subfolder) subfolder_name_l.append(subfolder_name) save_subfolder = osp.join(save_folder, subfolder_name) img_path_l = sorted(glob.glob(osp.join(subfolder, '*'))) max_idx = len(img_path_l) if save_imgs: util.mkdirs(save_subfolder) #### read LQ and GT images imgs_LQ = data_util.read_img_seq(subfolder) # Num x 3 x H x W #img_GT_l = [] #for img_GT_path in sorted(glob.glob(osp.join(subfolder_GT, '*'))): # img_GT_l.append(data_util.read_img(None, img_GT_path)) #avg_psnr, avg_psnr_border, avg_psnr_center, N_border, N_center = 0, 0, 0, 0, 0 # process each image for img_idx, img_path in enumerate(img_path_l): img_name = osp.splitext(osp.basename(img_path))[0] # todo here handle screen change select_idx, log1, log2, nota = data_util.index_generation_process_screen_change_withlog_fixbug( input_subfolder, frame_notation, img_idx, max_idx, N_in, padding=padding) if not log1 == None: logger.info('screen change') logger.info(nota) logger.info(log1) logger.info(log2) imgs_in = imgs_LQ.index_select( 0, 
torch.LongTensor(select_idx)).unsqueeze(0).to( device) # 960 x 540 # here we split the input images 960x540 into 9 320x180 patch gtWidth = 3840 gtHeight = 2160 intWidth_ori = imgs_in.shape[4] # 960 intHeight_ori = imgs_in.shape[3] # 540 split_lengthY = 180 split_lengthX = 320 scale = 4 intPaddingRight_ = int(float(intWidth_ori) / split_lengthX + 1) * split_lengthX - intWidth_ori intPaddingBottom_ = int(float(intHeight_ori) / split_lengthY + 1) * split_lengthY - intHeight_ori intPaddingRight_ = 0 if intPaddingRight_ == split_lengthX else intPaddingRight_ intPaddingBottom_ = 0 if intPaddingBottom_ == split_lengthY else intPaddingBottom_ pader0 = torch.nn.ReplicationPad2d( [0, intPaddingRight_, 0, intPaddingBottom_]) print("Init pad right/bottom " + str(intPaddingRight_) + " / " + str(intPaddingBottom_)) intPaddingRight = PAD # 32# 64# 128# 256 intPaddingLeft = PAD # 32#64 #128# 256 intPaddingTop = PAD # 32#64 #128#256 intPaddingBottom = PAD # 32#64 # 128# 256 pader = torch.nn.ReplicationPad2d([ intPaddingLeft, intPaddingRight, intPaddingTop, intPaddingBottom ]) imgs_in = torch.squeeze(imgs_in, 0) # N C H W imgs_in = pader0(imgs_in) # N C 540 960 imgs_in = pader(imgs_in) # N C 604 1024 assert (split_lengthY == int(split_lengthY) and split_lengthX == int(split_lengthX)) split_lengthY = int(split_lengthY) split_lengthX = int(split_lengthX) split_numY = int(float(intHeight_ori) / split_lengthY) split_numX = int(float(intWidth_ori) / split_lengthX) splitsY = range(0, split_numY) splitsX = range(0, split_numX) intWidth = split_lengthX intWidth_pad = intWidth + intPaddingLeft + intPaddingRight intHeight = split_lengthY intHeight_pad = intHeight + intPaddingTop + intPaddingBottom # print("split " + str(split_numY) + ' , ' + str(split_numX)) y_all = np.zeros((gtHeight, gtWidth, 3), dtype="float32") # HWC for split_j, split_i in itertools.product(splitsY, splitsX): # print(str(split_j) + ", \t " + str(split_i)) X0 = imgs_in[:, :, split_j * split_lengthY:(split_j + 1) * split_lengthY + intPaddingBottom + intPaddingTop, split_i * split_lengthX:(split_i + 1) * split_lengthX + intPaddingRight + intPaddingLeft] # y_ = torch.FloatTensor() X0 = torch.unsqueeze(X0, 0) # N C H W -> 1 N C H W if flip_test: output = util.flipx4_forward(model, X0) else: output = util.single_forward(model, X0) output_depadded = output[0, :, intPaddingTop * scale:(intPaddingTop + intHeight) * scale, intPaddingLeft * scale:(intPaddingLeft + intWidth) * scale] output_depadded = output_depadded.squeeze(0) output = util.tensor2img(output_depadded) y_all[split_j * split_lengthY * scale :(split_j + 1) * split_lengthY * scale, split_i * split_lengthX * scale :(split_i + 1) * split_lengthX * scale, :] = \ np.round(output).astype(np.uint8) # plt.figure(0) # plt.title("pic") # plt.imshow(y_all) if save_imgs: cv2.imwrite( osp.join(save_subfolder, '{}.png'.format(img_name)), y_all) print("*****************current image process time \t " + str(time.time() - end) + "s ******************") total_run_time.update(time.time() - end, 1) # calculate PSNR #y_all = y_all / 255. 
#GT = np.copy(img_GT_l[img_idx]) # For REDS, evaluate on RGB channels; for Vid4, evaluate on the Y channel #if data_mode == 'Vid4': # bgr2y, [0, 1] # GT = data_util.bgr2ycbcr(GT, only_y=True) # y_all = data_util.bgr2ycbcr(y_all, only_y=True) #y_all, GT = util.crop_border([y_all, GT], crop_border) #crt_psnr = util.calculate_psnr(y_all * 255, GT * 255) #logger.info('{:3d} - {:25} \tPSNR: {:.6f} dB'.format(img_idx + 1, img_name, crt_psnr)) logger.info('{} : {:3d} - {:25} \t'.format(input_subfolder, img_idx + 1, img_name)) #if img_idx >= border_frame and img_idx < max_idx - border_frame: # center frames # avg_psnr_center += crt_psnr # N_center += 1 #else: # border frames # avg_psnr_border += crt_psnr # N_border += 1 #avg_psnr = (avg_psnr_center + avg_psnr_border) / (N_center + N_border) #avg_psnr_center = avg_psnr_center / N_center #avg_psnr_border = 0 if N_border == 0 else avg_psnr_border / N_border #avg_psnr_l.append(avg_psnr) #avg_psnr_center_l.append(avg_psnr_center) #avg_psnr_border_l.append(avg_psnr_border) #logger.info('Folder {} - Average PSNR: {:.6f} dB for {} frames; ' # 'Center PSNR: {:.6f} dB for {} frames; ' # 'Border PSNR: {:.6f} dB for {} frames.'.format(subfolder_name, avg_psnr, # (N_center + N_border), # avg_psnr_center, N_center, # avg_psnr_border, N_border)) #logger.info('################ Tidy Outputs ################') #for subfolder_name, psnr, psnr_center, psnr_border in zip(subfolder_name_l, avg_psnr_l, # avg_psnr_center_l, avg_psnr_border_l): # logger.info('Folder {} - Average PSNR: {:.6f} dB. ' # 'Center PSNR: {:.6f} dB. ' # 'Border PSNR: {:.6f} dB.'.format(subfolder_name, psnr, psnr_center, # psnr_border)) #logger.info('################ Final Results ################') logger.info('Data: {} - {}'.format(data_mode, test_dataset_folder)) logger.info('Padding mode: {}'.format(padding)) logger.info('Model path: {}'.format(model_path)) logger.info('Save images: {}'.format(save_imgs)) logger.info('Flip test: {}'.format(flip_test))
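# The loop above upscales 4K frames tile by tile: the 960x540 input is first padded to
# a multiple of the 320x180 tile size, each tile is extended by a 32-pixel replication
# border (PAD), super-resolved, the scaled border is cropped off again, and the valid
# region is pasted into the full-resolution canvas. The helper below is a compact
# single-image sketch of that scheme under the same constants; `run_model` is a
# stand-in for the util.single_forward / util.tensor2img pair and is assumed to return
# a (1, C, h*scale, w*scale) tensor.
import numpy as np
import torch.nn.functional as F


def tile_and_stitch(lr, run_model, tile_h=180, tile_w=320, pad=32, scale=4):
    """lr: (C, H, W) float tensor whose H and W are multiples of the tile size."""
    c, h, w = lr.shape
    # replication-pad the whole frame once, then cut overlapping tiles out of it
    lr_pad = F.pad(lr.unsqueeze(0), (pad, pad, pad, pad), mode='replicate').squeeze(0)
    out = np.zeros((h * scale, w * scale, c), dtype=np.float32)
    for y0 in range(0, h, tile_h):
        for x0 in range(0, w, tile_w):
            patch = lr_pad[:, y0:y0 + tile_h + 2 * pad, x0:x0 + tile_w + 2 * pad]
            sr = run_model(patch.unsqueeze(0))
            # drop the scaled border and keep only the region belonging to this tile
            sr = sr[0, :, pad * scale:(pad + tile_h) * scale,
                          pad * scale:(pad + tile_w) * scale]
            out[y0 * scale:(y0 + tile_h) * scale,
                x0 * scale:(x0 + tile_w) * scale, :] = sr.permute(1, 2, 0).cpu().numpy()
    return out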
def main(): # Create object for parsing command-line options parser = argparse.ArgumentParser(description="Test with EDVR, requre path to test dataset folder.") # Add argument which takes path to a bag file as an input parser.add_argument("-i", "--input", type=str, help="Path to test folder") # Parse the command line arguments to an object args = parser.parse_args() # Safety if no parameter have been given if not args.input: print("No input paramater have been given.") print("For help type --help") exit() folder_name = args.input.split("/")[-1] if folder_name == '': index = len(args.input.split("/")) - 2 folder_name = args.input.split("/")[index] ################# # configurations ################# device = torch.device('cuda') os.environ['CUDA_VISIBLE_DEVICES'] = '0' data_mode = 'Vid4' # Vid4 | sharp_bicubic | blur_bicubic | blur | blur_comp # Vid4: SR # REDS4: sharp_bicubic (SR-clean), blur_bicubic (SR-blur); # blur (deblur-clean), blur_comp (deblur-compression). stage = 1 # 1 or 2, use two stage strategy for REDS dataset. flip_test = False ############################################################################ #### model if data_mode == 'Vid4': if stage == 1: model_path = '../experiments/pretrained_models/EDVR_Vimeo90K_SR_L.pth' else: raise ValueError('Vid4 does not support stage 2.') else: raise NotImplementedError if data_mode == 'Vid4': N_in = 7 # use N_in images to restore one HR image else: N_in = 5 predeblur, HR_in = False, False back_RBs = 40 model = EDVR_arch.EDVR(128, N_in, 8, 5, back_RBs, predeblur=predeblur, HR_in=HR_in) #### dataset if data_mode == 'Vid4': # debug test_dataset_folder = os.path.join(args.input, 'BIx4') GT_dataset_folder = os.path.join(args.input, 'GT') else: if stage == 1: test_dataset_folder = '../datasets/REDS4/{}'.format(data_mode) else: test_dataset_folder = '../results/REDS-EDVR_REDS_SR_L_flipx4' print('You should modify the test_dataset_folder path for stage 2') GT_dataset_folder = '../datasets/REDS4/GT' #### evaluation crop_border = 0 border_frame = N_in // 2 # border frames when evaluate # temporal padding mode if data_mode == 'Vid4' or data_mode == 'sharp_bicubic': padding = 'new_info' else: padding = 'replicate' save_imgs = True save_folder = '../results/{}'.format(folder_name) util.mkdirs(save_folder) util.setup_logger('base', save_folder, 'test', level=logging.INFO, screen=True, tofile=True) logger = logging.getLogger('base') #### log info logger.info('Data: {} - {}'.format(folder_name, test_dataset_folder)) logger.info('Padding mode: {}'.format(padding)) logger.info('Model path: {}'.format(model_path)) logger.info('Save images: {}'.format(save_imgs)) logger.info('Flip test: {}'.format(flip_test)) #### set up the models model.load_state_dict(torch.load(model_path), strict=True) model.eval() model = model.to(device) avg_psnr_l, avg_psnr_center_l, avg_psnr_border_l = [], [], [] subfolder_name_l = [] subfolder_l = sorted(glob.glob(osp.join(test_dataset_folder, '*'))) subfolder_GT_l = sorted(glob.glob(osp.join(GT_dataset_folder, '*'))) # for each subfolder for subfolder, subfolder_GT in zip(subfolder_l, subfolder_GT_l): subfolder_name = osp.basename(subfolder) subfolder_name_l.append(subfolder_name) save_subfolder = osp.join(save_folder, subfolder_name) img_path_l = sorted(glob.glob(osp.join(subfolder, '*'))) max_idx = len(img_path_l) if save_imgs: util.mkdirs(save_subfolder) #### read LQ and GT images imgs_LQ = data_util.read_img_seq(subfolder) img_GT_l = [] for img_GT_path in sorted(glob.glob(osp.join(subfolder_GT, '*'))): 
img_GT_l.append(data_util.read_img(None, img_GT_path)) avg_psnr, avg_psnr_border, avg_psnr_center, N_border, N_center = 0, 0, 0, 0, 0 # process each image for img_idx, img_path in enumerate(img_path_l): img_name = osp.splitext(osp.basename(img_path))[0] select_idx = data_util.index_generation(img_idx, max_idx, N_in, padding=padding) imgs_in = imgs_LQ.index_select(0, torch.LongTensor(select_idx)).unsqueeze(0).to(device) if flip_test: output = util.flipx4_forward(model, imgs_in) else: output = util.single_forward(model, imgs_in) output = util.tensor2img(output.squeeze(0)) if save_imgs: cv2.imwrite(osp.join(save_subfolder, '{}.png'.format(img_name)), output) # calculate PSNR output = output / 255. GT = np.copy(img_GT_l[img_idx]) # For REDS, evaluate on RGB channels; for Vid4, evaluate on the Y channel if data_mode == 'Vid4': # bgr2y, [0, 1] GT = data_util.bgr2ycbcr(GT, only_y=True) output = data_util.bgr2ycbcr(output, only_y=True) output, GT = util.crop_border([output, GT], crop_border) crt_psnr = util.calculate_psnr(output * 255, GT * 255) logger.info('{:3d} - {:25} \tPSNR: {:.6f} dB'.format(img_idx + 1, img_name, crt_psnr)) if img_idx >= border_frame and img_idx < max_idx - border_frame: # center frames avg_psnr_center += crt_psnr N_center += 1 else: # border frames avg_psnr_border += crt_psnr N_border += 1 avg_psnr = (avg_psnr_center + avg_psnr_border) / (N_center + N_border) avg_psnr_center = avg_psnr_center / N_center avg_psnr_border = 0 if N_border == 0 else avg_psnr_border / N_border avg_psnr_l.append(avg_psnr) avg_psnr_center_l.append(avg_psnr_center) avg_psnr_border_l.append(avg_psnr_border) logger.info('Folder {} - Average PSNR: {:.6f} dB for {} frames; ' 'Center PSNR: {:.6f} dB for {} frames; ' 'Border PSNR: {:.6f} dB for {} frames.'.format(subfolder_name, avg_psnr, (N_center + N_border), avg_psnr_center, N_center, avg_psnr_border, N_border)) logger.info('################ Tidy Outputs ################') for subfolder_name, psnr, psnr_center, psnr_border in zip(subfolder_name_l, avg_psnr_l, avg_psnr_center_l, avg_psnr_border_l): logger.info('Folder {} - Average PSNR: {:.6f} dB. ' 'Center PSNR: {:.6f} dB. ' 'Border PSNR: {:.6f} dB.'.format(subfolder_name, psnr, psnr_center, psnr_border)) logger.info('################ Final Results ################') logger.info('Data: {} - {}'.format(folder_name, test_dataset_folder)) logger.info('Padding mode: {}'.format(padding)) logger.info('Model path: {}'.format(model_path)) logger.info('Save images: {}'.format(save_imgs)) logger.info('Flip test: {}'.format(flip_test)) logger.info('Total Average PSNR: {:.6f} dB for {} clips. ' 'Center PSNR: {:.6f} dB. Border PSNR: {:.6f} dB.'.format( sum(avg_psnr_l) / len(avg_psnr_l), len(subfolder_l), sum(avg_psnr_center_l) / len(avg_psnr_center_l), sum(avg_psnr_border_l) / len(avg_psnr_border_l)))
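# Sketch of the centre/border bookkeeping above: frames whose index lies within
# border_frame (= N_in // 2) of either end of a clip count as border frames, the rest
# as centre frames, and the per-folder figures are the means of those groups. Function
# and variable names here are illustrative only.
def split_center_border(psnr_per_frame, n_in):
    border = n_in // 2
    center_vals, border_vals = [], []
    for idx, val in enumerate(psnr_per_frame):
        if border <= idx < len(psnr_per_frame) - border:
            center_vals.append(val)
        else:
            border_vals.append(val)
    avg = sum(psnr_per_frame) / len(psnr_per_frame)
    avg_center = sum(center_vals) / len(center_vals) if center_vals else 0.0
    avg_border = sum(border_vals) / len(border_vals) if border_vals else 0.0
    return avg, avg_center, avg_border


# e.g. split_center_border([29.1, 30.4, 30.8, 30.2, 28.9], n_in=5)
#      -> the first two and last two frames are border frames, only index 2 is a centre frame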