def __init__(self, opt):
    super(DatasetPlainPatch, self).__init__()
    print('Get L/H for image-to-image mapping. Both "paths_L" and "paths_H" are needed.')
    self.opt = opt
    self.n_channels = opt['n_channels'] if opt['n_channels'] else 3
    self.patch_size = self.opt['H_size'] if self.opt['H_size'] else 64
    self.num_patches_per_image = opt['num_patches_per_image'] if opt['num_patches_per_image'] else 40
    self.num_sampled = opt['num_sampled'] if opt['num_sampled'] else 3000

    # ------------------------------------
    # get the paths of L/H
    # ------------------------------------
    self.paths_H = util.get_image_paths(opt['dataroot_H'])
    self.paths_L = util.get_image_paths(opt['dataroot_L'])

    assert self.paths_H, 'Error: H path is empty.'
    assert self.paths_L, 'Error: L path is empty. This dataset needs an L path; for H-only denoising use dataset_dnpatch.'
    if self.paths_L and self.paths_H:
        assert len(self.paths_L) == len(self.paths_H), 'H and L datasets have different number of images - {}, {}.'.format(len(self.paths_L), len(self.paths_H))

    # ------------------------------------
    # number of sampled images
    # ------------------------------------
    self.num_sampled = min(self.num_sampled, len(self.paths_H))

    # ------------------------------------
    # reserve space with zeros
    # ------------------------------------
    self.total_patches = self.num_sampled * self.num_patches_per_image
    self.H_data = np.zeros([self.total_patches, self.patch_size, self.patch_size, self.n_channels], dtype=np.uint8)
    self.L_data = np.zeros([self.total_patches, self.patch_size, self.patch_size, self.n_channels], dtype=np.uint8)

    # ------------------------------------
    # update H patches
    # ------------------------------------
    self.update_data()
def general_image_folder(opt):
    """Create an lmdb for a general image folder.

    If all the images have the same resolution, only one copy of the resolution
    info is stored; otherwise, it will store every resolution info.
    """
    img_folder = opt['img_folder']
    lmdb_save_path = opt['lmdb_save_path']
    meta_info = {'name': opt['name']}
    if not lmdb_save_path.endswith('.lmdb'):
        raise ValueError("lmdb_save_path must end with '.lmdb'.")
    if os.path.exists(lmdb_save_path):
        print('Folder [{:s}] already exists. Exit...'.format(lmdb_save_path))
        sys.exit(1)

    # read all the image paths into a list
    print('Reading image path list ...')
    all_img_list = util.get_image_paths(img_folder)
    keys = []
    for img_path in all_img_list:
        img_path_split = img_path.split('/')[-2:]
        img_name_ext = img_path_split[0] + '_' + img_path_split[1]
        img_name, ext = os.path.splitext(img_name_ext)
        keys.append(img_name)

    # estimate the lmdb map size from the first image
    data_size_per_img = cv2.imread(all_img_list[0], cv2.IMREAD_UNCHANGED).nbytes
    print('data size per image is: ', data_size_per_img)
    data_size = data_size_per_img * len(all_img_list)
    env = lmdb.open(lmdb_save_path, map_size=data_size * 10)

    # write data to lmdb
    txn = env.begin(write=True)
    resolutions = []
    tqdm_iter = tqdm(enumerate(zip(all_img_list, keys)), total=len(all_img_list), leave=False)
    for idx, (path, key) in tqdm_iter:
        tqdm_iter.set_description('Write {}'.format(key))
        key_byte = key.encode('ascii')
        data = util.imread_uint(path, 3)
        H, W, C = data.shape
        resolutions.append('{:d}_{:d}_{:d}'.format(C, H, W))
        txn.put(key_byte, data)
        if (idx + 1) % opt['commit_interval'] == 0:
            txn.commit()
            txn = env.begin(write=True)
    txn.commit()
    env.close()
    print('Finish writing lmdb.')

    # create meta information
    assert len(keys) == len(resolutions)
    if len(set(resolutions)) <= 1:
        meta_info['resolution'] = [resolutions[0]]
        meta_info['keys'] = keys
        print('All images have the same resolution. Simplify the meta info.')
    else:
        meta_info['resolution'] = resolutions
        meta_info['keys'] = keys
        print('Not all images have the same resolution. Save meta info for each image.')

    pickle.dump(meta_info, open(os.path.join(lmdb_save_path, 'meta_info.pkl'), 'wb'))
    print('Finish creating lmdb meta info.')
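# ------------------------------------------------------------------
# Hedged usage sketch (added, not part of the original source). The
# option keys below are exactly the ones read by general_image_folder();
# the folder names are made-up placeholders for illustration only.
# ------------------------------------------------------------------
if __name__ == '__main__':
    opt = {'img_folder': 'trainsets/trainH',            # hypothetical input folder
           'lmdb_save_path': 'trainsets/trainH.lmdb',   # must end with '.lmdb'
           'name': 'trainH',
           'commit_interval': 100}                      # commit every 100 images
    general_image_folder(opt)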
def __init__(self, opt):
    super(DatasetSR, self).__init__()
    self.opt = opt
    self.n_channels = opt['n_channels'] if opt['n_channels'] else 3
    self.sf = opt['scale'] if opt['scale'] else 4
    self.patch_size = self.opt['H_size'] if self.opt['H_size'] else 96
    self.L_size = self.patch_size // self.sf

    # ------------------------------------
    # get paths of L/H
    # ------------------------------------
    self.paths_H = util.get_image_paths(opt['dataroot_H'])
    self.paths_L = util.get_image_paths(opt['dataroot_L'])

    assert self.paths_H, 'Error: H path is empty.'
    if self.paths_L and self.paths_H:
        assert len(self.paths_L) == len(self.paths_H), 'L/H mismatch - {}, {}.'.format(len(self.paths_L), len(self.paths_H))
def __init__(self, opt):
    super(DatasetPlain, self).__init__()
    print('Get L/H for image-to-image mapping. Both "paths_L" and "paths_H" are needed.')
    self.opt = opt
    self.n_channels = opt['n_channels'] if opt['n_channels'] else 3
    self.patch_size = self.opt['H_size'] if self.opt['H_size'] else 64

    # ------------------------------------
    # get the paths of L/H
    # ------------------------------------
    self.paths_H = util.get_image_paths(opt['dataroot_H'])
    self.paths_L = util.get_image_paths(opt['dataroot_L'])

    assert self.paths_H, 'Error: H path is empty.'
    assert self.paths_L, 'Error: L path is empty. Plain dataset assumes both L and H are given!'
    if self.paths_L and self.paths_H:
        assert len(self.paths_L) == len(self.paths_H), 'L/H mismatch - {}, {}.'.format(len(self.paths_L), len(self.paths_H))
def __init__(self, opt):
    super(DatasetDPSR, self).__init__()
    self.opt = opt
    self.n_channels = opt['n_channels'] if opt['n_channels'] else 3
    self.sf = opt['scale'] if opt['scale'] else 4
    self.patch_size = self.opt['H_size'] if self.opt['H_size'] else 96
    self.L_size = self.patch_size // self.sf
    self.sigma = opt['sigma'] if opt['sigma'] else [0, 50]
    self.sigma_min, self.sigma_max = self.sigma[0], self.sigma[1]
    self.sigma_test = opt['sigma_test'] if opt['sigma_test'] else 0

    # ------------------------------------
    # get paths of L/H
    # ------------------------------------
    self.paths_H = util.get_image_paths(opt['dataroot_H'])
    self.paths_L = util.get_image_paths(opt['dataroot_L'])

    assert self.paths_H, 'Error: H path is empty.'
def __init__(self, opt):
    super(DatasetL, self).__init__()
    print('Read L in testing. Only "dataroot_L" is needed.')
    self.opt = opt
    self.n_channels = opt['n_channels'] if opt['n_channels'] else 3

    # ------------------------------------
    # get the path of L
    # ------------------------------------
    self.paths_L = util.get_image_paths(opt['dataroot_L'])
    assert self.paths_L, 'Error: L paths are empty.'
def __init__(self, opt):
    super(DatasetFFDNet, self).__init__()
    self.opt = opt
    self.n_channels = opt['n_channels'] if opt['n_channels'] else 3
    self.patch_size = self.opt['H_size'] if opt['H_size'] else 64
    self.sigma = opt['sigma'] if opt['sigma'] else [0, 75]
    self.sigma_min, self.sigma_max = self.sigma[0], self.sigma[1]
    self.sigma_test = opt['sigma_test'] if opt['sigma_test'] else 25

    # ------------------------------------
    # get the path of H, return None if input is None
    # ------------------------------------
    self.paths_H = util.get_image_paths(opt['dataroot_H'])
def __init__(self, opt):
    super(DatasetSRMD, self).__init__()
    self.opt = opt
    self.n_channels = opt['n_channels'] if opt['n_channels'] else 3
    self.sf = opt['scale'] if opt['scale'] else 4
    self.patch_size = self.opt['H_size'] if self.opt['H_size'] else 96
    self.L_size = self.patch_size // self.sf
    self.sigma = opt['sigma'] if opt['sigma'] else [0, 50]
    self.sigma_min, self.sigma_max = self.sigma[0], self.sigma[1]
    self.sigma_test = opt['sigma_test'] if opt['sigma_test'] else 0

    # ------------------------------------
    # PCA projection matrix
    # ------------------------------------
    self.p = hdf5storage.loadmat(os.path.join('kernels', 'srmd_pca_pytorch.mat'))['p']
    self.ksize = int(np.sqrt(self.p.shape[-1]))  # kernel size

    # ------------------------------------
    # get paths of L/H
    # ------------------------------------
    self.paths_H = util.get_image_paths(opt['dataroot_H'])
    self.paths_L = util.get_image_paths(opt['dataroot_L'])
def __init__(self, opt):
    super(DatasetDnCNN, self).__init__()
    print('Dataset: Denoising on AWGN with a fixed sigma. Only "dataroot_H" is needed.')
    self.opt = opt
    self.n_channels = opt['n_channels'] if opt['n_channels'] else 3
    self.patch_size = opt['H_size'] if opt['H_size'] else 64
    self.sigma = opt['sigma'] if opt['sigma'] else 25
    self.sigma_test = opt['sigma_test'] if opt['sigma_test'] else self.sigma

    # ------------------------------------
    # get path of H
    # return None if input is None
    # ------------------------------------
    self.paths_H = util.get_image_paths(opt['dataroot_H'])
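# ------------------------------------------------------------------
# Hedged sketch (added, not from the original source): given the
# settings above, a __getitem__ for this dataset would typically crop
# an H patch and synthesize its noisy counterpart with AWGN of the
# fixed sigma. The helper names follow the utilities used elsewhere in
# this repo; the exact cropping/augmentation details are assumptions.
# ------------------------------------------------------------------
# img_H = util.imread_uint(self.paths_H[index], self.n_channels)        # H x W x C, uint8
# img_H = util.uint2single(util.augment_img(img_H))                     # [0, 1] float
# img_L = img_H + np.random.normal(0, self.sigma / 255., img_H.shape)   # add AWGN with fixed sigma
# return {'L': util.single2tensor3(img_L), 'H': util.single2tensor3(img_H)}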
def __init__(self, opt):
    super(DatasetUSRNet, self).__init__()
    self.opt = opt
    self.n_channels = opt['n_channels'] if opt['n_channels'] else 3
    self.patch_size = self.opt['H_size'] if self.opt['H_size'] else 96
    self.sigma_max = self.opt['sigma_max'] if self.opt['sigma_max'] is not None else 25
    self.scales = opt['scales'] if opt['scales'] is not None else [1, 2, 3, 4]
    self.sf_validation = opt['sf_validation'] if opt['sf_validation'] is not None else 3
    # self.kernels = hdf5storage.loadmat(os.path.join('kernels', 'kernels_12.mat'))['kernels']
    self.kernels = loadmat(os.path.join('kernels', 'kernels_12.mat'))['kernels']  # for validation

    # ------------------------------------
    # get the path of H
    # ------------------------------------
    self.paths_H = util.get_image_paths(opt['dataroot_H'])  # return None if input is None
    self.count = 0
def __init__(self, opt):
    super(DatasetDnPatch, self).__init__()
    print('Get L/H for denoising on AWGN with a fixed sigma. Only "dataroot_H" is needed.')
    self.opt = opt
    self.n_channels = opt['n_channels'] if opt['n_channels'] else 3
    self.patch_size = opt['H_size'] if opt['H_size'] else 64
    self.sigma = opt['sigma'] if opt['sigma'] else 25
    self.sigma_test = opt['sigma_test'] if opt['sigma_test'] else self.sigma
    self.num_patches_per_image = opt['num_patches_per_image'] if opt['num_patches_per_image'] else 40
    self.num_sampled = opt['num_sampled'] if opt['num_sampled'] else 3000

    # ------------------------------------
    # get paths of H
    # ------------------------------------
    self.paths_H = util.get_image_paths(opt['dataroot_H'])
    assert self.paths_H, 'Error: H path is empty.'

    # ------------------------------------
    # number of sampled H images
    # ------------------------------------
    self.num_sampled = min(self.num_sampled, len(self.paths_H))

    # ------------------------------------
    # reserve space with zeros
    # ------------------------------------
    self.total_patches = self.num_sampled * self.num_patches_per_image
    self.H_data = np.zeros([self.total_patches, self.patch_size, self.patch_size, self.n_channels], dtype=np.uint8)

    # ------------------------------------
    # update H patches
    # ------------------------------------
    self.update_data()
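# ------------------------------------------------------------------
# Note (added, not in the original source): with the default settings
# above, the preallocated uint8 patch buffer takes
#   num_sampled * num_patches_per_image * H_size^2 * n_channels bytes
#   = 3000 * 40 * 64 * 64 * 3 bytes ≈ 1.5 GB for H_data,
# so lower num_sampled or num_patches_per_image if memory is tight.
# ------------------------------------------------------------------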
def main():

    # ----------------------------------------
    # Preparation
    # ----------------------------------------
    # settings such as sf, model_pool, sources, results, using_device, n_channels,
    # nc, nb, x8, picture_format, srmd_pca_path and noise_level_model are
    # module-level configuration defined outside this function
    if noise_level_model == -1:
        model_name = 'srmdnf_x' + str(sf)
    else:
        model_name = 'srmd_x' + str(sf)
    model_path = os.path.join(model_pool, model_name + '.pth')
    in_nc = 18 if 'nf' in model_name else 19

    # ----------------------------------------
    # L_path, E_path, H_path
    # ----------------------------------------
    L_path = sources   # L_path, for Low-quality images
    E_path = results   # E_path, for Estimated images
    if not os.path.splitext(E_path)[1]:
        util.mkdir(E_path)
    device = torch.device(using_device)

    # ----------------------------------------
    # load model
    # ----------------------------------------
    from utils.network_srmd import SRMD as net
    model = net(in_nc=in_nc, out_nc=n_channels, nc=nc, nb=nb, upscale=sf, act_mode='R', upsample_mode='pixelshuffle')
    model.load_state_dict(torch.load(model_path), strict=False)
    model.eval()
    for _, v in model.named_parameters():
        v.requires_grad = False
    model = model.to(device)

    if os.path.isfile(L_path):
        L_paths = [L_path]
    else:
        L_paths = util.get_image_paths(L_path)

    # ----------------------------------------
    # kernel and PCA reduced feature
    # ----------------------------------------
    kernel = utils_deblur.fspecial('gaussian', 15, 0.01)  # delta-like Gaussian kernel with width 0.01
    P = loadmat(srmd_pca_path)['P']
    degradation_vector = np.dot(P, np.reshape(kernel, (-1), order="F"))
    if 'nf' not in model_name:  # the noise-aware 'srmd' model takes the noise level as an extra entry
        degradation_vector = np.append(degradation_vector, noise_level_model / 255.)
    degradation_vector = torch.from_numpy(degradation_vector).view(1, -1, 1, 1).float()

    for _, img in enumerate(L_paths):

        img_name, _ = os.path.splitext(os.path.basename(img))
        try:
            # ------------------------------------
            # (1) img_L
            # ------------------------------------
            img_L, alpha = util.imread_uint_alpha(img, n_channels=n_channels)

            # bicubically upscale the alpha channel if the input image has one (PNG only)
            if alpha is not None and picture_format == "png":
                alpha = util.uint2tensor4(alpha)
                alpha = torch.nn.functional.interpolate(alpha, scale_factor=sf, mode='bicubic', align_corners=False)
                alpha = alpha.to(device)
                alpha = torch.clamp(alpha, 0, 255)
                alpha = util.tensor2uint(alpha)

            img_L = util.uint2tensor4(img_L)
            degradation_map = degradation_vector.repeat(1, 1, img_L.size(-2), img_L.size(-1))
            img_L = torch.cat((img_L, degradation_map), dim=1)
            img_L = img_L.to(device)

            # ------------------------------------
            # (2) img_E
            # ------------------------------------
            if not x8:
                img_E = model(img_L)
            else:
                img_E = utils_model.test_mode(model, img_L, mode=3, sf=sf)
            img_E = util.tensor2uint(img_E)

            if alpha is not None and picture_format == "png":
                alpha = alpha.reshape((alpha.shape[0], alpha.shape[1], 1))
                img_E = np.concatenate((img_E, alpha), axis=2)
            elif alpha is not None:
                print("Warning! The alpha channel of this picture was dropped!")

            # ------------------------------------
            # save results
            # ------------------------------------
            if os.path.splitext(E_path)[1]:
                util.imsave(img_E, E_path)
            else:
                util.imsave(img_E, os.path.join(E_path, img_name + '.' + picture_format))
            print(os.path.basename(img) + " successfully saved to disk!")
        except Exception:
            traceback.print_exc()
            print(os.path.basename(img) + " failed!")
def main():

    # ----------------------------------------
    # Preparation
    # ----------------------------------------
    noise_level_img = 0 / 255.0            # set AWGN noise level for LR image, default: 0
    noise_level_model = noise_level_img    # set noise level of model, default: 0
    model_name = 'ircnn_color'             # set denoiser, 'drunet_color' | 'ircnn_color'
    testset_name = 'Set18'                 # set testing set, 'set18' | 'set24'
    x8 = True                              # set PGSE to boost performance, default: True
    iter_num = 40                          # set number of iterations, default: 40 for demosaicing
    modelSigma1 = 49                       # set sigma_1, default: 49
    modelSigma2 = max(0.6, noise_level_model * 255.)  # set sigma_2
    matlab_init = True

    show_img = False                       # default: False
    save_L = True                          # save LR image
    save_E = True                          # save estimated image
    save_LEH = False                       # save zoomed LR, E and H images
    border = 10                            # default: 10 for demosaicing

    task_current = 'dm'                    # 'dm' for demosaicing
    n_channels = 3                         # fixed
    model_zoo = 'model_zoo'                # fixed
    testsets = 'testsets'                  # fixed
    results = 'results'                    # fixed
    result_name = testset_name + '_' + task_current + '_' + model_name
    model_path = os.path.join(model_zoo, model_name + '.pth')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    torch.cuda.empty_cache()

    # ----------------------------------------
    # L_path, E_path, H_path
    # ----------------------------------------
    L_path = os.path.join(testsets, testset_name)  # L_path, for Low-quality images
    E_path = os.path.join(results, result_name)    # E_path, for Estimated images
    util.mkdir(E_path)

    logger_name = result_name
    utils_logger.logger_info(logger_name, log_path=os.path.join(E_path, logger_name + '.log'))
    logger = logging.getLogger(logger_name)

    # ----------------------------------------
    # load model
    # ----------------------------------------
    if 'drunet' in model_name:
        from models.network_unet import UNetRes as net
        model = net(in_nc=n_channels + 1, out_nc=n_channels, nc=[64, 128, 256, 512], nb=4, act_mode='R',
                    downsample_mode="strideconv", upsample_mode="convtranspose")
        model.load_state_dict(torch.load(model_path), strict=True)
        model.eval()
        for _, v in model.named_parameters():
            v.requires_grad = False
        model = model.to(device)
    elif 'ircnn' in model_name:
        from models.network_dncnn import IRCNN as net
        model = net(in_nc=n_channels, out_nc=n_channels, nc=64)
        model25 = torch.load(model_path)
        former_idx = 0

    logger.info('model_name:{}, image sigma:{:.3f}, model sigma:{:.3f}'.format(model_name, noise_level_img, noise_level_model))
    logger.info('Model path: {:s}'.format(model_path))
    logger.info(L_path)
    L_paths = util.get_image_paths(L_path)

    test_results = OrderedDict()
    test_results['psnr'] = []

    for idx, img in enumerate(L_paths):

        # --------------------------------
        # (1) get img_H and img_L
        # --------------------------------
        idx += 1
        img_name, ext = os.path.splitext(os.path.basename(img))
        img_H = util.imread_uint(img, n_channels=n_channels)
        CFA, CFA4, mosaic, mask = utils_mosaic.mosaic_CFA_Bayer(img_H)

        # --------------------------------
        # (2) initialize x
        # --------------------------------
        if matlab_init:  # matlab demosaicing for initialization
            CFA4 = util.uint2tensor4(CFA4).to(device)
            x = utils_mosaic.dm_matlab(CFA4)
        else:
            x = cv2.cvtColor(CFA, cv2.COLOR_BAYER_BG2RGB_EA)
            x = util.uint2tensor4(x).to(device)

        img_L = util.tensor2uint(x)
        y = util.uint2tensor4(mosaic).to(device)
        util.imshow(img_L) if show_img else None
        mask = util.single2tensor4(mask.astype(np.float32)).to(device)

        # --------------------------------
        # (3) get rhos and sigmas
        # --------------------------------
        rhos, sigmas = pnp.get_rho_sigma(sigma=max(0.255 / 255., noise_level_img), iter_num=iter_num,
                                         modelSigma1=modelSigma1, modelSigma2=modelSigma2, w=1.0)
        rhos, sigmas = torch.tensor(rhos).to(device), torch.tensor(sigmas).to(device)

        # --------------------------------
        # (4) main iterations
        # --------------------------------
        for i in range(iter_num):

            # --------------------------------
            # step 1, closed-form solution of the data subproblem
            # (the mask selects the observed CFA samples, so the update is element-wise)
            # --------------------------------
            x = (y + rhos[i].float() * x).div(mask + rhos[i])

            # --------------------------------
            # step 2, denoiser
            # --------------------------------
            if 'ircnn' in model_name:
                current_idx = int(np.ceil(sigmas[i].cpu().numpy() * 255. / 2.) - 1)
                if current_idx != former_idx:
                    model.load_state_dict(model25[str(current_idx)], strict=True)
                    model.eval()
                    for _, v in model.named_parameters():
                        v.requires_grad = False
                    model = model.to(device)
                former_idx = current_idx

            x = torch.clamp(x, 0, 1)
            if x8:
                x = util.augment_img_tensor4(x, i % 8)

            if 'drunet' in model_name:
                x = torch.cat((x, sigmas[i].float().repeat(1, 1, x.shape[2], x.shape[3])), dim=1)
                x = utils_model.test_mode(model, x, mode=2, refield=32, min_size=256, modulo=16)
                # x = model(x)
            elif 'ircnn' in model_name:
                x = model(x)

            if x8:
                if i % 8 == 3 or i % 8 == 5:
                    x = util.augment_img_tensor4(x, 8 - i % 8)
                else:
                    x = util.augment_img_tensor4(x, i % 8)

            x[mask.to(torch.bool)] = y[mask.to(torch.bool)]

        # --------------------------------
        # (5) img_E
        # --------------------------------
        img_E = util.tensor2uint(x)
        psnr = util.calculate_psnr(img_E, img_H, border=border)
        test_results['psnr'].append(psnr)
        logger.info('{:->4d}--> {:>10s} -- PSNR: {:.2f}dB'.format(idx, img_name + ext, psnr))

        if save_E:
            util.imsave(img_E, os.path.join(E_path, img_name + '_' + model_name + '.png'))
        if save_L:
            util.imsave(img_L, os.path.join(E_path, img_name + '_L.png'))
        if save_LEH:
            util.imsave(np.concatenate([img_L, img_E, img_H], axis=1),
                        os.path.join(E_path, img_name + model_name + '_LEH.png'))

    ave_psnr = sum(test_results['psnr']) / len(test_results['psnr'])
    logger.info('------> Average PSNR(RGB) of ({}) is : {:.2f} dB'.format(testset_name, ave_psnr))
def main():

    # ----------------------------------------
    # Preparation
    # ----------------------------------------
    noise_level_img = 0                  # default: 0, noise level for LR image
    noise_level_model = noise_level_img  # noise level for model
    model_name = 'srmdnf_x4'             # 'srmd_x2' | 'srmd_x3' | 'srmd_x4' | 'srmdnf_x2' | 'srmdnf_x3' | 'srmdnf_x4'
    testset_name = 'set5'                # test set, 'set5' | 'srbsd68'
    sf = [int(s) for s in re.findall(r'\d+', model_name)][0]  # scale factor
    x8 = False                           # default: False, x8 to boost performance
    need_degradation = True              # default: True, use degradation model to generate LR image
    show_img = False                     # default: False
    srmd_pca_path = os.path.join('kernels', 'srmd_pca_matlab.mat')

    task_current = 'sr'                  # 'dn' for denoising | 'sr' for super-resolution
    n_channels = 3                       # fixed
    in_nc = 18 if 'nf' in model_name else 19
    nc = 128                             # fixed, number of channels
    nb = 12                              # fixed, number of conv layers
    model_pool = 'model_zoo'             # fixed
    testsets = 'testsets'                # fixed
    results = 'results'                  # fixed
    result_name = testset_name + '_' + model_name
    border = sf if task_current == 'sr' else 0  # shave border to calculate PSNR and SSIM
    model_path = os.path.join(model_pool, model_name + '.pth')

    # ----------------------------------------
    # L_path, E_path, H_path
    # ----------------------------------------
    L_path = os.path.join(testsets, testset_name)  # L_path, for Low-quality images
    H_path = L_path                                # H_path, for High-quality images
    E_path = os.path.join(results, result_name)    # E_path, for Estimated images
    util.mkdir(E_path)

    if H_path == L_path:
        need_degradation = True
    logger_name = result_name
    utils_logger.logger_info(logger_name, log_path=os.path.join(E_path, logger_name + '.log'))
    logger = logging.getLogger(logger_name)

    need_H = True if H_path is not None else False
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # ----------------------------------------
    # load model
    # ----------------------------------------
    from models.network_srmd import SRMD as net
    model = net(in_nc=in_nc, out_nc=n_channels, nc=nc, nb=nb, upscale=sf, act_mode='R', upsample_mode='pixelshuffle')
    model.load_state_dict(torch.load(model_path), strict=False)
    model.eval()
    for k, v in model.named_parameters():
        v.requires_grad = False
    model = model.to(device)
    logger.info('Model path: {:s}'.format(model_path))
    number_parameters = sum(map(lambda x: x.numel(), model.parameters()))
    logger.info('Params number: {}'.format(number_parameters))

    test_results = OrderedDict()
    test_results['psnr'] = []
    test_results['ssim'] = []
    test_results['psnr_y'] = []
    test_results['ssim_y'] = []

    logger.info('model_name:{}, model sigma:{}, image sigma:{}'.format(model_name, noise_level_model, noise_level_img))
    logger.info(L_path)
    L_paths = util.get_image_paths(L_path)
    H_paths = util.get_image_paths(H_path) if need_H else None

    # ----------------------------------------
    # kernel and PCA reduced feature
    # ----------------------------------------
    # kernel = sr.anisotropic_Gaussian(ksize=15, theta=np.pi, l1=4, l2=4)
    kernel = utils_deblur.fspecial('gaussian', 15, 0.01)  # delta-like Gaussian kernel with width 0.01
    P = loadmat(srmd_pca_path)['P']
    degradation_vector = np.dot(P, np.reshape(kernel, (-1), order="F"))
    if 'nf' not in model_name:  # the noise-aware 'srmd' model takes the noise level as an extra entry
        degradation_vector = np.append(degradation_vector, noise_level_model / 255.)
    degradation_vector = torch.from_numpy(degradation_vector).view(1, -1, 1, 1).float()

    for idx, img in enumerate(L_paths):

        # ------------------------------------
        # (1) img_L
        # ------------------------------------
        img_name, ext = os.path.splitext(os.path.basename(img))
        # logger.info('{:->4d}--> {:>10s}'.format(idx+1, img_name+ext))
        img_L = util.imread_uint(img, n_channels=n_channels)
        img_L = util.uint2single(img_L)

        # degradation process: blur + bicubic downsampling + Gaussian noise
        if need_degradation:
            img_L = util.modcrop(img_L, sf)
            img_L = sr.srmd_degradation(img_L, kernel, sf)  # equivalent to bicubic degradation if kernel is a delta kernel
            np.random.seed(seed=0)  # for reproducibility
            img_L += np.random.normal(0, noise_level_img / 255., img_L.shape)

        util.imshow(util.single2uint(img_L), title='LR image with noise level {}'.format(noise_level_img)) if show_img else None

        img_L = util.single2tensor4(img_L)
        degradation_map = degradation_vector.repeat(1, 1, img_L.size(-2), img_L.size(-1))
        img_L = torch.cat((img_L, degradation_map), dim=1)
        img_L = img_L.to(device)

        # ------------------------------------
        # (2) img_E
        # ------------------------------------
        if not x8:
            img_E = model(img_L)
        else:
            img_E = utils_model.test_mode(model, img_L, mode=3, sf=sf)
        img_E = util.tensor2uint(img_E)

        if need_H:

            # --------------------------------
            # (3) img_H
            # --------------------------------
            img_H = util.imread_uint(H_paths[idx], n_channels=n_channels)
            img_H = img_H.squeeze()
            img_H = util.modcrop(img_H, sf)

            # --------------------------------
            # PSNR and SSIM
            # --------------------------------
            psnr = util.calculate_psnr(img_E, img_H, border=border)
            ssim = util.calculate_ssim(img_E, img_H, border=border)
            test_results['psnr'].append(psnr)
            test_results['ssim'].append(ssim)
            logger.info('{:s} - PSNR: {:.2f} dB; SSIM: {:.4f}.'.format(img_name + ext, psnr, ssim))
            util.imshow(np.concatenate([img_E, img_H], axis=1), title='Recovered / Ground-truth') if show_img else None

            if np.ndim(img_H) == 3:  # RGB image
                img_E_y = util.rgb2ycbcr(img_E, only_y=True)
                img_H_y = util.rgb2ycbcr(img_H, only_y=True)
                psnr_y = util.calculate_psnr(img_E_y, img_H_y, border=border)
                ssim_y = util.calculate_ssim(img_E_y, img_H_y, border=border)
                test_results['psnr_y'].append(psnr_y)
                test_results['ssim_y'].append(ssim_y)

        # ------------------------------------
        # save results
        # ------------------------------------
        util.imsave(img_E, os.path.join(E_path, img_name + '.png'))

    if need_H:
        ave_psnr = sum(test_results['psnr']) / len(test_results['psnr'])
        ave_ssim = sum(test_results['ssim']) / len(test_results['ssim'])
        logger.info('Average PSNR/SSIM(RGB) - {} - x{} - PSNR: {:.2f} dB; SSIM: {:.4f}'.format(result_name, sf, ave_psnr, ave_ssim))
        if np.ndim(img_H) == 3:
            ave_psnr_y = sum(test_results['psnr_y']) / len(test_results['psnr_y'])
            ave_ssim_y = sum(test_results['ssim_y']) / len(test_results['ssim_y'])
            logger.info('Average PSNR/SSIM( Y ) - {} - x{} - PSNR: {:.2f} dB; SSIM: {:.4f}'.format(result_name, sf, ave_psnr_y, ave_ssim_y))
def main(): """ # ---------------------------------------------------------------------------------- # In real applications, you should set proper # - "noise_level_img": from [3, 25], set 3 for clean image, try 15 for very noisy LR images # - "k" (or "kernel_width"): blur kernel is very important!!! kernel_width from [0.6, 3.0] # to get the best performance. # ---------------------------------------------------------------------------------- """ ############################################################################## testset_name = 'Set3C' # set test set, 'set5' | 'srbsd68' noise_level_img = 3 # set noise level of image, from [3, 25], set 3 for clean image model_name = 'drunet_color' # 'ircnn_color' # set denoiser, | 'drunet_color' | 'ircnn_gray' | 'drunet_gray' | 'ircnn_color' sf = 2 # set scale factor, 1, 2, 3, 4 iter_num = 24 # set number of iterations, default: 24 for SISR # -------------------------------- # set blur kernel # -------------------------------- kernel_width_default_x1234 = [ 0.6, 0.9, 1.7, 2.2 ] # Gaussian kernel widths for x1, x2, x3, x4 noise_level_model = noise_level_img / 255. # noise level of model kernel_width = kernel_width_default_x1234[sf - 1] """ # set your own kernel width !!!!!!!!!! """ # kernel_width = 1.0 k = utils_deblur.fspecial('gaussian', 25, kernel_width) k = sr.shift_pixel(k, sf) # shift the kernel k /= np.sum(k) ############################################################################## show_img = False util.surf(k) if show_img else None x8 = True # default: False, x8 to boost performance modelSigma1 = 49 # set sigma_1, default: 49 modelSigma2 = max(sf, noise_level_model * 255.) classical_degradation = True # set classical degradation or bicubic degradation task_current = 'sr' # 'sr' for super-resolution n_channels = 1 if 'gray' in model_name else 3 # fixed model_zoo = 'model_zoo' # fixed testsets = 'testsets' # fixed results = 'results' # fixed result_name = testset_name + '_realapplications_' + task_current + '_' + model_name model_path = os.path.join(model_zoo, model_name + '.pth') device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') torch.cuda.empty_cache() # ---------------------------------------- # L_path, E_path, H_path # ---------------------------------------- L_path = os.path.join(testsets, testset_name) # L_path, for Low-quality images E_path = os.path.join(results, result_name) # E_path, for Estimated images util.mkdir(E_path) logger_name = result_name utils_logger.logger_info(logger_name, log_path=os.path.join(E_path, logger_name + '.log')) logger = logging.getLogger(logger_name) # ---------------------------------------- # load model # ---------------------------------------- if 'drunet' in model_name: from models.network_unet import UNetRes as net model = net(in_nc=n_channels + 1, out_nc=n_channels, nc=[64, 128, 256, 512], nb=4, act_mode='R', downsample_mode="strideconv", upsample_mode="convtranspose") model.load_state_dict(torch.load(model_path), strict=True) model.eval() for _, v in model.named_parameters(): v.requires_grad = False model = model.to(device) elif 'ircnn' in model_name: from models.network_dncnn import IRCNN as net model = net(in_nc=n_channels, out_nc=n_channels, nc=64) model25 = torch.load(model_path) former_idx = 0 logger.info('model_name:{}, image sigma:{:.3f}, model sigma:{:.3f}'.format( model_name, noise_level_img, noise_level_model)) logger.info('Model path: {:s}'.format(model_path)) logger.info(L_path) L_paths = util.get_image_paths(L_path) for idx, img in enumerate(L_paths): # 
-------------------------------- # (1) get img_L # -------------------------------- logger.info('Model path: {:s} Image: {:s}'.format(model_path, img)) img_name, ext = os.path.splitext(os.path.basename(img)) img_L = util.imread_uint(img, n_channels=n_channels) img_L = util.uint2single(img_L) img_L = util.modcrop(img_L, 8) # modcrop # -------------------------------- # (2) get rhos and sigmas # -------------------------------- rhos, sigmas = pnp.get_rho_sigma(sigma=max(0.255 / 255., noise_level_model), iter_num=iter_num, modelSigma1=modelSigma1, modelSigma2=modelSigma2, w=1) rhos, sigmas = torch.tensor(rhos).to(device), torch.tensor(sigmas).to( device) # -------------------------------- # (3) initialize x, and pre-calculation # -------------------------------- x = cv2.resize(img_L, (img_L.shape[1] * sf, img_L.shape[0] * sf), interpolation=cv2.INTER_CUBIC) if np.ndim(x) == 2: x = x[..., None] if classical_degradation: x = sr.shift_pixel(x, sf) x = util.single2tensor4(x).to(device) img_L_tensor, k_tensor = util.single2tensor4( img_L), util.single2tensor4(np.expand_dims(k, 2)) [k_tensor, img_L_tensor] = util.todevice([k_tensor, img_L_tensor], device) FB, FBC, F2B, FBFy = sr.pre_calculate(img_L_tensor, k_tensor, sf) # -------------------------------- # (4) main iterations # -------------------------------- for i in range(iter_num): print('Iter: {} / {}'.format(i, iter_num)) # -------------------------------- # step 1, FFT # -------------------------------- tau = rhos[i].float().repeat(1, 1, 1, 1) x = sr.data_solution(x, FB, FBC, F2B, FBFy, tau, sf) if 'ircnn' in model_name: current_idx = np.int( np.ceil(sigmas[i].cpu().numpy() * 255. / 2.) - 1) if current_idx != former_idx: model.load_state_dict(model25[str(current_idx)], strict=True) model.eval() for _, v in model.named_parameters(): v.requires_grad = False model = model.to(device) former_idx = current_idx # -------------------------------- # step 2, denoiser # -------------------------------- if x8: x = util.augment_img_tensor4(x, i % 8) if 'drunet' in model_name: x = torch.cat( (x, sigmas[i].repeat(1, 1, x.shape[2], x.shape[3])), dim=1) x = utils_model.test_mode(model, x, mode=2, refield=64, min_size=256, modulo=16) elif 'ircnn' in model_name: x = model(x) if x8: if i % 8 == 3 or i % 8 == 5: x = util.augment_img_tensor4(x, 8 - i % 8) else: x = util.augment_img_tensor4(x, i % 8) # -------------------------------- # (3) img_E # -------------------------------- img_E = util.tensor2uint(x) util.imsave( img_E, os.path.join(E_path, img_name + '_x' + str(sf) + '_' + model_name + '.png'))
def main():

    # ----------------------------------------
    # Preparation
    # ----------------------------------------
    model_name = 'usrnet'                 # 'usrgan' | 'usrnet' | 'usrgan_tiny' | 'usrnet_tiny'
    testset_name = 'set5'                 # test set, 'set5' | 'srbsd68'
    test_sf = [4] if 'gan' in model_name else [2, 3, 4]  # scale factor, from {1, 2, 3, 4}

    show_img = False                      # default: False
    save_L = True                         # save LR image
    save_E = True                         # save estimated image
    save_LEH = False                      # save zoomed LR, E and H images

    # ----------------------------------------
    # load testing kernels
    # ----------------------------------------
    # kernels = hdf5storage.loadmat(os.path.join('kernels', 'kernels.mat'))['kernels']
    kernels = loadmat(os.path.join('kernels', 'kernels_12.mat'))['kernels']

    n_channels = 1 if 'gray' in model_name else 3  # 3 for color image, 1 for grayscale image
    model_pool = 'model_zoo'              # fixed
    testsets = 'testsets'                 # fixed
    results = 'results'                   # fixed
    noise_level_img = 0                   # fixed: 0, noise level for LR image
    noise_level_model = noise_level_img   # fixed, noise level of model, default: 0
    result_name = testset_name + '_' + model_name
    model_path = os.path.join(model_pool, model_name + '.pth')

    # ----------------------------------------
    # L_path = H_path, E_path, logger
    # ----------------------------------------
    L_path = os.path.join(testsets, testset_name)  # L_path and H_path, fixed, for Low-quality images
    E_path = os.path.join(results, result_name)    # E_path, fixed, for Estimated images
    util.mkdir(E_path)

    logger_name = result_name
    utils_logger.logger_info(logger_name, log_path=os.path.join(E_path, logger_name + '.log'))
    logger = logging.getLogger(logger_name)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # ----------------------------------------
    # load model
    # ----------------------------------------
    if 'tiny' in model_name:
        model = net(n_iter=6, h_nc=32, in_nc=4, out_nc=3, nc=[16, 32, 64, 64],
                    nb=2, act_mode="R", downsample_mode='strideconv', upsample_mode="convtranspose")
    else:
        model = net(n_iter=8, h_nc=64, in_nc=4, out_nc=3, nc=[64, 128, 256, 512],
                    nb=2, act_mode="R", downsample_mode='strideconv', upsample_mode="convtranspose")

    model.load_state_dict(torch.load(model_path), strict=True)
    model.eval()
    for key, v in model.named_parameters():
        v.requires_grad = False
    number_parameters = sum(map(lambda x: x.numel(), model.parameters()))
    model = model.to(device)

    logger.info('Model path: {:s}'.format(model_path))
    logger.info('Params number: {}'.format(number_parameters))
    logger.info('Model_name:{}, image sigma:{}'.format(model_name, noise_level_img))
    logger.info(L_path)
    L_paths = util.get_image_paths(L_path)

    # --------------------------------
    # read images
    # --------------------------------
    test_results_ave = OrderedDict()
    test_results_ave['psnr_sf_k'] = []

    for sf in test_sf:
        for k_index in range(kernels.shape[1]):
            test_results = OrderedDict()
            test_results['psnr'] = []
            kernel = kernels[0, k_index].astype(np.float64)

            ## other kernels
            # kernel = utils_deblur.blurkernel_synthesis(h=25)      # motion kernel
            # kernel = utils_deblur.fspecial('gaussian', 25, 1.6)   # Gaussian kernel
            # kernel = sr.shift_pixel(kernel, sf)                   # pixel shift; optional
            # kernel /= np.sum(kernel)

            util.surf(kernel) if show_img else None
            idx = 0

            for img in L_paths:

                # --------------------------------
                # (1) classical degradation, img_L
                # --------------------------------
                idx += 1
                img_name, ext = os.path.splitext(os.path.basename(img))
                img_H = util.imread_uint(img, n_channels=n_channels)  # HR image, uint8
                img_H = util.modcrop(img_H, np.lcm(sf, 8))  # modcrop

                # generate degraded LR image
                img_L = ndimage.filters.convolve(img_H, kernel[..., np.newaxis], mode='wrap')  # blur
                img_L = sr.downsample_np(img_L, sf, center=False)  # downsample, standard s-fold downsampler
                img_L = util.uint2single(img_L)  # uint2single

                np.random.seed(seed=0)  # for reproducibility
                img_L += np.random.normal(0, noise_level_img, img_L.shape)  # add AWGN

                util.imshow(util.single2uint(img_L)) if show_img else None

                x = util.single2tensor4(img_L)
                k = util.single2tensor4(kernel[..., np.newaxis])
                sigma = torch.tensor(noise_level_model).float().view([1, 1, 1, 1])
                [x, k, sigma] = [el.to(device) for el in [x, k, sigma]]

                # --------------------------------
                # (2) inference
                # --------------------------------
                x = model(x, k, sf, sigma)

                # --------------------------------
                # (3) img_E
                # --------------------------------
                img_E = util.tensor2uint(x)

                if save_E:
                    util.imsave(img_E, os.path.join(E_path, img_name + '_x' + str(sf) + '_k' + str(k_index + 1) + '_' + model_name + '.png'))

                # --------------------------------
                # (4) img_LEH
                # --------------------------------
                img_L = util.single2uint(img_L)
                if save_LEH:
                    k_v = kernel / np.max(kernel) * 1.2
                    k_v = util.single2uint(np.tile(k_v[..., np.newaxis], [1, 1, 3]))
                    k_v = cv2.resize(k_v, (3 * k_v.shape[1], 3 * k_v.shape[0]), interpolation=cv2.INTER_NEAREST)
                    img_I = cv2.resize(img_L, (sf * img_L.shape[1], sf * img_L.shape[0]), interpolation=cv2.INTER_NEAREST)
                    img_I[:k_v.shape[0], -k_v.shape[1]:, :] = k_v
                    img_I[:img_L.shape[0], :img_L.shape[1], :] = img_L
                    util.imshow(np.concatenate([img_I, img_E, img_H], axis=1), title='LR / Recovered / Ground-truth') if show_img else None
                    util.imsave(np.concatenate([img_I, img_E, img_H], axis=1),
                                os.path.join(E_path, img_name + '_x' + str(sf) + '_k' + str(k_index + 1) + '_LEH.png'))

                if save_L:
                    util.imsave(img_L, os.path.join(E_path, img_name + '_x' + str(sf) + '_k' + str(k_index + 1) + '_LR.png'))

                psnr = util.calculate_psnr(img_E, img_H, border=sf ** 2)  # change with your own border
                test_results['psnr'].append(psnr)
                logger.info('{:->4d}--> {:>10s} -- x{:>2d} --k{:>2d} PSNR: {:.2f}dB'.format(idx, img_name + ext, sf, k_index, psnr))

            ave_psnr_k = sum(test_results['psnr']) / len(test_results['psnr'])
            logger.info('------> Average PSNR(RGB) of ({}) scale factor: ({}), kernel: ({}) sigma: ({}): {:.2f} dB'.format(
                testset_name, sf, k_index + 1, noise_level_model, ave_psnr_k))
            test_results_ave['psnr_sf_k'].append(ave_psnr_k)

    logger.info(test_results_ave['psnr_sf_k'])
def main():

    utils_logger.logger_info('AIM-track', log_path='AIM-track.log')
    logger = logging.getLogger('AIM-track')

    # --------------------------------
    # basic settings
    # --------------------------------
    testsets = 'DIV2K'
    testset_L = 'DIV2K_valid_LR_bicubic'
    # testset_L = 'DIV2K_test_LR_bicubic'

    torch.cuda.current_device()
    torch.cuda.empty_cache()
    # torch.backends.cudnn.benchmark = True
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # --------------------------------
    # load model
    # --------------------------------
    model_path = os.path.join('trained_model', 'RFDN_AIM.pth')
    model = RFDN()
    model.load_state_dict(torch.load(model_path), strict=True)
    model.eval()
    for k, v in model.named_parameters():
        v.requires_grad = False
    model = model.to(device)

    # number of parameters
    number_parameters = sum(map(lambda x: x.numel(), model.parameters()))
    logger.info('Params number: {}'.format(number_parameters))

    # --------------------------------
    # read image
    # --------------------------------
    L_folder = os.path.join(testsets, testset_L, 'X4')
    E_folder = os.path.join(testsets, testset_L + '_results')
    util.mkdir(E_folder)

    # record PSNR, runtime
    test_results = OrderedDict()
    test_results['runtime'] = []

    logger.info(L_folder)
    logger.info(E_folder)
    idx = 0

    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)

    img_SR = []
    for img in util.get_image_paths(L_folder):

        # --------------------------------
        # (1) img_L
        # --------------------------------
        idx += 1
        img_name, ext = os.path.splitext(os.path.basename(img))
        logger.info('{:->4d}--> {:>10s}'.format(idx, img_name + ext))

        img_L = util.imread_uint(img, n_channels=3)
        img_L = util.uint2tensor4(img_L)
        img_L = img_L.to(device)

        start.record()
        img_E = model(img_L)
        end.record()
        torch.cuda.synchronize()
        test_results['runtime'].append(start.elapsed_time(end))  # milliseconds

        # --------------------------------
        # (2) img_E
        # --------------------------------
        img_E = util.tensor2uint(img_E)
        img_SR.append(img_E)

        # --------------------------------
        # (3) save results
        # --------------------------------
        # util.imsave(img_E, os.path.join(E_folder, img_name + ext))

    ave_runtime = sum(test_results['runtime']) / len(test_results['runtime']) / 1000.0
    logger.info('------> Average runtime of ({}) is : {:.6f} seconds'.format(L_folder, ave_runtime))

    # --------------------------------
    # (4) calculate psnr
    # --------------------------------
def main():

    # ----------------------------------------
    # Preparation
    # ----------------------------------------
    noise_level_img = 15                 # set AWGN noise level for noisy image
    noise_level_model = noise_level_img  # set noise level for model
    model_name = 'drunet_gray'           # set denoiser model, 'drunet_gray' | 'drunet_color'
    testset_name = 'bsd68'               # set test set, 'bsd68' | 'cbsd68' | 'set12'
    x8 = False                           # default: False, x8 to boost performance
    show_img = False                     # default: False
    border = 0                           # shave border to calculate PSNR and SSIM

    if 'color' in model_name:
        n_channels = 3                   # 3 for color image
    else:
        n_channels = 1                   # 1 for grayscale image

    model_pool = 'model_zoo'             # fixed
    testsets = 'testsets'                # fixed
    results = 'results'                  # fixed
    task_current = 'dn'                  # 'dn' for denoising
    result_name = testset_name + '_' + task_current + '_' + model_name
    model_path = os.path.join(model_pool, model_name + '.pth')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    torch.cuda.empty_cache()

    # ----------------------------------------
    # L_path, E_path, H_path
    # ----------------------------------------
    L_path = os.path.join(testsets, testset_name)  # L_path, for Low-quality images
    E_path = os.path.join(results, result_name)    # E_path, for Estimated images
    util.mkdir(E_path)

    logger_name = result_name
    utils_logger.logger_info(logger_name, log_path=os.path.join(E_path, logger_name + '.log'))
    logger = logging.getLogger(logger_name)

    # ----------------------------------------
    # load model
    # ----------------------------------------
    from models.network_unet import UNetRes as net
    model = net(in_nc=n_channels + 1, out_nc=n_channels, nc=[64, 128, 256, 512], nb=4, act_mode='R',
                downsample_mode="strideconv", upsample_mode="convtranspose")
    model.load_state_dict(torch.load(model_path), strict=True)
    model.eval()
    for k, v in model.named_parameters():
        v.requires_grad = False
    model = model.to(device)
    logger.info('Model path: {:s}'.format(model_path))
    number_parameters = sum(map(lambda x: x.numel(), model.parameters()))
    logger.info('Params number: {}'.format(number_parameters))

    test_results = OrderedDict()
    test_results['psnr'] = []
    test_results['ssim'] = []

    logger.info('model_name:{}, model sigma:{}, image sigma:{}'.format(model_name, noise_level_model, noise_level_img))
    logger.info(L_path)
    L_paths = util.get_image_paths(L_path)

    for idx, img in enumerate(L_paths):

        # ------------------------------------
        # (1) img_L
        # ------------------------------------
        img_name, ext = os.path.splitext(os.path.basename(img))
        # logger.info('{:->4d}--> {:>10s}'.format(idx+1, img_name+ext))
        img_H = util.imread_uint(img, n_channels=n_channels)
        img_L = util.uint2single(img_H)

        # add noise without clipping
        np.random.seed(seed=0)  # for reproducibility
        img_L += np.random.normal(0, noise_level_img / 255., img_L.shape)

        util.imshow(util.single2uint(img_L), title='Noisy image with noise level {}'.format(noise_level_img)) if show_img else None

        img_L = util.single2tensor4(img_L)
        # concatenate a constant noise-level map as the extra input channel
        img_L = torch.cat((img_L, torch.FloatTensor([noise_level_model / 255.]).repeat(1, 1, img_L.shape[2], img_L.shape[3])), dim=1)
        img_L = img_L.to(device)

        # ------------------------------------
        # (2) img_E
        # ------------------------------------
        if not x8 and img_L.size(2) % 8 == 0 and img_L.size(3) % 8 == 0:
            img_E = model(img_L)
        elif not x8 and (img_L.size(2) % 8 != 0 or img_L.size(3) % 8 != 0):
            img_E = utils_model.test_mode(model, img_L, refield=64, mode=5)
        elif x8:
            img_E = utils_model.test_mode(model, img_L, mode=3)

        img_E = util.tensor2uint(img_E)

        # --------------------------------
        # PSNR and SSIM
        # --------------------------------
        if n_channels == 1:
            img_H = img_H.squeeze()
        psnr = util.calculate_psnr(img_E, img_H, border=border)
        ssim = util.calculate_ssim(img_E, img_H, border=border)
        test_results['psnr'].append(psnr)
        test_results['ssim'].append(ssim)
        logger.info('{:s} - PSNR: {:.2f} dB; SSIM: {:.4f}.'.format(img_name + ext, psnr, ssim))

        # ------------------------------------
        # save results
        # ------------------------------------
        util.imsave(img_E, os.path.join(E_path, img_name + ext))

    ave_psnr = sum(test_results['psnr']) / len(test_results['psnr'])
    ave_ssim = sum(test_results['ssim']) / len(test_results['ssim'])
    logger.info('Average PSNR/SSIM(RGB) - {} - PSNR: {:.2f} dB; SSIM: {:.4f}'.format(result_name, ave_psnr, ave_ssim))
def main():

    # ----------------------------------------
    # Preparation
    # ----------------------------------------
    noise_level_img = 0 / 255.0          # set AWGN noise level for LR image, default: 0
    noise_level_model = noise_level_img  # set noise level of model, default: 0
    model_name = 'drunet_color'          # set denoiser, | 'drunet_color' | 'ircnn_gray' | 'drunet_gray' | 'ircnn_color'
    testset_name = 'srbsd68'             # set test set, 'set5' | 'srbsd68'
    x8 = True                            # default: False, x8 to boost performance
    test_sf = [2]                        # set scale factor, default: [2, 3, 4], [2], [3], [4]
    iter_num = 24                        # set number of iterations, default: 24 for SISR
    modelSigma1 = 49                     # set sigma_1, default: 49
    classical_degradation = True         # set classical degradation or bicubic degradation
    show_img = False                     # default: False
    save_L = True                        # save LR image
    save_E = True                        # save estimated image
    save_LEH = False                     # save zoomed LR, E and H images

    task_current = 'sr'                  # 'sr' for super-resolution
    n_channels = 1 if 'gray' in model_name else 3  # fixed
    model_zoo = 'model_zoo'              # fixed
    testsets = 'testsets'                # fixed
    results = 'results'                  # fixed
    result_name = testset_name + '_' + task_current + '_' + model_name
    model_path = os.path.join(model_zoo, model_name + '.pth')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    torch.cuda.empty_cache()

    # ----------------------------------------
    # L_path, E_path, H_path
    # ----------------------------------------
    L_path = os.path.join(testsets, testset_name)  # L_path, for Low-quality images
    E_path = os.path.join(results, result_name)    # E_path, for Estimated images
    util.mkdir(E_path)

    logger_name = result_name
    utils_logger.logger_info(logger_name, log_path=os.path.join(E_path, logger_name + '.log'))
    logger = logging.getLogger(logger_name)

    # ----------------------------------------
    # load model
    # ----------------------------------------
    if 'drunet' in model_name:
        from models.network_unet import UNetRes as net
        model = net(in_nc=n_channels + 1, out_nc=n_channels, nc=[64, 128, 256, 512], nb=4, act_mode='R',
                    downsample_mode="strideconv", upsample_mode="convtranspose")
        model.load_state_dict(torch.load(model_path), strict=True)
        model.eval()
        for _, v in model.named_parameters():
            v.requires_grad = False
        model = model.to(device)
    elif 'ircnn' in model_name:
        from models.network_dncnn import IRCNN as net
        model = net(in_nc=n_channels, out_nc=n_channels, nc=64)
        model25 = torch.load(model_path)
        former_idx = 0

    logger.info('model_name:{}, image sigma:{:.3f}, model sigma:{:.3f}'.format(model_name, noise_level_img, noise_level_model))
    logger.info('Model path: {:s}'.format(model_path))
    logger.info(L_path)
    L_paths = util.get_image_paths(L_path)

    # --------------------------------
    # load kernel
    # --------------------------------
    # kernels = hdf5storage.loadmat(os.path.join('kernels', 'Levin09.mat'))['kernels']
    if classical_degradation:
        kernels = hdf5storage.loadmat(os.path.join('kernels', 'kernels_12.mat'))['kernels']
    else:
        kernels = hdf5storage.loadmat(os.path.join('kernels', 'kernel_bicubicx234.mat'))['kernels']

    test_results_ave = OrderedDict()
    test_results_ave['psnr_sf_k'] = []
    test_results_ave['psnr_y_sf_k'] = []

    for sf in test_sf:
        border = sf
        modelSigma2 = max(sf, noise_level_model * 255.)
        k_num = 8 if classical_degradation else 1

        for k_index in range(k_num):
            logger.info('--------- sf:{:>1d} --k:{:>2d} ---------'.format(sf, k_index))
            test_results = OrderedDict()
            test_results['psnr'] = []
            test_results['psnr_y'] = []

            if not classical_degradation:  # for bicubic degradation
                k_index = sf - 2
            k = kernels[0, k_index].astype(np.float64)
            util.surf(k) if show_img else None

            for idx, img in enumerate(L_paths):

                # --------------------------------
                # (1) get img_L
                # --------------------------------
                img_name, ext = os.path.splitext(os.path.basename(img))
                img_H = util.imread_uint(img, n_channels=n_channels)
                img_H = util.modcrop(img_H, sf)  # modcrop

                if classical_degradation:
                    img_L = sr.classical_degradation(img_H, k, sf)
                    util.imshow(img_L) if show_img else None
                    img_L = util.uint2single(img_L)
                else:
                    img_L = util.imresize_np(util.uint2single(img_H), 1 / sf)

                np.random.seed(seed=0)  # for reproducibility
                img_L += np.random.normal(0, noise_level_img, img_L.shape)  # add AWGN

                # --------------------------------
                # (2) get rhos and sigmas
                # --------------------------------
                rhos, sigmas = pnp.get_rho_sigma(sigma=max(0.255 / 255., noise_level_model), iter_num=iter_num,
                                                 modelSigma1=modelSigma1, modelSigma2=modelSigma2, w=1)
                rhos, sigmas = torch.tensor(rhos).to(device), torch.tensor(sigmas).to(device)

                # --------------------------------
                # (3) initialize x, and pre-calculation
                # --------------------------------
                x = cv2.resize(img_L, (img_L.shape[1] * sf, img_L.shape[0] * sf), interpolation=cv2.INTER_CUBIC)
                if np.ndim(x) == 2:
                    x = x[..., None]

                if classical_degradation:
                    x = sr.shift_pixel(x, sf)
                x = util.single2tensor4(x).to(device)

                img_L_tensor, k_tensor = util.single2tensor4(img_L), util.single2tensor4(np.expand_dims(k, 2))
                [k_tensor, img_L_tensor] = util.todevice([k_tensor, img_L_tensor], device)
                FB, FBC, F2B, FBFy = sr.pre_calculate(img_L_tensor, k_tensor, sf)

                # --------------------------------
                # (4) main iterations
                # --------------------------------
                for i in range(iter_num):

                    # --------------------------------
                    # step 1, FFT-based closed-form solution of the data subproblem
                    # --------------------------------
                    tau = rhos[i].float().repeat(1, 1, 1, 1)
                    x = sr.data_solution(x.float(), FB, FBC, F2B, FBFy, tau, sf)

                    if 'ircnn' in model_name:
                        current_idx = int(np.ceil(sigmas[i].cpu().numpy() * 255. / 2.) - 1)
                        if current_idx != former_idx:
                            model.load_state_dict(model25[str(current_idx)], strict=True)
                            model.eval()
                            for _, v in model.named_parameters():
                                v.requires_grad = False
                            model = model.to(device)
                        former_idx = current_idx

                    # --------------------------------
                    # step 2, denoiser
                    # --------------------------------
                    if x8:
                        x = util.augment_img_tensor4(x, i % 8)

                    if 'drunet' in model_name:
                        x = torch.cat((x, sigmas[i].float().repeat(1, 1, x.shape[2], x.shape[3])), dim=1)
                        x = utils_model.test_mode(model, x, mode=2, refield=32, min_size=256, modulo=16)
                    elif 'ircnn' in model_name:
                        x = model(x)

                    if x8:
                        if i % 8 == 3 or i % 8 == 5:
                            x = util.augment_img_tensor4(x, 8 - i % 8)
                        else:
                            x = util.augment_img_tensor4(x, i % 8)

                # --------------------------------
                # (5) img_E
                # --------------------------------
                img_E = util.tensor2uint(x)

                if save_E:
                    util.imsave(img_E, os.path.join(E_path, img_name + '_x' + str(sf) + '_k' + str(k_index) + '_' + model_name + '.png'))

                if n_channels == 1:
                    img_H = img_H.squeeze()

                # --------------------------------
                # (6) img_LEH
                # --------------------------------
                img_L = util.single2uint(img_L).squeeze()
                if save_LEH:
                    k_v = k / np.max(k) * 1.0
                    if n_channels == 1:
                        k_v = util.single2uint(k_v)
                    else:
                        k_v = util.single2uint(np.tile(k_v[..., np.newaxis], [1, 1, n_channels]))
                    k_v = cv2.resize(k_v, (3 * k_v.shape[1], 3 * k_v.shape[0]), interpolation=cv2.INTER_NEAREST)
                    img_I = cv2.resize(img_L, (sf * img_L.shape[1], sf * img_L.shape[0]), interpolation=cv2.INTER_NEAREST)
                    img_I[:k_v.shape[0], -k_v.shape[1]:, ...] = k_v
                    img_I[:img_L.shape[0], :img_L.shape[1], ...] = img_L
                    util.imshow(np.concatenate([img_I, img_E, img_H], axis=1), title='LR / Recovered / Ground-truth') if show_img else None
                    util.imsave(np.concatenate([img_I, img_E, img_H], axis=1),
                                os.path.join(E_path, img_name + '_x' + str(sf) + '_k' + str(k_index) + '_LEH.png'))

                if save_L:
                    util.imsave(img_L, os.path.join(E_path, img_name + '_x' + str(sf) + '_k' + str(k_index) + '_LR.png'))

                psnr = util.calculate_psnr(img_E, img_H, border=border)
                test_results['psnr'].append(psnr)
                logger.info('{:->4d}--> {:>10s} -- sf:{:>1d} --k:{:>2d} PSNR: {:.2f}dB'.format(idx + 1, img_name + ext, sf, k_index, psnr))

                if n_channels == 3:
                    img_E_y = util.rgb2ycbcr(img_E, only_y=True)
                    img_H_y = util.rgb2ycbcr(img_H, only_y=True)
                    psnr_y = util.calculate_psnr(img_E_y, img_H_y, border=border)
                    test_results['psnr_y'].append(psnr_y)

            # --------------------------------
            # Average PSNR for all kernels
            # --------------------------------
            ave_psnr_k = sum(test_results['psnr']) / len(test_results['psnr'])
            logger.info('------> Average PSNR(RGB) of ({}) scale factor: ({}), kernel: ({}) sigma: ({:.2f}): {:.2f} dB'.format(
                testset_name, sf, k_index, noise_level_model, ave_psnr_k))
            test_results_ave['psnr_sf_k'].append(ave_psnr_k)

            if n_channels == 3:  # RGB image
                ave_psnr_y_k = sum(test_results['psnr_y']) / len(test_results['psnr_y'])
                logger.info('------> Average PSNR(Y) of ({}) scale factor: ({}), kernel: ({}) sigma: ({:.2f}): {:.2f} dB'.format(
                    testset_name, sf, k_index, noise_level_model, ave_psnr_y_k))
                test_results_ave['psnr_y_sf_k'].append(ave_psnr_y_k)

    # ---------------------------------------
    # Average PSNR for all sf and kernels
    # ---------------------------------------
    ave_psnr_sf_k = sum(test_results_ave['psnr_sf_k']) / len(test_results_ave['psnr_sf_k'])
    logger.info('------> Average PSNR of ({}) {:.2f} dB'.format(testset_name, ave_psnr_sf_k))
    if n_channels == 3:
        ave_psnr_y_sf_k = sum(test_results_ave['psnr_y_sf_k']) / len(test_results_ave['psnr_y_sf_k'])
        logger.info('------> Average PSNR(Y) of ({}) {:.2f} dB'.format(testset_name, ave_psnr_y_sf_k))
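# ------------------------------------------------------------------
# Note (added, not in the original source): the main loop above follows
# the usual plug-and-play half-quadratic-splitting scheme. Each
# iteration first solves the data subproblem in closed form via FFTs
# (sr.data_solution, using the pre-computed FB/FBC/F2B/FBFy terms and
# the penalty rho_i), then applies the deep denoiser as the prior step
# with a gradually decreasing noise level sigma_i from pnp.get_rho_sigma.
# ------------------------------------------------------------------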
model_name = 'dncnn_25'
sigma = 25
testset_name = 'bsd68'
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
criterion = nn.MSELoss(reduction='sum')
lr = 1e-5
epochs = 100

test_results = OrderedDict()
test_results['psnr_before'] = []
test_results['psnr_after'] = []

# 'model_pool', 'testsets' and 'n_channels' are assumed to be defined earlier in this script
model_path = os.path.join(model_pool, model_name + '.pth')
test_paths = os.path.join(testsets, testset_name)
test_paths = util.get_image_paths(test_paths)

# training loop: reload a fresh copy of the pretrained model for each test image
for idx, img in enumerate(test_paths):
    start_time = time.time()

    # load model
    model = DnCNN(in_nc=n_channels, out_nc=n_channels, nc=64, nb=17, act_mode='R')
    model.load_state_dict(torch.load(model_path), strict=True)
    model = model.to(device)
    model.eval()
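# ------------------------------------------------------------------
# Hedged sketch (added, not part of the original source): one plausible
# continuation of the per-image loop above, assuming the experiment
# compares PSNR before and after briefly fine-tuning the denoiser on a
# re-noised copy of each test image. Names below that do not appear
# above (optimizer, img_H_t, ...) are hypothetical.
# ------------------------------------------------------------------
#     img_H = util.imread_uint(img, n_channels=n_channels)
#     img_L = util.uint2single(img_H)
#     np.random.seed(seed=0)                                    # for reproducibility
#     img_L += np.random.normal(0, sigma / 255., img_L.shape)   # add AWGN with the fixed sigma
#     img_L = util.single2tensor4(img_L).to(device)
#     img_H_t = util.uint2tensor4(img_H).to(device)
#
#     with torch.no_grad():
#         psnr_before = util.calculate_psnr(util.tensor2uint(model(img_L)), img_H)
#     test_results['psnr_before'].append(psnr_before)
#
#     optimizer = torch.optim.Adam(model.parameters(), lr=lr)
#     model.train()
#     for epoch in range(epochs):
#         optimizer.zero_grad()
#         loss = criterion(model(img_L), img_H_t)
#         loss.backward()
#         optimizer.step()
#
#     model.eval()
#     with torch.no_grad():
#         psnr_after = util.calculate_psnr(util.tensor2uint(model(img_L)), img_H)
#     test_results['psnr_after'].append(psnr_after)
#     print('{:d}: {:.2f} dB --> {:.2f} dB in {:.1f}s'.format(idx, psnr_before, psnr_after, time.time() - start_time))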
def main():

    # ----------------------------------------
    # Preparation
    # ----------------------------------------
    model_name = 'dncnn3'        # 'dncnn3' can be used for blind Gaussian denoising, JPEG deblocking (quality factor 5-100) and super-resolution (x2/x3/x4)  # important!
    testset_name = 'bsd68'       # test set, low-quality grayscale/color JPEG images
    n_channels = 1               # set 1 for grayscale image, set 3 for color image
    x8 = False                   # default: False, x8 to boost performance

    testsets = 'testsets'        # fixed
    results = 'results'          # fixed
    result_name = testset_name + '_' + model_name  # fixed
    L_path = os.path.join(testsets, testset_name)  # L_path, for Low-quality grayscale/Y-channel JPEG images
    E_path = os.path.join(results, result_name)    # E_path, for Estimated images
    util.mkdir(E_path)

    model_pool = 'model_zoo'     # fixed
    model_path = os.path.join(model_pool, model_name + '.pth')
    logger_name = result_name
    utils_logger.logger_info(logger_name, log_path=os.path.join(E_path, logger_name + '.log'))
    logger = logging.getLogger(logger_name)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # ----------------------------------------
    # load model
    # ----------------------------------------
    from models.network_dncnn import DnCNN as net
    model = net(in_nc=1, out_nc=1, nc=64, nb=20, act_mode='R')
    model.load_state_dict(torch.load(model_path), strict=True)
    model.eval()
    for k, v in model.named_parameters():
        v.requires_grad = False
    model = model.to(device)
    logger.info('Model path: {:s}'.format(model_path))
    number_parameters = sum(map(lambda x: x.numel(), model.parameters()))
    logger.info('Params number: {}'.format(number_parameters))

    logger.info(L_path)
    L_paths = util.get_image_paths(L_path)

    for idx, img in enumerate(L_paths):

        # ------------------------------------
        # (1) img_L
        # ------------------------------------
        img_name, ext = os.path.splitext(os.path.basename(img))
        logger.info('{:->4d}--> {:>10s}'.format(idx + 1, img_name + ext))
        img_L = util.imread_uint(img, n_channels=n_channels)
        img_L = util.uint2single(img_L)
        if n_channels == 3:
            ycbcr = util.rgb2ycbcr(img_L, False)
            img_L = ycbcr[..., 0:1]  # the model runs on the Y channel only
        img_L = util.single2tensor4(img_L)
        img_L = img_L.to(device)

        # ------------------------------------
        # (2) img_E
        # ------------------------------------
        if not x8:
            img_E = model(img_L)
        else:
            img_E = utils_model.test_mode(model, img_L, mode=3)
        img_E = util.tensor2single(img_E)
        if n_channels == 3:
            ycbcr[..., 0] = img_E
            img_E = util.ycbcr2rgb(ycbcr)
        img_E = util.single2uint(img_E)

        # ------------------------------------
        # save results
        # ------------------------------------
        util.imsave(img_E, os.path.join(E_path, img_name + '.png'))
def main(model=None, model_path=None):

    # --------------------------------
    # basic settings
    # --------------------------------
    testsets = 'DIV2K'                   # DIV2K root path
    testset_L = 'DIV2K_test_LR_bicubic'  # test image folder name

    if model_path is None:
        model_path = 'MSRResNetx4_model'

    utils_logger.logger_info('AIM-track', log_path=os.path.join(model_path, 'AIM-track.log'))
    logger = logging.getLogger('AIM-track')

    torch.cuda.current_device()
    torch.cuda.empty_cache()
    torch.backends.cudnn.benchmark = True
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # --------------------------------
    # load model
    # --------------------------------
    # model_path = os.path.join('MSRResNetx4_model', 'MSRResNetx4.pth')
    if model is None:
        model = MSRResNet(in_nc=3, out_nc=3, nf=64, nb=16, upscale=4)
        model.load_state_dict(torch.load(os.path.join(model_path, 'model.pth')), strict=True)
        model.eval()

    for k, v in model.named_parameters():
        v.requires_grad = False
    model = model.to(device)

    # number of parameters
    number_parameters = sum(map(lambda x: x.numel(), model.parameters()))
    logger.info('Params number: {}'.format(number_parameters))
    print('Params number: {}'.format(number_parameters))

    # --------------------------------
    # read image
    # --------------------------------
    L_folder = os.path.join(testsets, testset_L)
    assert os.path.isdir(L_folder)  # check the test images path
    E_folder = os.path.join(model_path, 'results')
    util.mkdir(E_folder)

    # record PSNR, runtime
    test_results = OrderedDict()
    test_results['runtime'] = []

    logger.info(L_folder)
    logger.info(E_folder)

    idx = 0
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)

    for img in util.get_image_paths(L_folder):

        # --------------------------------
        # (1) img_L
        # --------------------------------
        idx += 1
        img_name, ext = os.path.splitext(os.path.basename(img))
        logger.info('{:->4d}--> {:>10s}'.format(idx, img_name + ext))

        img_L = util.imread_uint(img, n_channels=3)
        img_L = util.uint2tensor4(img_L)
        img_L = img_L.to(device)

        start.record()
        img_E = model(img_L)
        end.record()
        torch.cuda.synchronize()
        test_results['runtime'].append(start.elapsed_time(end))  # milliseconds

        # torch.cuda.synchronize()
        # start = time.time()
        # img_E = model(img_L)
        # torch.cuda.synchronize()
        # end = time.time()
        # test_results['runtime'].append(end-start)  # seconds

        # --------------------------------
        # (2) img_E
        # --------------------------------
        img_E = util.tensor2uint(img_E)
        util.imsave(img_E, os.path.join(E_folder, img_name + ext))

    ave_runtime = sum(test_results['runtime']) / len(test_results['runtime']) / 1000.0
    logger.info('------> Average runtime of ({}) is : {:.6f} seconds'.format(L_folder, ave_runtime))
    print('------> Average runtime of ({}) is : {:.6f} seconds'.format(L_folder, ave_runtime))
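# The script above selects `device` conditionally but measures runtime with
# torch.cuda.Event unconditionally, which raises on CPU-only machines. A hedged
# helper that falls back to a wall-clock timer when CUDA is absent
# (`timed_forward` is an illustrative name, not part of the repository):
import time
import torch

def timed_forward(model, img_L):
    """Run model(img_L) and return (output, elapsed time in milliseconds)."""
    if torch.cuda.is_available():
        start = torch.cuda.Event(enable_timing=True)
        end = torch.cuda.Event(enable_timing=True)
        start.record()
        with torch.no_grad():
            out = model(img_L)
        end.record()
        torch.cuda.synchronize()
        return out, start.elapsed_time(end)
    t0 = time.perf_counter()
    with torch.no_grad():
        out = model(img_L)
    return out, (time.perf_counter() - t0) * 1000.0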
def main(): utils_logger.logger_info('blind_sr_log', log_path='blind_sr_log.log') logger = logging.getLogger('blind_sr_log') # print(torch.__version__) # pytorch version # print(torch.version.cuda) # cuda version # print(torch.backends.cudnn.version()) # cudnn version testsets = 'testsets' # fixed, set path of testsets testset_Ls = ['RealSRSet'] # ['RealSRSet','DPED'] model_names = ['RRDB','ESRGAN','FSSR_DPED','FSSR_JPEG','RealSR_DPED','RealSR_JPEG'] model_names = ['BSRGAN'] save_results = True sf = 4 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') for model_name in model_names: model_path = os.path.join('model_zoo', model_name+'.pth') # set model path logger.info('{:>16s} : {:s}'.format('Model Name', model_name)) # torch.cuda.set_device(0) # set GPU ID logger.info('{:>16s} : {:<d}'.format('GPU ID', torch.cuda.current_device())) torch.cuda.empty_cache() # -------------------------------- # define network and load model # -------------------------------- model = net(in_nc=3, out_nc=3, nf=64, nb=23, gc=32) # define network # model_old = torch.load(model_path) # state_dict = model.state_dict() # for ((key, param),(key2, param2)) in zip(model_old.items(), state_dict.items()): # state_dict[key2] = param # model.load_state_dict(state_dict, strict=True) model.load_state_dict(torch.load(model_path), strict=True) model.eval() for k, v in model.named_parameters(): v.requires_grad = False model = model.to(device) torch.cuda.empty_cache() for testset_L in testset_Ls: L_path = os.path.join(testsets, testset_L) #E_path = os.path.join(testsets, testset_L+'_'+model_name) E_path = os.path.join(testsets, testset_L+'_results_x'+str(sf)) util.mkdir(E_path) logger.info('{:>16s} : {:s}'.format('Input Path', L_path)) logger.info('{:>16s} : {:s}'.format('Output Path', E_path)) idx = 0 for img in util.get_image_paths(L_path): # -------------------------------- # (1) img_L # -------------------------------- idx += 1 img_name, ext = os.path.splitext(os.path.basename(img)) logger.info('{:->4d} --> {:<s} --> x{:<d}--> {:<s}'.format(idx, model_name, sf, img_name+ext)) img_L = util.imread_uint(img, n_channels=3) img_L = util.uint2tensor4(img_L) img_L = img_L.to(device) # -------------------------------- # (2) inference # -------------------------------- img_E = model(img_L) # -------------------------------- # (3) img_E # -------------------------------- img_E = util.tensor2uint(img_E) if save_results: util.imsave(img_E, os.path.join(E_path, img_name+'_'+model_name+'.png'))
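# The loop above feeds each RealSRSet image to the x4 model in a single forward
# pass, which can exhaust GPU memory on large real-world inputs. A hedged sketch
# of simple overlapping tile inference; `tile_forward` and the tile/overlap
# values are assumptions, and overlapping regions are overwritten rather than
# blended (usually acceptable with a modest overlap):
import torch

def tile_forward(model, img_L, sf=4, tile=256, overlap=16):
    """img_L: (1, C, H, W) tensor; returns the stitched (1, C, sf*H, sf*W) output."""
    _, c, h, w = img_L.shape
    out = torch.zeros(1, c, h * sf, w * sf, device=img_L.device)
    step = tile - overlap
    for top in range(0, h, step):
        for left in range(0, w, step):
            bottom, right = min(top + tile, h), min(left + tile, w)
            with torch.no_grad():
                patch_E = model(img_L[..., top:bottom, left:right])
            out[..., top * sf:bottom * sf, left * sf:right * sf] = patch_E
    return out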
def main(): # ---------------------------------------- # Preparation # ---------------------------------------- noise_level_img = 50 # noise level for noisy image model_name = 'ircnn_gray' # 'ircnn_gray' | 'ircnn_color' testset_name = 'set12' # test set, 'bsd68' | 'set12' need_degradation = True # default: True x8 = False # default: False, x8 to boost performance show_img = False # default: False current_idx = min(24, np.int(np.ceil(noise_level_img/2)-1)) # current_idx+1 th denoiser task_current = 'dn' # fixed, 'dn' for denoising | 'sr' for super-resolution sf = 1 # unused for denoising if 'color' in model_name: n_channels = 3 # fixed, 1 for grayscale image, 3 for color image else: n_channels = 1 # fixed for grayscale image model_pool = 'model_zoo' # fixed testsets = 'testsets' # fixed results = 'results' # fixed result_name = testset_name + '_' + model_name # fixed border = sf if task_current == 'sr' else 0 # shave boader to calculate PSNR and SSIM model_path = os.path.join(model_pool, model_name+'.pth') # ---------------------------------------- # L_path, E_path, H_path # ---------------------------------------- L_path = os.path.join(testsets, testset_name) # L_path, for Low-quality images H_path = L_path # H_path, for High-quality images E_path = os.path.join(results, result_name) # E_path, for Estimated images util.mkdir(E_path) if H_path == L_path: need_degradation = True logger_name = result_name utils_logger.logger_info(logger_name, log_path=os.path.join(E_path, logger_name+'.log')) logger = logging.getLogger(logger_name) need_H = True if H_path is not None else False device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # ---------------------------------------- # load model # ---------------------------------------- model25 = torch.load(model_path) from models.network_dncnn import IRCNN as net model = net(in_nc=n_channels, out_nc=n_channels, nc=64) model.load_state_dict(model25[str(current_idx)], strict=True) model.eval() for _, v in model.named_parameters(): v.requires_grad = False model = model.to(device) logger.info('Model path: {:s}'.format(model_path)) number_parameters = sum(map(lambda x: x.numel(), model.parameters())) logger.info('Params number: {}'.format(number_parameters)) test_results = OrderedDict() test_results['psnr'] = [] test_results['ssim'] = [] logger.info('model_name:{}, image sigma:{}'.format(model_name, noise_level_img)) logger.info(L_path) L_paths = util.get_image_paths(L_path) H_paths = util.get_image_paths(H_path) if need_H else None for idx, img in enumerate(L_paths): # ------------------------------------ # (1) img_L # ------------------------------------ img_name, ext = os.path.splitext(os.path.basename(img)) # logger.info('{:->4d}--> {:>10s}'.format(idx+1, img_name+ext)) img_L = util.imread_uint(img, n_channels=n_channels) img_L = util.uint2single(img_L) if need_degradation: # degradation process np.random.seed(seed=0) # for reproducibility img_L += np.random.normal(0, noise_level_img/255., img_L.shape) util.imshow(util.single2uint(img_L), title='Noisy image with noise level {}'.format(noise_level_img)) if show_img else None img_L = util.single2tensor4(img_L) img_L = img_L.to(device) # ------------------------------------ # (2) img_E # ------------------------------------ if not x8: img_E = model(img_L) else: img_E = utils_model.test_mode(model, img_L, mode=3) img_E = util.tensor2uint(img_E) if need_H: # -------------------------------- # (3) img_H # -------------------------------- img_H = util.imread_uint(H_paths[idx], 
n_channels=n_channels) img_H = img_H.squeeze() # -------------------------------- # PSNR and SSIM # -------------------------------- psnr = util.calculate_psnr(img_E, img_H, border=border) ssim = util.calculate_ssim(img_E, img_H, border=border) test_results['psnr'].append(psnr) test_results['ssim'].append(ssim) logger.info('{:s} - PSNR: {:.2f} dB; SSIM: {:.4f}.'.format(img_name+ext, psnr, ssim)) util.imshow(np.concatenate([img_E, img_H], axis=1), title='Recovered / Ground-truth') if show_img else None # ------------------------------------ # save results # ------------------------------------ util.imsave(img_E, os.path.join(E_path, img_name+ext)) if need_H: ave_psnr = sum(test_results['psnr']) / len(test_results['psnr']) ave_ssim = sum(test_results['ssim']) / len(test_results['ssim']) logger.info('Average PSNR/SSIM(RGB) - {} - PSNR: {:.2f} dB; SSIM: {:.4f}'.format(result_name, ave_psnr, ave_ssim))
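# The index `min(24, np.int(np.ceil(noise_level_img/2)-1))` used above selects one
# of the 25 denoisers stored in the 'ircnn_*' checkpoint (keys '0'..'24'); the
# formula implies that denoiser i covers noise levels in roughly (2*i, 2*i + 2].
# Note that `np.int` was removed in NumPy 1.24+, so the built-in `int` should be
# used instead. A small helper with the same arithmetic (illustrative name):
import numpy as np

def ircnn_denoiser_index(noise_level):
    """Map an image noise level (0-255 scale) to an IRCNN model index in 0..24."""
    return min(24, int(np.ceil(noise_level / 2.0) - 1))

# e.g. noise level 2 -> index 0, 15 -> index 7, 50 -> index 24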
def main(): # ---------------------------------------- # Preparation # ---------------------------------------- noise_level_img = 30 # noise level for noisy image noise_level_model = noise_level_img # noise level for model model_name = 'ffdnet_color' # 'ffdnet_gray' | 'ffdnet_color' | 'ffdnet_color_clip' | 'ffdnet_gray_clip' testset_name = 'CBSD68' # test set, 'bsd68' | 'cbsd68' | 'set12' need_degradation = True # default: True show_img = False # default: False task_current = 'dn' # 'dn' for denoising | 'sr' for super-resolution sf = 1 # unused for denoising if 'color' in model_name: n_channels = 3 # setting for color image nc = 96 # setting for color image nb = 12 # setting for color image else: n_channels = 1 # setting for grayscale image nc = 64 # setting for grayscale image nb = 15 # setting for grayscale image if 'clip' in model_name: use_clip = True # clip the intensities into range of [0, 1] else: use_clip = False model_pool = 'model_zoo' # fixed testsets = 'testsets' # fixed results = 'results' # fixed result_name = testset_name + '_' + model_name border = sf if task_current == 'sr' else 0 # shave boader to calculate PSNR and SSIM model_path = os.path.join(model_pool, model_name + '.pth') # ---------------------------------------- # L_path, E_path, H_path # ---------------------------------------- L_path = os.path.join(testsets, testset_name) # L_path, for Low-quality images H_path = L_path # H_path, for High-quality images E_path = os.path.join(results, result_name) # E_path, for Estimated images util.mkdir(E_path) if H_path == L_path: need_degradation = True logger_name = result_name utils_logger.logger_info(logger_name, log_path=os.path.join(E_path, logger_name + '.log')) logger = logging.getLogger(logger_name) need_H = True if H_path is not None else False device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # ---------------------------------------- # load model # ---------------------------------------- from models.network_ffdnet import FFDNet as net model = net(in_nc=n_channels, out_nc=n_channels, nc=nc, nb=nb, act_mode='R') model.load_state_dict(torch.load(model_path), strict=True) model.eval() for k, v in model.named_parameters(): v.requires_grad = False model = model.to(device) logger.info('Model path: {:s}'.format(model_path)) test_results = OrderedDict() test_results['psnr'] = [] test_results['ssim'] = [] logger.info('model_name:{}, model sigma:{}, image sigma:{}'.format( model_name, noise_level_img, noise_level_model)) logger.info(L_path) L_paths = util.get_image_paths(L_path) H_paths = util.get_image_paths(H_path) if need_H else None for idx, img in enumerate(L_paths): # ------------------------------------ # (1) img_L # ------------------------------------ img_name, ext = os.path.splitext(os.path.basename(img)) # logger.info('{:->4d}--> {:>10s}'.format(idx+1, img_name+ext)) img_L = util.imread_uint(img, n_channels=n_channels) img_L = util.uint2single(img_L) if need_degradation: # degradation process np.random.seed(seed=0) # for reproducibility img_L += np.random.normal(0, noise_level_img / 255., img_L.shape) if use_clip: img_L = util.uint2single(util.single2uint(img_L)) util.imshow(util.single2uint(img_L), title='Noisy image with noise level {}'.format( noise_level_img)) if show_img else None img_L = util.single2tensor4(img_L) img_L = img_L.to(device) sigma = torch.full((1, 1, 1, 1), noise_level_model / 255.).type_as(img_L) # ------------------------------------ # (2) img_E # ------------------------------------ img_E = model(img_L, sigma) img_E = 
util.tensor2uint(img_E) if need_H: # -------------------------------- # (3) img_H # -------------------------------- img_H = util.imread_uint(H_paths[idx], n_channels=n_channels) img_H = img_H.squeeze() # -------------------------------- # PSNR and SSIM # -------------------------------- psnr = util.calculate_psnr(img_E, img_H, border=border) ssim = util.calculate_ssim(img_E, img_H, border=border) test_results['psnr'].append(psnr) test_results['ssim'].append(ssim) logger.info('{:s} - PSNR: {:.2f} dB; SSIM: {:.4f}.'.format( img_name + ext, psnr, ssim)) util.imshow(np.concatenate([img_E, img_H], axis=1), title='Recovered / Ground-truth') if show_img else None # ------------------------------------ # save results # ------------------------------------ util.imsave(img_E, os.path.join(E_path, img_name + ext)) if need_H: ave_psnr = sum(test_results['psnr']) / len(test_results['psnr']) ave_ssim = sum(test_results['ssim']) / len(test_results['ssim']) logger.info( 'Average PSNR/SSIM(RGB) - {} - PSNR: {:.2f} dB; SSIM: {:.4f}'. format(result_name, ave_psnr, ave_ssim))
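# For the '*_clip' model variants, the noisy image above is passed through
# util.single2uint followed by util.uint2single, i.e. it is clipped to [0, 1] and
# quantized to 8-bit steps before denoising. An effectively equivalent
# stand-alone sketch of that round-trip (illustrative helper name):
import numpy as np

def clip_and_quantize(img):
    """Emulate saving a float image in [0, 1] as 8-bit: clip, round, rescale."""
    return np.round(np.clip(img, 0.0, 1.0) * 255.0) / 255.0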
def main(): # ---------------------------------------- # Preparation # ---------------------------------------- noise_level_img = 7.65 / 255.0 # default: 0, noise level for LR image noise_level_model = noise_level_img # noise level of model, default 0 model_name = 'drunet_gray' # 'drunet_gray' | 'drunet_color' | 'ircnn_gray' | 'ircnn_color' testset_name = 'Set3C' # test set, 'set5' | 'srbsd68' x8 = True # default: False, x8 to boost performance iter_num = 8 # number of iterations modelSigma1 = 49 modelSigma2 = noise_level_model * 255. show_img = False # default: False save_L = True # save LR image save_E = True # save estimated image save_LEH = False # save zoomed LR, E and H images border = 0 # -------------------------------- # load kernel # -------------------------------- kernels = hdf5storage.loadmat(os.path.join('kernels', 'Levin09.mat'))['kernels'] sf = 1 task_current = 'deblur' # 'deblur' for deblurring n_channels = 3 if 'color' in model_name else 1 # fixed model_zoo = 'model_zoo' # fixed testsets = 'testsets' # fixed results = 'results' # fixed result_name = testset_name + '_' + task_current + '_' + model_name model_path = os.path.join(model_zoo, model_name + '.pth') device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') torch.cuda.empty_cache() # ---------------------------------------- # L_path, E_path, H_path # ---------------------------------------- L_path = os.path.join(testsets, testset_name) # L_path, for Low-quality images E_path = os.path.join(results, result_name) # E_path, for Estimated images util.mkdir(E_path) logger_name = result_name utils_logger.logger_info(logger_name, log_path=os.path.join(E_path, logger_name + '.log')) logger = logging.getLogger(logger_name) # ---------------------------------------- # load model # ---------------------------------------- if 'drunet' in model_name: from models.network_unet import UNetRes as net model = net(in_nc=n_channels + 1, out_nc=n_channels, nc=[64, 128, 256, 512], nb=4, act_mode='R', downsample_mode="strideconv", upsample_mode="convtranspose") model.load_state_dict(torch.load(model_path), strict=True) model.eval() for _, v in model.named_parameters(): v.requires_grad = False model = model.to(device) elif 'ircnn' in model_name: from models.network_dncnn import IRCNN as net model = net(in_nc=n_channels, out_nc=n_channels, nc=64) model25 = torch.load(model_path) former_idx = 0 logger.info('model_name:{}, image sigma:{:.3f}, model sigma:{:.3f}'.format( model_name, noise_level_img, noise_level_model)) logger.info('Model path: {:s}'.format(model_path)) logger.info(L_path) L_paths = util.get_image_paths(L_path) test_results_ave = OrderedDict() test_results_ave['psnr'] = [] # record average PSNR for each kernel for k_index in range(kernels.shape[1]): logger.info('-------k:{:>2d} ---------'.format(k_index)) test_results = OrderedDict() test_results['psnr'] = [] k = kernels[0, k_index].astype(np.float64) util.imshow(k) if show_img else None for idx, img in enumerate(L_paths): # -------------------------------- # (1) get img_L # -------------------------------- img_name, ext = os.path.splitext(os.path.basename(img)) img_H = util.imread_uint(img, n_channels=n_channels) img_H = util.modcrop(img_H, 8) # modcrop img_L = ndimage.filters.convolve(img_H, np.expand_dims(k, axis=2), mode='wrap') util.imshow(img_L) if show_img else None img_L = util.uint2single(img_L) np.random.seed(seed=0) # for reproducibility img_L += np.random.normal(0, noise_level_img, img_L.shape) # add AWGN # -------------------------------- # (2) get 
rhos and sigmas # -------------------------------- rhos, sigmas = pnp.get_rho_sigma(sigma=max(0.255 / 255., noise_level_model), iter_num=iter_num, modelSigma1=modelSigma1, modelSigma2=modelSigma2, w=1.0) rhos, sigmas = torch.tensor(rhos).to(device), torch.tensor( sigmas).to(device) # -------------------------------- # (3) initialize x, and pre-calculation # -------------------------------- x = util.single2tensor4(img_L).to(device) img_L_tensor, k_tensor = util.single2tensor4( img_L), util.single2tensor4(np.expand_dims(k, 2)) [k_tensor, img_L_tensor] = util.todevice([k_tensor, img_L_tensor], device) FB, FBC, F2B, FBFy = sr.pre_calculate(img_L_tensor, k_tensor, sf) # -------------------------------- # (4) main iterations # -------------------------------- for i in range(iter_num): # -------------------------------- # step 1, FFT # -------------------------------- tau = rhos[i].float().repeat(1, 1, 1, 1) x = sr.data_solution(x, FB, FBC, F2B, FBFy, tau, sf) if 'ircnn' in model_name: current_idx = np.int( np.ceil(sigmas[i].cpu().numpy() * 255. / 2.) - 1) if current_idx != former_idx: model.load_state_dict(model25[str(current_idx)], strict=True) model.eval() for _, v in model.named_parameters(): v.requires_grad = False model = model.to(device) former_idx = current_idx # -------------------------------- # step 2, denoiser # -------------------------------- if x8: x = util.augment_img_tensor4(x, i % 8) if 'drunet' in model_name: x = torch.cat((x, sigmas[i].float().repeat( 1, 1, x.shape[2], x.shape[3])), dim=1) x = utils_model.test_mode(model, x, mode=2, refield=32, min_size=256, modulo=16) elif 'ircnn' in model_name: x = model(x) if x8: if i % 8 == 3 or i % 8 == 5: x = util.augment_img_tensor4(x, 8 - i % 8) else: x = util.augment_img_tensor4(x, i % 8) # -------------------------------- # (3) img_E # -------------------------------- img_E = util.tensor2uint(x) if n_channels == 1: img_H = img_H.squeeze() if save_E: util.imsave( img_E, os.path.join( E_path, img_name + '_k' + str(k_index) + '_' + model_name + '.png')) # -------------------------------- # (4) img_LEH # -------------------------------- if save_LEH: img_L = util.single2uint(img_L) k_v = k / np.max(k) * 1.0 k_v = util.single2uint(np.tile(k_v[..., np.newaxis], [1, 1, 3])) k_v = cv2.resize(k_v, (3 * k_v.shape[1], 3 * k_v.shape[0]), interpolation=cv2.INTER_NEAREST) img_I = cv2.resize(img_L, (sf * img_L.shape[1], sf * img_L.shape[0]), interpolation=cv2.INTER_NEAREST) img_I[:k_v.shape[0], -k_v.shape[1]:, :] = k_v img_I[:img_L.shape[0], :img_L.shape[1], :] = img_L util.imshow(np.concatenate([img_I, img_E, img_H], axis=1), title='LR / Recovered / Ground-truth' ) if show_img else None util.imsave( np.concatenate([img_I, img_E, img_H], axis=1), os.path.join(E_path, img_name + '_k' + str(k_index) + '_LEH.png')) if save_L: util.imsave( util.single2uint(img_L), os.path.join(E_path, img_name + '_k' + str(k_index) + '_LR.png')) psnr = util.calculate_psnr( img_E, img_H, border=border) # change with your own border test_results['psnr'].append(psnr) logger.info('{:->4d}--> {:>10s} --k:{:>2d} PSNR: {:.2f}dB'.format( idx + 1, img_name + ext, k_index, psnr)) # -------------------------------- # Average PSNR # -------------------------------- ave_psnr = sum(test_results['psnr']) / len(test_results['psnr']) logger.info( '------> Average PSNR of ({}), kernel: ({}) sigma: ({:.2f}): {:.2f} dB' .format(testset_name, k_index, noise_level_model, ave_psnr)) test_results_ave['psnr'].append(ave_psnr)
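# Step 1 of the iteration above calls sr.data_solution to solve the HQS data
# subproblem in the Fourier domain. Below is a hedged sketch, for this script's
# sf = 1 deblurring case and a single-channel image, of that closed-form update
# (the helper name is illustrative; the kernel-centering and padding details
# handled by sr.pre_calculate are ignored here):
#
#     z = argmin_z ||y - k * z||^2 + tau * ||z - x||^2
#       = F^{-1}( (conj(F(k)) * F(y) + tau * F(x)) / (|F(k)|^2 + tau) )
import numpy as np

def hqs_data_step_deblur(x, y, k_pad, tau):
    """x: current estimate (H x W), y: blurred observation (H x W),
    k_pad: blur kernel zero-padded to H x W, tau: scalar penalty weight (rhos[i])."""
    Fk = np.fft.fft2(k_pad)
    numerator = np.conj(Fk) * np.fft.fft2(y) + tau * np.fft.fft2(x)
    denominator = np.abs(Fk) ** 2 + tau
    return np.real(np.fft.ifft2(numerator / denominator))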
def main(): # ---------------------------------------- # Preparation # ---------------------------------------- model_name = 'usrnet' # 'usrgan' | 'usrnet' | 'usrgan_tiny' | 'usrnet_tiny' testset_name = 'set5' # test set, 'set5' | 'srbsd68' need_degradation = True # default: True sf = 4 # scale factor, only from {2, 3, 4} show_img = False # default: False save_L = True # save LR image save_E = True # save estimated image # load approximated bicubic kernels #kernels = hdf5storage.loadmat(os.path.join('kernels', 'kernels_bicubicx234.mat'))['kernels'] kernels = loadmat(os.path.join('kernels', 'kernels_bicubicx234.mat'))['kernels'] kernel = kernels[0, sf - 2].astype(np.float64) kernel = util.single2tensor4(kernel[..., np.newaxis]) task_current = 'sr' # fixed, 'sr' for super-resolution n_channels = 3 # fixed, 3 for color image model_pool = 'model_zoo' # fixed testsets = 'testsets' # fixed results = 'results' # fixed noise_level_img = 0 # fixed: 0, noise level for LR image noise_level_model = noise_level_img # fixed, noise level of model, default 0 result_name = testset_name + '_' + model_name + '_bicubic' border = sf if task_current == 'sr' else 0 # shave boader to calculate PSNR and SSIM model_path = os.path.join(model_pool, model_name + '.pth') # ---------------------------------------- # L_path, E_path, H_path # ---------------------------------------- L_path = os.path.join( testsets, testset_name) # L_path, fixed, for Low-quality images H_path = L_path # H_path, 'None' | L_path, for High-quality images E_path = os.path.join(results, result_name) # E_path, fixed, for Estimated images util.mkdir(E_path) if H_path == L_path: need_degradation = True logger_name = result_name utils_logger.logger_info(logger_name, log_path=os.path.join(E_path, logger_name + '.log')) logger = logging.getLogger(logger_name) need_H = True if H_path is not None else False device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # ---------------------------------------- # load model # ---------------------------------------- from models.network_usrnet import USRNet as net # for pytorch version <= 1.7.1 # from models.network_usrnet_v1 import USRNet as net # for pytorch version >=1.8.1 if 'tiny' in model_name: model = net(n_iter=6, h_nc=32, in_nc=4, out_nc=3, nc=[16, 32, 64, 64], nb=2, act_mode="R", downsample_mode='strideconv', upsample_mode="convtranspose") else: model = net(n_iter=8, h_nc=64, in_nc=4, out_nc=3, nc=[64, 128, 256, 512], nb=2, act_mode="R", downsample_mode='strideconv', upsample_mode="convtranspose") model.load_state_dict(torch.load(model_path), strict=True) model.eval() for key, v in model.named_parameters(): v.requires_grad = False number_parameters = sum(map(lambda x: x.numel(), model.parameters())) logger.info('Params number: {}'.format(number_parameters)) model = model.to(device) logger.info('Model path: {:s}'.format(model_path)) test_results = OrderedDict() test_results['psnr'] = [] test_results['ssim'] = [] test_results['psnr_y'] = [] test_results['ssim_y'] = [] logger.info('model_name:{}, image sigma:{}'.format(model_name, noise_level_img)) logger.info(L_path) L_paths = util.get_image_paths(L_path) H_paths = util.get_image_paths(H_path) if need_H else None for idx, img in enumerate(L_paths): # ------------------------------------ # (1) img_L # ------------------------------------ img_name, ext = os.path.splitext(os.path.basename(img)) logger.info('{:->4d}--> {:>10s}'.format(idx + 1, img_name + ext)) img_L = util.imread_uint(img, n_channels=n_channels) img_L = 
util.uint2single(img_L) # degradation process, bicubic downsampling if need_degradation: img_L = util.modcrop(img_L, sf) img_L = util.imresize_np(img_L, 1 / sf) # img_L = util.uint2single(util.single2uint(img_L)) # np.random.seed(seed=0) # for reproducibility # img_L += np.random.normal(0, noise_level_img/255., img_L.shape) w, h = img_L.shape[:2] if save_L: util.imsave( util.single2uint(img_L), os.path.join(E_path, img_name + '_LR_x' + str(sf) + '.png')) img = cv2.resize(img_L, (sf * h, sf * w), interpolation=cv2.INTER_NEAREST) img = utils_deblur.wrap_boundary_liu(img, [ int(np.ceil(sf * w / 8 + 2) * 8), int(np.ceil(sf * h / 8 + 2) * 8) ]) img_wrap = sr.downsample_np(img, sf, center=False) img_wrap[:w, :h, :] = img_L img_L = img_wrap util.imshow(util.single2uint(img_L), title='LR image with noise level {}'.format( noise_level_img)) if show_img else None img_L = util.single2tensor4(img_L) img_L = img_L.to(device) # ------------------------------------ # (2) img_E # ------------------------------------ sigma = torch.tensor(noise_level_model).float().view([1, 1, 1, 1]) [img_L, kernel, sigma] = [el.to(device) for el in [img_L, kernel, sigma]] img_E = model(img_L, kernel, sf, sigma) img_E = util.tensor2uint(img_E) img_E = img_E[:sf * w, :sf * h, :] if need_H: # -------------------------------- # (3) img_H # -------------------------------- img_H = util.imread_uint(H_paths[idx], n_channels=n_channels) img_H = img_H.squeeze() img_H = util.modcrop(img_H, sf) # -------------------------------- # PSNR and SSIM # -------------------------------- psnr = util.calculate_psnr(img_E, img_H, border=border) ssim = util.calculate_ssim(img_E, img_H, border=border) test_results['psnr'].append(psnr) test_results['ssim'].append(ssim) logger.info('{:s} - PSNR: {:.2f} dB; SSIM: {:.4f}.'.format( img_name + ext, psnr, ssim)) util.imshow(np.concatenate([img_E, img_H], axis=1), title='Recovered / Ground-truth') if show_img else None if np.ndim(img_H) == 3: # RGB image img_E_y = util.rgb2ycbcr(img_E, only_y=True) img_H_y = util.rgb2ycbcr(img_H, only_y=True) psnr_y = util.calculate_psnr(img_E_y, img_H_y, border=border) ssim_y = util.calculate_ssim(img_E_y, img_H_y, border=border) test_results['psnr_y'].append(psnr_y) test_results['ssim_y'].append(ssim_y) # ------------------------------------ # save results # ------------------------------------ if save_E: util.imsave( img_E, os.path.join( E_path, img_name + '_x' + str(sf) + '_' + model_name + '.png')) if need_H: ave_psnr = sum(test_results['psnr']) / len(test_results['psnr']) ave_ssim = sum(test_results['ssim']) / len(test_results['ssim']) logger.info( 'Average PSNR/SSIM(RGB) - {} - x{} --PSNR: {:.2f} dB; SSIM: {:.4f}' .format(result_name, sf, ave_psnr, ave_ssim)) if np.ndim(img_H) == 3: ave_psnr_y = sum(test_results['psnr_y']) / len( test_results['psnr_y']) ave_ssim_y = sum(test_results['ssim_y']) / len( test_results['ssim_y']) logger.info( 'Average PSNR/SSIM( Y ) - {} - x{} - PSNR: {:.2f} dB; SSIM: {:.4f}' .format(result_name, sf, ave_psnr_y, ave_ssim_y))
def main(): utils_logger.logger_info('efficientsr_challenge', log_path='efficientsr_challenge.log') logger = logging.getLogger('efficientsr_challenge') # print(torch.__version__) # pytorch version # print(torch.version.cuda) # cuda version # print(torch.backends.cudnn.version()) # cudnn version # -------------------------------- # basic settings # -------------------------------- model_names = ['msrresnet', 'imdn'] model_id = 1 # set the model name model_name = model_names[model_id] logger.info('{:>16s} : {:s}'.format('Model Name', model_name)) testsets = 'testsets' # set path of testsets testset_L = 'DIV2K_valid_LR' # set current testing dataset; 'DIV2K_test_LR' testset_L = 'set12' save_results = True print_modelsummary = True # set False when calculating `Max Memery` and `Runtime` torch.cuda.set_device(0) # set GPU ID logger.info('{:>16s} : {:<d}'.format('GPU ID', torch.cuda.current_device())) torch.cuda.empty_cache() device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # -------------------------------- # define network and load model # -------------------------------- if model_name == 'msrresnet': from models.network_msrresnet import MSRResNet1 as net model = net(in_nc=3, out_nc=3, nc=64, nb=16, upscale=4) # define network model_path = os.path.join('model_zoo', 'msrresnet_x4_psnr.pth') # set model path elif model_name == 'imdn': from models.network_imdn import IMDN as net model = net(in_nc=3, out_nc=3, nc=64, nb=8, upscale=4, act_mode='L', upsample_mode='pixelshuffle') # define network model_path = os.path.join('model_zoo', 'imdn_x4.pth') # set model path model.load_state_dict(torch.load(model_path), strict=True) model.eval() for k, v in model.named_parameters(): v.requires_grad = False model = model.to(device) # -------------------------------- # print model summary # -------------------------------- if print_modelsummary: from utils.utils_modelsummary import get_model_activation, get_model_flops input_dim = (3, 256, 256) # set the input dimension activations, num_conv2d = get_model_activation(model, input_dim) logger.info('{:>16s} : {:<.4f} [M]'.format('#Activations', activations / 10**6)) logger.info('{:>16s} : {:<d}'.format('#Conv2d', num_conv2d)) flops = get_model_flops(model, input_dim, False) logger.info('{:>16s} : {:<.4f} [G]'.format('FLOPs', flops / 10**9)) num_parameters = sum(map(lambda x: x.numel(), model.parameters())) logger.info('{:>16s} : {:<.4f} [M]'.format('#Params', num_parameters / 10**6)) # -------------------------------- # read image # -------------------------------- L_path = os.path.join(testsets, testset_L) E_path = os.path.join(testsets, testset_L + '_' + model_name) util.mkdir(E_path) # record runtime test_results = OrderedDict() test_results['runtime'] = [] logger.info('{:>16s} : {:s}'.format('Input Path', L_path)) logger.info('{:>16s} : {:s}'.format('Output Path', E_path)) idx = 0 start = torch.cuda.Event(enable_timing=True) end = torch.cuda.Event(enable_timing=True) for img in util.get_image_paths(L_path): # -------------------------------- # (1) img_L # -------------------------------- idx += 1 img_name, ext = os.path.splitext(os.path.basename(img)) logger.info('{:->4d}--> {:>10s}'.format(idx, img_name + ext)) img_L = util.imread_uint(img, n_channels=3) img_L = util.uint2tensor4(img_L) torch.cuda.empty_cache() img_L = img_L.to(device) start.record() img_E = model(img_L) # logger.info('{:>16s} : {:<.3f} [M]'.format('Max Memery', torch.cuda.max_memory_allocated(torch.cuda.current_device())/1024**2)) # Memery end.record() 
torch.cuda.synchronize() test_results['runtime'].append(start.elapsed_time(end)) # milliseconds # torch.cuda.synchronize() # start = time.time() # img_E = model(img_L) # torch.cuda.synchronize() # end = time.time() # test_results['runtime'].append(end-start) # seconds # -------------------------------- # (2) img_E # -------------------------------- img_E = util.tensor2uint(img_E) if save_results: util.imsave(img_E, os.path.join(E_path, img_name + ext)) ave_runtime = sum(test_results['runtime']) / len( test_results['runtime']) / 1000.0 logger.info('------> Average runtime of ({}) is : {:.6f} seconds'.format( L_path, ave_runtime))
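# The timed loop above records every image, including the very first forward
# pass, which also pays one-off GPU start-up costs (context initialization,
# allocator growth), so the reported average can be slightly pessimistic. A
# hedged warm-up helper that could be called once before the timed loop (the
# name and the dummy input size are assumptions):
import torch

def warm_up(model, device, n_iters=3, size=(1, 3, 256, 256)):
    """Run a few untimed forward passes so later timings exclude start-up cost."""
    dummy = torch.rand(*size, device=device)
    with torch.no_grad():
        for _ in range(n_iters):
            model(dummy)
    if torch.cuda.is_available():
        torch.cuda.synchronize()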
def main(): # ---------------------------------------- # Preparation # ---------------------------------------- noise_level_img = 0 # default: 0, noise level for LR image noise_level_model = noise_level_img # noise level for model model_name = 'dpsr_x4_gan' # 'dpsr_x2' | 'dpsr_x3' | 'dpsr_x4' | 'dpsr_x4_gan' testset_name = 'set5' # test set, 'set5' | 'srbsd68' need_degradation = True # default: True x8 = False # default: False, x8 to boost performance sf = [int(s) for s in re.findall(r'\d+', model_name)][0] # scale factor show_img = False # default: False task_current = 'sr' # 'dn' for denoising | 'sr' for super-resolution n_channels = 3 # fixed nc = 96 # fixed, number of channels nb = 16 # fixed, number of conv layers model_pool = 'model_zoo' # fixed testsets = 'testsets' # fixed results = 'results' # fixed result_name = testset_name + '_' + model_name border = sf if task_current == 'sr' else 0 # shave boader to calculate PSNR and SSIM model_path = os.path.join(model_pool, model_name + '.pth') # ---------------------------------------- # L_path, E_path, H_path # ---------------------------------------- L_path = os.path.join(testsets, testset_name) # L_path, for Low-quality images H_path = L_path # H_path, for High-quality images E_path = os.path.join(results, result_name) # E_path, for Estimated images util.mkdir(E_path) if H_path == L_path: need_degradation = True logger_name = result_name utils_logger.logger_info(logger_name, log_path=os.path.join(E_path, logger_name + '.log')) logger = logging.getLogger(logger_name) need_H = True if H_path is not None else False device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # ---------------------------------------- # load model # ---------------------------------------- from models.network_dpsr import MSRResNet_prior as net model = net(in_nc=n_channels + 1, out_nc=n_channels, nc=nc, nb=nb, upscale=sf, act_mode='R', upsample_mode='pixelshuffle') model.load_state_dict(torch.load(model_path), strict=False) model.eval() for k, v in model.named_parameters(): v.requires_grad = False model = model.to(device) logger.info('Model path: {:s}'.format(model_path)) number_parameters = sum(map(lambda x: x.numel(), model.parameters())) logger.info('Params number: {}'.format(number_parameters)) test_results = OrderedDict() test_results['psnr'] = [] test_results['ssim'] = [] test_results['psnr_y'] = [] test_results['ssim_y'] = [] logger.info('model_name:{}, model sigma:{}, image sigma:{}'.format( model_name, noise_level_img, noise_level_model)) logger.info(L_path) L_paths = util.get_image_paths(L_path) H_paths = util.get_image_paths(H_path) if need_H else None for idx, img in enumerate(L_paths): # ------------------------------------ # (1) img_L # ------------------------------------ img_name, ext = os.path.splitext(os.path.basename(img)) # logger.info('{:->4d}--> {:>10s}'.format(idx+1, img_name+ext)) img_L = util.imread_uint(img, n_channels=n_channels) img_L = util.uint2single(img_L) # degradation process, bicubic downsampling + Gaussian noise if need_degradation: img_L = util.modcrop(img_L, sf) img_L = util.imresize_np(img_L, 1 / sf) np.random.seed(seed=0) # for reproducibility img_L += np.random.normal(0, noise_level_img / 255., img_L.shape) util.imshow(util.single2uint(img_L), title='LR image with noise level {}'.format( noise_level_img)) if show_img else None img_L = util.single2tensor4(img_L) noise_level_map = torch.full((1, 1, img_L.size(2), img_L.size(3)), noise_level_model / 255.).type_as(img_L) img_L = torch.cat((img_L, 
noise_level_map), dim=1) img_L = img_L.to(device) # ------------------------------------ # (2) img_E # ------------------------------------ if not x8: img_E = model(img_L) else: img_E = utils_model.test_mode(model, img_L, mode=3, sf=sf) img_E = util.tensor2uint(img_E) if need_H: # -------------------------------- # (3) img_H # -------------------------------- img_H = util.imread_uint(H_paths[idx], n_channels=n_channels) img_H = img_H.squeeze() img_H = util.modcrop(img_H, sf) # -------------------------------- # PSNR and SSIM # -------------------------------- psnr = util.calculate_psnr(img_E, img_H, border=border) ssim = util.calculate_ssim(img_E, img_H, border=border) test_results['psnr'].append(psnr) test_results['ssim'].append(ssim) logger.info('{:s} - PSNR: {:.2f} dB; SSIM: {:.4f}.'.format( img_name + ext, psnr, ssim)) util.imshow(np.concatenate([img_E, img_H], axis=1), title='Recovered / Ground-truth') if show_img else None if np.ndim(img_H) == 3: # RGB image img_E_y = util.rgb2ycbcr(img_E, only_y=True) img_H_y = util.rgb2ycbcr(img_H, only_y=True) psnr_y = util.calculate_psnr(img_E_y, img_H_y, border=border) ssim_y = util.calculate_ssim(img_E_y, img_H_y, border=border) test_results['psnr_y'].append(psnr_y) test_results['ssim_y'].append(ssim_y) # ------------------------------------ # save results # ------------------------------------ util.imsave(img_E, os.path.join(E_path, img_name + '.png')) if need_H: ave_psnr = sum(test_results['psnr']) / len(test_results['psnr']) ave_ssim = sum(test_results['ssim']) / len(test_results['ssim']) logger.info( 'Average PSNR/SSIM(RGB) - {} - x{} --PSNR: {:.2f} dB; SSIM: {:.4f}' .format(result_name, sf, ave_psnr, ave_ssim)) if np.ndim(img_H) == 3: ave_psnr_y = sum(test_results['psnr_y']) / len( test_results['psnr_y']) ave_ssim_y = sum(test_results['ssim_y']) / len( test_results['ssim_y']) logger.info( 'Average PSNR/SSIM( Y ) - {} - x{} - PSNR: {:.2f} dB; SSIM: {:.4f}' .format(result_name, sf, ave_psnr_y, ave_ssim_y))
def main(): # ---------------------------------------- # Preparation # ---------------------------------------- model_name = 'usrnet_tiny' # 'usrgan' | 'usrnet' | 'usrgan_tiny' | 'usrnet_tiny' testset_name = 'srcvte' # test set, 'set5' | 'srbsd68' | 'srcvte' test_sf = [ 4 ] # if 'gan' in model_name else [2, 3, 4] # scale factor, from {1,2,3,4} load_kernels = False show_img = False # default: False save_L = False # save LR image save_E = True # save estimated image save_LEH = False # save zoomed LR, E and H images # ---------------------------------------- # load testing kernels # ---------------------------------------- # kernels = hdf5storage.loadmat(os.path.join('kernels', 'kernels.mat'))['kernels'] kernels = loadmat(os.path.join( 'kernels', 'kernels_12.mat'))['kernels'] if load_kernels else None n_channels = 1 if 'gray' in model_name else 3 # 3 for color image, 1 for grayscale image model_pool = '/home/dengzeshuai/pretrained_models/USRnet/' # fixed testsets = '/home/datasets/sr/' # fixed results = 'results' # fixed noise_level_img = 0 # fixed: 0, noise level for LR image noise_level_model = noise_level_img # fixed, noise level of model, default 0 result_name = testset_name + '_' + model_name + '_blur' model_path = os.path.join(model_pool, model_name + '.pth') # ---------------------------------------- # L_path = H_path, E_path, logger # ---------------------------------------- L_path = os.path.join( testsets, testset_name) # L_path and H_path, fixed, for Low-quality images if testset_name == 'srcvte': L_path = os.path.join(testsets, testset_name, 'LR_val') H_path = os.path.join(testsets, testset_name, 'HR_val') video_names = os.listdir(H_path) E_path = os.path.join(results, result_name) # E_path, fixed, for Estimated images util.mkdir(E_path) logger_name = result_name utils_logger.logger_info(logger_name, log_path=os.path.join(E_path, logger_name + '.log')) logger = logging.getLogger(logger_name) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # ---------------------------------------- # load model # ---------------------------------------- if 'tiny' in model_name: model = net(n_iter=6, h_nc=32, in_nc=4, out_nc=3, nc=[16, 32, 64, 64], nb=2, act_mode="R", downsample_mode='strideconv', upsample_mode="convtranspose") else: model = net(n_iter=8, h_nc=64, in_nc=4, out_nc=3, nc=[64, 128, 256, 512], nb=2, act_mode="R", downsample_mode='strideconv', upsample_mode="convtranspose") model.load_state_dict(torch.load(model_path), strict=True) model.eval() for key, v in model.named_parameters(): v.requires_grad = False number_parameters = sum(map(lambda x: x.numel(), model.parameters())) model = model.to(device) logger.info('Model path: {:s}'.format(model_path)) logger.info('Params number: {}'.format(number_parameters)) logger.info('Model_name:{}, image sigma:{}'.format(model_name, noise_level_img)) logger.info(L_path) L_paths = util.get_image_paths(L_path) need_H = True if H_path is not None else False H_paths = util.get_image_paths(H_path) if need_H else None # -------------------------------- # read images # -------------------------------- test_results_ave = OrderedDict() test_results_ave['psnr_sf_k'] = [] test_results_ave['ssim_sf_k'] = [] test_results_ave['psnr_y_sf_k'] = [] test_results_ave['ssim_y_sf_k'] = [] for sf in test_sf: loop = kernels.shape[1] if load_kernels else 1 for k_index in range(loop): test_results = OrderedDict() test_results['psnr'] = [] test_results['ssim'] = [] test_results['psnr_y'] = [] test_results['ssim_y'] = [] if load_kernels: kernel = 
kernels[0, k_index].astype(np.float64) else: ## other kernels # kernel = utils_deblur.blurkernel_synthesis(h=25) # motion kernel kernel = utils_deblur.fspecial('gaussian', 25, 1.6) # Gaussian kernel kernel = sr.shift_pixel(kernel, sf) # pixel shift; optional kernel /= np.sum(kernel) util.surf(kernel) if show_img else None # idx = 0 for idx, img in enumerate(L_paths): # -------------------------------- # (1) classical degradation, img_L # -------------------------------- img_name, ext = os.path.splitext(os.path.basename(img)) if testset_name == 'srcvte': video_name = os.path.basename(os.path.dirname(img)) img_L = util.imread_uint(img, n_channels=n_channels) img_L = util.uint2single(img_L) # generate degraded LR image # img_L = ndimage.filters.convolve(img_H, kernel[..., np.newaxis], mode='wrap') # blur # img_L = sr.downsample_np(img_L, sf, center=False) # downsample, standard s-fold downsampler # img_L = util.uint2single(img_L) # uint2single # np.random.seed(seed=0) # for reproducibility # img_L += np.random.normal(0, noise_level_img, img_L.shape) # add AWGN util.imshow(util.single2uint(img_L)) if show_img else None x = util.single2tensor4(img_L) k = util.single2tensor4(kernel[..., np.newaxis]) sigma = torch.tensor(noise_level_model).float().view( [1, 1, 1, 1]) [x, k, sigma] = [el.to(device) for el in [x, k, sigma]] # -------------------------------- # (2) inference # -------------------------------- x = model(x, k, sf, sigma) # -------------------------------- # (3) img_E # -------------------------------- img_E = util.tensor2uint(x) if save_E: if testset_name == 'srcvte': save_path = os.path.join(E_path, video_name) util.mkdir(save_path) # util.imsave(img_E, os.path.join(save_path, img_name+'_k'+str(k_index+1)+'.png')) util.imsave(img_E, os.path.join(save_path, img_name + '.png')) else: util.imsave( img_E, os.path.join( E_path, img_name + '_x' + str(sf) + '_k' + str(k_index + 1) + '_' + model_name + '.png')) # -------------------------------- # (4) img_H # -------------------------------- if need_H: img_H = util.imread_uint(H_paths[idx], n_channels=n_channels) img_H = img_H.squeeze() img_H = util.modcrop(img_H, sf) psnr = util.calculate_psnr( img_E, img_H, border=sf) # change with your own border ssim = util.calculate_ssim(img_E, img_H, border=sf) test_results['psnr'].append(psnr) test_results['ssim'].append(ssim) if np.ndim(img_H) == 3: # RGB image img_E_y = util.rgb2ycbcr(img_E, only_y=True) img_H_y = util.rgb2ycbcr(img_H, only_y=True) psnr_y = util.calculate_psnr(img_E_y, img_H_y, border=sf) ssim_y = util.calculate_ssim(img_E_y, img_H_y, border=sf) test_results['psnr_y'].append(psnr_y) test_results['ssim_y'].append(ssim_y) logger.info( '{:->4d} --> {:>4s}--> {:>10s} -- x{:>2d} --k{:>2d} PSNR: {:.2f}dB SSIM: {:.4f}' .format(idx, video_name, img_name + ext, sf, k_index, psnr_y, ssim_y)) else: logger.info( '{:->4d} --> {:>4s}--> {:>10s} -- x{:>2d} --k{:>2d} PSNR: {:.2f}dB SSIM: {:.4f}' .format(idx, video_name, img_name + ext, sf, k_index, psnr, ssim)) if need_H: ave_psnr = sum(test_results['psnr']) / len( test_results['psnr']) ave_ssim = sum(test_results['ssim']) / len( test_results['ssim']) logger.info( 'Average PSNR/SSIM(RGB) - {} - x{} --PSNR: {:.2f} dB; SSIM: {:.4f}' .format(result_name, sf, ave_psnr, ave_ssim)) logger.info( '------> Average PSNR(RGB) - {} - x{}, kernel:{} sigma:{} --PSNR: {:.2f} dB; SSIM: {:.4f}' .format(testset_name, sf, k_index + 1, noise_level_model, ave_psnr, ave_ssim)) if np.ndim(img_H) == 3: ave_psnr_y = sum(test_results['psnr_y']) / len( 
test_results['psnr_y']) ave_ssim_y = sum(test_results['ssim_y']) / len( test_results['ssim_y']) logger.info( '------> Average PSNR(Y) - {} - x{}, kernel:{} sigma:{} --PSNR: {:.2f} dB; SSIM: {:.4f}' .format(testset_name, sf, k_index + 1, noise_level_model, ave_psnr_y, ave_ssim_y)) test_results_ave['psnr_sf_k'].append(ave_psnr) test_results_ave['ssim_sf_k'].append(ave_ssim) if np.ndim(img_H) == 3: test_results_ave['psnr_y_sf_k'].append(ave_psnr_y) test_results_ave['ssim_y_sf_k'].append(ave_ssim_y) logger.info(test_results_ave['psnr_sf_k']) logger.info(test_results_ave['ssim_sf_k']) if np.ndim(img_H) == 3: logger.info(test_results_ave['psnr_y_sf_k']) logger.info(test_results_ave['ssim_y_sf_k'])
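# The commented-out lines inside the loop above outline the classical degradation
# used with USRNet when only HR frames are available: circular blur with the test
# kernel, standard s-fold downsampling, and optional AWGN. A hedged stand-alone
# version (the helper name is illustrative; the [0::sf, 0::sf] indexing mirrors
# the center=False downsampler used above):
import numpy as np
from scipy import ndimage

def classical_degradation(img_H, kernel, sf, noise_level=0, seed=0):
    """img_H: H x W x C float image in [0, 1]; kernel: 2-D blur kernel summing to 1."""
    img_L = ndimage.convolve(img_H, kernel[..., np.newaxis], mode='wrap')  # circular blur
    img_L = img_L[0::sf, 0::sf, ...]                                       # s-fold downsampler
    if noise_level > 0:
        np.random.seed(seed)                                               # for reproducibility
        img_L = img_L + np.random.normal(0, noise_level, img_L.shape)      # add AWGN
    return img_L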