def main():

    # ----------------------------------------
    # Preparation
    # ----------------------------------------
    noise_level_img = 0 / 255.0            # set AWGN noise level for LR image, default: 0
    noise_level_model = noise_level_img    # set noise level of model, default: 0
    model_name = 'ircnn_color'             # set denoiser, 'drunet_color' | 'ircnn_color'
    testset_name = 'Set18'                 # set testing set, 'set18' | 'set24'
    x8 = True                              # set PGSE to boost performance, default: True
    iter_num = 40                          # set number of iterations, default: 40 for demosaicing
    modelSigma1 = 49                       # set sigma_1, default: 49
    modelSigma2 = max(0.6, noise_level_model * 255.)  # set sigma_2, default
    matlab_init = True

    show_img = False                       # default: False
    save_L = True                          # save LR image
    save_E = True                          # save estimated image
    save_LEH = False                       # save zoomed LR, E and H images
    border = 10                            # default 10 for demosaicing

    task_current = 'dm'                    # 'dm' for demosaicing
    n_channels = 3                         # fixed
    model_zoo = 'model_zoo'                # fixed
    testsets = 'testsets'                  # fixed
    results = 'results'                    # fixed
    result_name = testset_name + '_' + task_current + '_' + model_name
    model_path = os.path.join(model_zoo, model_name + '.pth')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    torch.cuda.empty_cache()

    # ----------------------------------------
    # L_path, E_path, H_path
    # ----------------------------------------
    L_path = os.path.join(testsets, testset_name)  # L_path, for Low-quality images
    E_path = os.path.join(results, result_name)    # E_path, for Estimated images
    util.mkdir(E_path)

    logger_name = result_name
    utils_logger.logger_info(logger_name, log_path=os.path.join(E_path, logger_name + '.log'))
    logger = logging.getLogger(logger_name)

    # ----------------------------------------
    # load model
    # ----------------------------------------
    if 'drunet' in model_name:
        from models.network_unet import UNetRes as net
        model = net(in_nc=n_channels + 1, out_nc=n_channels, nc=[64, 128, 256, 512], nb=4,
                    act_mode='R', downsample_mode="strideconv", upsample_mode="convtranspose")
        model.load_state_dict(torch.load(model_path), strict=True)
        model.eval()
        for _, v in model.named_parameters():
            v.requires_grad = False
        model = model.to(device)
    elif 'ircnn' in model_name:
        from models.network_dncnn import IRCNN as net
        model = net(in_nc=n_channels, out_nc=n_channels, nc=64)
        model25 = torch.load(model_path)
        former_idx = 0

    logger.info('model_name:{}, image sigma:{:.3f}, model sigma:{:.3f}'.format(
        model_name, noise_level_img, noise_level_model))
    logger.info('Model path: {:s}'.format(model_path))
    logger.info(L_path)
    L_paths = util.get_image_paths(L_path)

    test_results = OrderedDict()
    test_results['psnr'] = []

    for idx, img in enumerate(L_paths):

        # --------------------------------
        # (1) get img_H and img_L
        # --------------------------------
        idx += 1
        img_name, ext = os.path.splitext(os.path.basename(img))
        img_H = util.imread_uint(img, n_channels=n_channels)
        CFA, CFA4, mosaic, mask = utils_mosaic.mosaic_CFA_Bayer(img_H)

        # --------------------------------
        # (2) initialize x
        # --------------------------------
        if matlab_init:  # matlab demosaicing for initialization
            CFA4 = util.uint2tensor4(CFA4).to(device)
            x = utils_mosaic.dm_matlab(CFA4)
        else:
            x = cv2.cvtColor(CFA, cv2.COLOR_BAYER_BG2RGB_EA)
            x = util.uint2tensor4(x).to(device)

        img_L = util.tensor2uint(x)
        y = util.uint2tensor4(mosaic).to(device)

        util.imshow(img_L) if show_img else None

        mask = util.single2tensor4(mask.astype(np.float32)).to(device)

        # --------------------------------
        # (3) get rhos and sigmas
        # --------------------------------
        rhos, sigmas = pnp.get_rho_sigma(sigma=max(0.255 / 255., noise_level_img), iter_num=iter_num,
                                         modelSigma1=modelSigma1, modelSigma2=modelSigma2, w=1.0)
        rhos, sigmas = torch.tensor(rhos).to(device), torch.tensor(sigmas).to(device)

        # --------------------------------
        # (4) main iterations
        # --------------------------------
        for i in range(iter_num):

            # --------------------------------
            # step 1, closed-form solution
            # --------------------------------
            x = (y + rhos[i].float() * x).div(mask + rhos[i])

            # --------------------------------
            # step 2, denoiser
            # --------------------------------
            if 'ircnn' in model_name:
                current_idx = int(np.ceil(sigmas[i].cpu().numpy() * 255. / 2.) - 1)
                if current_idx != former_idx:
                    model.load_state_dict(model25[str(current_idx)], strict=True)
                    model.eval()
                    for _, v in model.named_parameters():
                        v.requires_grad = False
                    model = model.to(device)
                former_idx = current_idx

            x = torch.clamp(x, 0, 1)

            if x8:
                x = util.augment_img_tensor4(x, i % 8)

            if 'drunet' in model_name:
                x = torch.cat((x, sigmas[i].float().repeat(1, 1, x.shape[2], x.shape[3])), dim=1)
                x = utils_model.test_mode(model, x, mode=2, refield=32, min_size=256, modulo=16)
                # x = model(x)
            elif 'ircnn' in model_name:
                x = model(x)

            if x8:
                if i % 8 == 3 or i % 8 == 5:
                    x = util.augment_img_tensor4(x, 8 - i % 8)
                else:
                    x = util.augment_img_tensor4(x, i % 8)

            x[mask.to(torch.bool)] = y[mask.to(torch.bool)]

        # --------------------------------
        # (5) img_E
        # --------------------------------
        img_E = util.tensor2uint(x)
        psnr = util.calculate_psnr(img_E, img_H, border=border)
        test_results['psnr'].append(psnr)
        logger.info('{:->4d}--> {:>10s} -- PSNR: {:.2f}dB'.format(idx, img_name + ext, psnr))

        if save_E:
            util.imsave(img_E, os.path.join(E_path, img_name + '_' + model_name + '.png'))

        if save_L:
            util.imsave(img_L, os.path.join(E_path, img_name + '_L.png'))

        if save_LEH:
            util.imsave(np.concatenate([img_L, img_E, img_H], axis=1),
                        os.path.join(E_path, img_name + model_name + '_LEH.png'))

    ave_psnr = sum(test_results['psnr']) / len(test_results['psnr'])
    logger.info('------> Average PSNR(RGB) of ({}) is : {:.2f} dB'.format(testset_name, ave_psnr))
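
# --------------------------------------------------------------------------------
# Step 1 of the demosaicing loop above is the closed-form solution of the masked
# data term in half-quadratic splitting:
#     x = argmin_x ||M*x - y||^2 + rho*||x - z||^2  =>  x = (y + rho*z) / (M + rho),
# where M is the Bayer sampling mask, y the mosaicked observation and z the current
# denoiser output. Below is a minimal, self-contained sketch of that update on toy
# tensors; the helper name and the toy data are illustrative, not part of the repo.
# --------------------------------------------------------------------------------
import torch


def masked_data_step(y, z, mask, rho):
    """Element-wise closed-form solution of the masked least-squares sub-problem."""
    return (y + rho * z) / (mask + rho)


# usage sketch:
#   mask = (torch.rand(1, 3, 8, 8) > 0.5).float()  # toy Bayer-like sampling mask
#   y = torch.rand(1, 3, 8, 8) * mask              # toy mosaicked observation
#   z = torch.rand(1, 3, 8, 8)                     # toy denoiser output
#   x = masked_data_step(y, z, mask, rho=0.23)
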
def main(): """ # ---------------------------------------------------------------------------------- # In real applications, you should set proper # - "noise_level_img": from [3, 25], set 3 for clean image, try 15 for very noisy LR images # - "k" (or "kernel_width"): blur kernel is very important!!! kernel_width from [0.6, 3.0] # to get the best performance. # ---------------------------------------------------------------------------------- """ ############################################################################## testset_name = 'Set3C' # set test set, 'set5' | 'srbsd68' noise_level_img = 3 # set noise level of image, from [3, 25], set 3 for clean image model_name = 'drunet_color' # 'ircnn_color' # set denoiser, | 'drunet_color' | 'ircnn_gray' | 'drunet_gray' | 'ircnn_color' sf = 2 # set scale factor, 1, 2, 3, 4 iter_num = 24 # set number of iterations, default: 24 for SISR # -------------------------------- # set blur kernel # -------------------------------- kernel_width_default_x1234 = [ 0.6, 0.9, 1.7, 2.2 ] # Gaussian kernel widths for x1, x2, x3, x4 noise_level_model = noise_level_img / 255. # noise level of model kernel_width = kernel_width_default_x1234[sf - 1] """ # set your own kernel width !!!!!!!!!! """ # kernel_width = 1.0 k = utils_deblur.fspecial('gaussian', 25, kernel_width) k = sr.shift_pixel(k, sf) # shift the kernel k /= np.sum(k) ############################################################################## show_img = False util.surf(k) if show_img else None x8 = True # default: False, x8 to boost performance modelSigma1 = 49 # set sigma_1, default: 49 modelSigma2 = max(sf, noise_level_model * 255.) classical_degradation = True # set classical degradation or bicubic degradation task_current = 'sr' # 'sr' for super-resolution n_channels = 1 if 'gray' in model_name else 3 # fixed model_zoo = 'model_zoo' # fixed testsets = 'testsets' # fixed results = 'results' # fixed result_name = testset_name + '_realapplications_' + task_current + '_' + model_name model_path = os.path.join(model_zoo, model_name + '.pth') device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') torch.cuda.empty_cache() # ---------------------------------------- # L_path, E_path, H_path # ---------------------------------------- L_path = os.path.join(testsets, testset_name) # L_path, for Low-quality images E_path = os.path.join(results, result_name) # E_path, for Estimated images util.mkdir(E_path) logger_name = result_name utils_logger.logger_info(logger_name, log_path=os.path.join(E_path, logger_name + '.log')) logger = logging.getLogger(logger_name) # ---------------------------------------- # load model # ---------------------------------------- if 'drunet' in model_name: from models.network_unet import UNetRes as net model = net(in_nc=n_channels + 1, out_nc=n_channels, nc=[64, 128, 256, 512], nb=4, act_mode='R', downsample_mode="strideconv", upsample_mode="convtranspose") model.load_state_dict(torch.load(model_path), strict=True) model.eval() for _, v in model.named_parameters(): v.requires_grad = False model = model.to(device) elif 'ircnn' in model_name: from models.network_dncnn import IRCNN as net model = net(in_nc=n_channels, out_nc=n_channels, nc=64) model25 = torch.load(model_path) former_idx = 0 logger.info('model_name:{}, image sigma:{:.3f}, model sigma:{:.3f}'.format( model_name, noise_level_img, noise_level_model)) logger.info('Model path: {:s}'.format(model_path)) logger.info(L_path) L_paths = util.get_image_paths(L_path) for idx, img in enumerate(L_paths): # 
-------------------------------- # (1) get img_L # -------------------------------- logger.info('Model path: {:s} Image: {:s}'.format(model_path, img)) img_name, ext = os.path.splitext(os.path.basename(img)) img_L = util.imread_uint(img, n_channels=n_channels) img_L = util.uint2single(img_L) img_L = util.modcrop(img_L, 8) # modcrop # -------------------------------- # (2) get rhos and sigmas # -------------------------------- rhos, sigmas = pnp.get_rho_sigma(sigma=max(0.255 / 255., noise_level_model), iter_num=iter_num, modelSigma1=modelSigma1, modelSigma2=modelSigma2, w=1) rhos, sigmas = torch.tensor(rhos).to(device), torch.tensor(sigmas).to( device) # -------------------------------- # (3) initialize x, and pre-calculation # -------------------------------- x = cv2.resize(img_L, (img_L.shape[1] * sf, img_L.shape[0] * sf), interpolation=cv2.INTER_CUBIC) if np.ndim(x) == 2: x = x[..., None] if classical_degradation: x = sr.shift_pixel(x, sf) x = util.single2tensor4(x).to(device) img_L_tensor, k_tensor = util.single2tensor4( img_L), util.single2tensor4(np.expand_dims(k, 2)) [k_tensor, img_L_tensor] = util.todevice([k_tensor, img_L_tensor], device) FB, FBC, F2B, FBFy = sr.pre_calculate(img_L_tensor, k_tensor, sf) # -------------------------------- # (4) main iterations # -------------------------------- for i in range(iter_num): print('Iter: {} / {}'.format(i, iter_num)) # -------------------------------- # step 1, FFT # -------------------------------- tau = rhos[i].float().repeat(1, 1, 1, 1) x = sr.data_solution(x, FB, FBC, F2B, FBFy, tau, sf) if 'ircnn' in model_name: current_idx = np.int( np.ceil(sigmas[i].cpu().numpy() * 255. / 2.) - 1) if current_idx != former_idx: model.load_state_dict(model25[str(current_idx)], strict=True) model.eval() for _, v in model.named_parameters(): v.requires_grad = False model = model.to(device) former_idx = current_idx # -------------------------------- # step 2, denoiser # -------------------------------- if x8: x = util.augment_img_tensor4(x, i % 8) if 'drunet' in model_name: x = torch.cat( (x, sigmas[i].repeat(1, 1, x.shape[2], x.shape[3])), dim=1) x = utils_model.test_mode(model, x, mode=2, refield=64, min_size=256, modulo=16) elif 'ircnn' in model_name: x = model(x) if x8: if i % 8 == 3 or i % 8 == 5: x = util.augment_img_tensor4(x, 8 - i % 8) else: x = util.augment_img_tensor4(x, i % 8) # -------------------------------- # (3) img_E # -------------------------------- img_E = util.tensor2uint(x) util.imsave( img_E, os.path.join(E_path, img_name + '_x' + str(sf) + '_' + model_name + '.png'))
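
# --------------------------------------------------------------------------------
# The iterations above rely on pnp.get_rho_sigma to build a decreasing sequence of
# denoiser noise levels sigma_k (from modelSigma1 down to modelSigma2) together with
# matching penalty parameters rho_k proportional to sigma^2 / sigma_k^2. The function
# below is a hedged sketch of such a schedule; it is not guaranteed to be identical
# to the repo's implementation, and the 0.23 scaling factor is an assumption.
# --------------------------------------------------------------------------------
import numpy as np


def rho_sigma_schedule(sigma=2.55 / 255., iter_num=15, modelSigma1=49.0, modelSigma2=2.55, w=1.0, lambda_=0.23):
    """Blend log- and linearly-spaced noise levels, then derive the HQS penalties."""
    sig_log = np.logspace(np.log10(modelSigma1), np.log10(modelSigma2), iter_num)
    sig_lin = np.linspace(modelSigma1, modelSigma2, iter_num)
    sigmas = (w * sig_log + (1 - w) * sig_lin) / 255.
    rhos = lambda_ * (sigma ** 2) / (sigmas ** 2)
    return rhos, sigmas
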
def main():

    # ----------------------------------------
    # Preparation
    # ----------------------------------------
    noise_level_img = 15                 # set AWGN noise level for noisy image
    noise_level_model = noise_level_img  # set noise level for model
    model_name = 'drunet_gray'           # set denoiser model, 'drunet_gray' | 'drunet_color'
    testset_name = 'bsd68'               # set test set, 'bsd68' | 'cbsd68' | 'set12'
    x8 = False                           # default: False, x8 to boost performance
    show_img = False                     # default: False
    border = 0                           # shave border to calculate PSNR and SSIM

    if 'color' in model_name:
        n_channels = 3                   # 3 for color image
    else:
        n_channels = 1                   # 1 for grayscale image

    model_pool = 'model_zoo'             # fixed
    testsets = 'testsets'                # fixed
    results = 'results'                  # fixed
    task_current = 'dn'                  # 'dn' for denoising
    result_name = testset_name + '_' + task_current + '_' + model_name
    model_path = os.path.join(model_pool, model_name + '.pth')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    torch.cuda.empty_cache()

    # ----------------------------------------
    # L_path, E_path, H_path
    # ----------------------------------------
    L_path = os.path.join(testsets, testset_name)  # L_path, for Low-quality images
    E_path = os.path.join(results, result_name)    # E_path, for Estimated images
    util.mkdir(E_path)

    logger_name = result_name
    utils_logger.logger_info(logger_name, log_path=os.path.join(E_path, logger_name + '.log'))
    logger = logging.getLogger(logger_name)

    # ----------------------------------------
    # load model
    # ----------------------------------------
    from models.network_unet import UNetRes as net
    model = net(in_nc=n_channels + 1, out_nc=n_channels, nc=[64, 128, 256, 512], nb=4,
                act_mode='R', downsample_mode="strideconv", upsample_mode="convtranspose")
    model.load_state_dict(torch.load(model_path), strict=True)
    model.eval()
    for k, v in model.named_parameters():
        v.requires_grad = False
    model = model.to(device)
    logger.info('Model path: {:s}'.format(model_path))
    number_parameters = sum(map(lambda x: x.numel(), model.parameters()))
    logger.info('Params number: {}'.format(number_parameters))

    test_results = OrderedDict()
    test_results['psnr'] = []
    test_results['ssim'] = []

    logger.info('model_name:{}, image sigma:{}, model sigma:{}'.format(
        model_name, noise_level_img, noise_level_model))
    logger.info(L_path)
    L_paths = util.get_image_paths(L_path)

    for idx, img in enumerate(L_paths):

        # ------------------------------------
        # (1) img_L
        # ------------------------------------
        img_name, ext = os.path.splitext(os.path.basename(img))
        # logger.info('{:->4d}--> {:>10s}'.format(idx+1, img_name+ext))
        img_H = util.imread_uint(img, n_channels=n_channels)
        img_L = util.uint2single(img_H)

        # Add noise without clipping
        np.random.seed(seed=0)  # for reproducibility
        img_L += np.random.normal(0, noise_level_img / 255., img_L.shape)

        util.imshow(util.single2uint(img_L),
                    title='Noisy image with noise level {}'.format(noise_level_img)) if show_img else None

        img_L = util.single2tensor4(img_L)
        img_L = torch.cat((img_L, torch.FloatTensor([noise_level_model / 255.]).repeat(
            1, 1, img_L.shape[2], img_L.shape[3])), dim=1)
        img_L = img_L.to(device)

        # ------------------------------------
        # (2) img_E
        # ------------------------------------
        if not x8 and img_L.size(2) % 8 == 0 and img_L.size(3) % 8 == 0:
            img_E = model(img_L)
        elif not x8 and (img_L.size(2) % 8 != 0 or img_L.size(3) % 8 != 0):
            img_E = utils_model.test_mode(model, img_L, refield=64, mode=5)
        elif x8:
            img_E = utils_model.test_mode(model, img_L, mode=3)

        img_E = util.tensor2uint(img_E)

        # --------------------------------
        # PSNR and SSIM
        # --------------------------------
        if n_channels == 1:
            img_H = img_H.squeeze()
        psnr = util.calculate_psnr(img_E, img_H, border=border)
        ssim = util.calculate_ssim(img_E, img_H, border=border)
        test_results['psnr'].append(psnr)
        test_results['ssim'].append(ssim)
        logger.info('{:s} - PSNR: {:.2f} dB; SSIM: {:.4f}.'.format(img_name + ext, psnr, ssim))

        # ------------------------------------
        # save results
        # ------------------------------------
        util.imsave(img_E, os.path.join(E_path, img_name + ext))

    ave_psnr = sum(test_results['psnr']) / len(test_results['psnr'])
    ave_ssim = sum(test_results['ssim']) / len(test_results['ssim'])
    logger.info('Average PSNR/SSIM(RGB) - {} - PSNR: {:.2f} dB; SSIM: {:.4f}'.format(
        result_name, ave_psnr, ave_ssim))
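
# --------------------------------------------------------------------------------
# util.calculate_psnr is called above with a `border` argument that shaves pixels
# before measuring. The stand-alone helper below assumes the standard definition
# PSNR = 10*log10(255^2 / MSE) for uint8 images; the function name is illustrative
# and it is not the repo's implementation.
# --------------------------------------------------------------------------------
import numpy as np


def psnr_uint8(img_E, img_H, border=0):
    """PSNR between two equally-shaped uint8 images, shaving `border` pixels per side."""
    img_E = img_E.astype(np.float64)
    img_H = img_H.astype(np.float64)
    if border > 0:
        img_E = img_E[border:-border, border:-border, ...]
        img_H = img_H[border:-border, border:-border, ...]
    mse = np.mean((img_E - img_H) ** 2)
    return float('inf') if mse == 0 else 10 * np.log10(255.0 ** 2 / mse)
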
def main():

    # ----------------------------------------
    # Preparation
    # ----------------------------------------
    noise_level_img = 7.65 / 255.0       # default: 0, noise level for LR image
    noise_level_model = noise_level_img  # noise level of model, default: 0
    model_name = 'drunet_gray'           # 'drunet_gray' | 'drunet_color' | 'ircnn_gray' | 'ircnn_color'
    testset_name = 'Set3C'               # test set, 'set5' | 'srbsd68'
    x8 = True                            # default: False, x8 to boost performance
    iter_num = 8                         # number of iterations
    modelSigma1 = 49
    modelSigma2 = noise_level_model * 255.

    show_img = False                     # default: False
    save_L = True                        # save LR image
    save_E = True                        # save estimated image
    save_LEH = False                     # save zoomed LR, E and H images
    border = 0

    # --------------------------------
    # load kernel
    # --------------------------------
    kernels = hdf5storage.loadmat(os.path.join('kernels', 'Levin09.mat'))['kernels']

    sf = 1
    task_current = 'deblur'              # 'deblur' for deblurring
    n_channels = 3 if 'color' in model_name else 1  # fixed
    model_zoo = 'model_zoo'              # fixed
    testsets = 'testsets'                # fixed
    results = 'results'                  # fixed
    result_name = testset_name + '_' + task_current + '_' + model_name
    model_path = os.path.join(model_zoo, model_name + '.pth')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    torch.cuda.empty_cache()

    # ----------------------------------------
    # L_path, E_path, H_path
    # ----------------------------------------
    L_path = os.path.join(testsets, testset_name)  # L_path, for Low-quality images
    E_path = os.path.join(results, result_name)    # E_path, for Estimated images
    util.mkdir(E_path)

    logger_name = result_name
    utils_logger.logger_info(logger_name, log_path=os.path.join(E_path, logger_name + '.log'))
    logger = logging.getLogger(logger_name)

    # ----------------------------------------
    # load model
    # ----------------------------------------
    if 'drunet' in model_name:
        from models.network_unet import UNetRes as net
        model = net(in_nc=n_channels + 1, out_nc=n_channels, nc=[64, 128, 256, 512], nb=4,
                    act_mode='R', downsample_mode="strideconv", upsample_mode="convtranspose")
        model.load_state_dict(torch.load(model_path), strict=True)
        model.eval()
        for _, v in model.named_parameters():
            v.requires_grad = False
        model = model.to(device)
    elif 'ircnn' in model_name:
        from models.network_dncnn import IRCNN as net
        model = net(in_nc=n_channels, out_nc=n_channels, nc=64)
        model25 = torch.load(model_path)
        former_idx = 0

    logger.info('model_name:{}, image sigma:{:.3f}, model sigma:{:.3f}'.format(
        model_name, noise_level_img, noise_level_model))
    logger.info('Model path: {:s}'.format(model_path))
    logger.info(L_path)
    L_paths = util.get_image_paths(L_path)

    test_results_ave = OrderedDict()
    test_results_ave['psnr'] = []  # record average PSNR for each kernel

    for k_index in range(kernels.shape[1]):

        logger.info('-------k:{:>2d} ---------'.format(k_index))
        test_results = OrderedDict()
        test_results['psnr'] = []
        k = kernels[0, k_index].astype(np.float64)
        util.imshow(k) if show_img else None

        for idx, img in enumerate(L_paths):

            # --------------------------------
            # (1) get img_L
            # --------------------------------
            img_name, ext = os.path.splitext(os.path.basename(img))
            img_H = util.imread_uint(img, n_channels=n_channels)
            img_H = util.modcrop(img_H, 8)  # modcrop

            img_L = ndimage.convolve(img_H, np.expand_dims(k, axis=2), mode='wrap')
            util.imshow(img_L) if show_img else None
            img_L = util.uint2single(img_L)

            np.random.seed(seed=0)  # for reproducibility
            img_L += np.random.normal(0, noise_level_img, img_L.shape)  # add AWGN

            # --------------------------------
            # (2) get rhos and sigmas
            # --------------------------------
            rhos, sigmas = pnp.get_rho_sigma(sigma=max(0.255 / 255., noise_level_model), iter_num=iter_num,
                                             modelSigma1=modelSigma1, modelSigma2=modelSigma2, w=1.0)
            rhos, sigmas = torch.tensor(rhos).to(device), torch.tensor(sigmas).to(device)

            # --------------------------------
            # (3) initialize x, and pre-calculation
            # --------------------------------
            x = util.single2tensor4(img_L).to(device)

            img_L_tensor, k_tensor = util.single2tensor4(img_L), util.single2tensor4(np.expand_dims(k, 2))
            [k_tensor, img_L_tensor] = util.todevice([k_tensor, img_L_tensor], device)
            FB, FBC, F2B, FBFy = sr.pre_calculate(img_L_tensor, k_tensor, sf)

            # --------------------------------
            # (4) main iterations
            # --------------------------------
            for i in range(iter_num):

                # --------------------------------
                # step 1, FFT
                # --------------------------------
                tau = rhos[i].float().repeat(1, 1, 1, 1)
                x = sr.data_solution(x, FB, FBC, F2B, FBFy, tau, sf)

                if 'ircnn' in model_name:
                    current_idx = int(np.ceil(sigmas[i].cpu().numpy() * 255. / 2.) - 1)
                    if current_idx != former_idx:
                        model.load_state_dict(model25[str(current_idx)], strict=True)
                        model.eval()
                        for _, v in model.named_parameters():
                            v.requires_grad = False
                        model = model.to(device)
                    former_idx = current_idx

                # --------------------------------
                # step 2, denoiser
                # --------------------------------
                if x8:
                    x = util.augment_img_tensor4(x, i % 8)

                if 'drunet' in model_name:
                    x = torch.cat((x, sigmas[i].float().repeat(1, 1, x.shape[2], x.shape[3])), dim=1)
                    x = utils_model.test_mode(model, x, mode=2, refield=32, min_size=256, modulo=16)
                elif 'ircnn' in model_name:
                    x = model(x)

                if x8:
                    if i % 8 == 3 or i % 8 == 5:
                        x = util.augment_img_tensor4(x, 8 - i % 8)
                    else:
                        x = util.augment_img_tensor4(x, i % 8)

            # --------------------------------
            # (5) img_E
            # --------------------------------
            img_E = util.tensor2uint(x)
            if n_channels == 1:
                img_H = img_H.squeeze()

            if save_E:
                util.imsave(img_E, os.path.join(E_path, img_name + '_k' + str(k_index) + '_' + model_name + '.png'))

            # --------------------------------
            # (6) img_LEH
            # --------------------------------
            if save_LEH:
                img_L = util.single2uint(img_L)
                k_v = k / np.max(k) * 1.0
                k_v = util.single2uint(np.tile(k_v[..., np.newaxis], [1, 1, 3]))
                k_v = cv2.resize(k_v, (3 * k_v.shape[1], 3 * k_v.shape[0]), interpolation=cv2.INTER_NEAREST)
                img_I = cv2.resize(img_L, (sf * img_L.shape[1], sf * img_L.shape[0]), interpolation=cv2.INTER_NEAREST)
                img_I[:k_v.shape[0], -k_v.shape[1]:, :] = k_v
                img_I[:img_L.shape[0], :img_L.shape[1], :] = img_L
                util.imshow(np.concatenate([img_I, img_E, img_H], axis=1),
                            title='LR / Recovered / Ground-truth') if show_img else None
                util.imsave(np.concatenate([img_I, img_E, img_H], axis=1),
                            os.path.join(E_path, img_name + '_k' + str(k_index) + '_LEH.png'))

            if save_L:
                util.imsave(util.single2uint(img_L),
                            os.path.join(E_path, img_name + '_k' + str(k_index) + '_LR.png'))

            psnr = util.calculate_psnr(img_E, img_H, border=border)  # change with your own border
            test_results['psnr'].append(psnr)
            logger.info('{:->4d}--> {:>10s} --k:{:>2d} PSNR: {:.2f}dB'.format(idx + 1, img_name + ext, k_index, psnr))

        # --------------------------------
        # Average PSNR
        # --------------------------------
        ave_psnr = sum(test_results['psnr']) / len(test_results['psnr'])
        logger.info('------> Average PSNR of ({}), kernel: ({}) sigma: ({:.2f}): {:.2f} dB'.format(
            testset_name, k_index, noise_level_model, ave_psnr))
        test_results_ave['psnr'].append(ave_psnr)
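
# --------------------------------------------------------------------------------
# The `x8` branches above implement a periodical geometric self-ensemble: one of
# 8 flip/rotation transforms is applied before the denoiser and undone afterwards;
# modes 3 and 5 get special treatment because those transforms are not self-inverse.
# The mapping below is a hypothetical stand-in with its own index convention, not
# necessarily the same as util.augment_img_tensor4.
# --------------------------------------------------------------------------------
import torch


def dihedral(x, mode):
    """Rotate an NCHW tensor by 90*(mode % 4) degrees, then flip vertically if mode >= 4."""
    x = torch.rot90(x, k=mode % 4, dims=(2, 3))
    if mode >= 4:
        x = torch.flip(x, dims=(2,))
    return x


def dihedral_inverse(x, mode):
    """Undo dihedral(x, mode)."""
    if mode >= 4:
        x = torch.flip(x, dims=(2,))
    return torch.rot90(x, k=-(mode % 4), dims=(2, 3))


# round-trip check:
#   x = torch.rand(1, 3, 16, 16)
#   assert all(torch.equal(dihedral_inverse(dihedral(x, m), m), x) for m in range(8))
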
def main():

    # ----------------------------------------
    # Preparation
    # ----------------------------------------
    noise_level_img = 0/255.0            # set AWGN noise level for LR image, default: 0
    noise_level_model = noise_level_img  # set noise level of model, default: 0
    model_name = 'drunet_color'          # set denoiser, 'drunet_color' | 'ircnn_gray' | 'drunet_gray' | 'ircnn_color'
    testset_name = 'srbsd68'             # set test set, 'set5' | 'srbsd68'
    x8 = True                            # default: False, x8 to boost performance
    test_sf = [2]                        # set scale factor, default: [2, 3, 4], [2], [3], [4]
    iter_num = 24                        # set number of iterations, default: 24 for SISR
    modelSigma1 = 49                     # set sigma_1, default: 49
    classical_degradation = True         # set classical degradation or bicubic degradation
    show_img = False                     # default: False
    save_L = True                        # save LR image
    save_E = True                        # save estimated image
    save_LEH = False                     # save zoomed LR, E and H images

    task_current = 'sr'                  # 'sr' for super-resolution
    n_channels = 1 if 'gray' in model_name else 3  # fixed
    model_zoo = 'model_zoo'              # fixed
    testsets = 'testsets'                # fixed
    results = 'results'                  # fixed
    result_name = testset_name + '_' + task_current + '_' + model_name
    model_path = os.path.join(model_zoo, model_name+'.pth')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    torch.cuda.empty_cache()

    # ----------------------------------------
    # L_path, E_path, H_path
    # ----------------------------------------
    L_path = os.path.join(testsets, testset_name)  # L_path, for Low-quality images
    E_path = os.path.join(results, result_name)    # E_path, for Estimated images
    util.mkdir(E_path)

    logger_name = result_name
    utils_logger.logger_info(logger_name, log_path=os.path.join(E_path, logger_name+'.log'))
    logger = logging.getLogger(logger_name)

    # ----------------------------------------
    # load model
    # ----------------------------------------
    if 'drunet' in model_name:
        from models.network_unet import UNetRes as net
        model = net(in_nc=n_channels+1, out_nc=n_channels, nc=[64, 128, 256, 512], nb=4,
                    act_mode='R', downsample_mode="strideconv", upsample_mode="convtranspose")
        model.load_state_dict(torch.load(model_path), strict=True)
        model.eval()
        for _, v in model.named_parameters():
            v.requires_grad = False
        model = model.to(device)
    elif 'ircnn' in model_name:
        from models.network_dncnn import IRCNN as net
        model = net(in_nc=n_channels, out_nc=n_channels, nc=64)
        model25 = torch.load(model_path)
        former_idx = 0

    logger.info('model_name:{}, image sigma:{:.3f}, model sigma:{:.3f}'.format(
        model_name, noise_level_img, noise_level_model))
    logger.info('Model path: {:s}'.format(model_path))
    logger.info(L_path)
    L_paths = util.get_image_paths(L_path)

    # --------------------------------
    # load kernel
    # --------------------------------
    # kernels = hdf5storage.loadmat(os.path.join('kernels', 'Levin09.mat'))['kernels']
    if classical_degradation:
        kernels = hdf5storage.loadmat(os.path.join('kernels', 'kernels_12.mat'))['kernels']
    else:
        kernels = hdf5storage.loadmat(os.path.join('kernels', 'kernel_bicubicx234.mat'))['kernels']

    test_results_ave = OrderedDict()
    test_results_ave['psnr_sf_k'] = []
    test_results_ave['psnr_y_sf_k'] = []

    for sf in test_sf:

        border = sf
        modelSigma2 = max(sf, noise_level_model*255.)
        k_num = 8 if classical_degradation else 1

        for k_index in range(k_num):

            logger.info('--------- sf:{:>1d} --k:{:>2d} ---------'.format(sf, k_index))
            test_results = OrderedDict()
            test_results['psnr'] = []
            test_results['psnr_y'] = []

            if not classical_degradation:  # for bicubic degradation
                k_index = sf-2
            k = kernels[0, k_index].astype(np.float64)
            util.surf(k) if show_img else None

            for idx, img in enumerate(L_paths):

                # --------------------------------
                # (1) get img_L
                # --------------------------------
                img_name, ext = os.path.splitext(os.path.basename(img))
                img_H = util.imread_uint(img, n_channels=n_channels)
                img_H = util.modcrop(img_H, sf)  # modcrop

                if classical_degradation:
                    img_L = sr.classical_degradation(img_H, k, sf)
                    util.imshow(img_L) if show_img else None
                    img_L = util.uint2single(img_L)
                else:
                    img_L = util.imresize_np(util.uint2single(img_H), 1/sf)

                np.random.seed(seed=0)  # for reproducibility
                img_L += np.random.normal(0, noise_level_img, img_L.shape)  # add AWGN

                # --------------------------------
                # (2) get rhos and sigmas
                # --------------------------------
                rhos, sigmas = pnp.get_rho_sigma(sigma=max(0.255/255., noise_level_model), iter_num=iter_num,
                                                 modelSigma1=modelSigma1, modelSigma2=modelSigma2, w=1)
                rhos, sigmas = torch.tensor(rhos).to(device), torch.tensor(sigmas).to(device)

                # --------------------------------
                # (3) initialize x, and pre-calculation
                # --------------------------------
                x = cv2.resize(img_L, (img_L.shape[1]*sf, img_L.shape[0]*sf), interpolation=cv2.INTER_CUBIC)
                if np.ndim(x) == 2:
                    x = x[..., None]

                if classical_degradation:
                    x = sr.shift_pixel(x, sf)
                x = util.single2tensor4(x).to(device)

                img_L_tensor, k_tensor = util.single2tensor4(img_L), util.single2tensor4(np.expand_dims(k, 2))
                [k_tensor, img_L_tensor] = util.todevice([k_tensor, img_L_tensor], device)
                FB, FBC, F2B, FBFy = sr.pre_calculate(img_L_tensor, k_tensor, sf)

                # --------------------------------
                # (4) main iterations
                # --------------------------------
                for i in range(iter_num):

                    # --------------------------------
                    # step 1, FFT
                    # --------------------------------
                    tau = rhos[i].float().repeat(1, 1, 1, 1)
                    x = sr.data_solution(x.float(), FB, FBC, F2B, FBFy, tau, sf)

                    if 'ircnn' in model_name:
                        current_idx = int(np.ceil(sigmas[i].cpu().numpy()*255./2.)-1)
                        if current_idx != former_idx:
                            model.load_state_dict(model25[str(current_idx)], strict=True)
                            model.eval()
                            for _, v in model.named_parameters():
                                v.requires_grad = False
                            model = model.to(device)
                        former_idx = current_idx

                    # --------------------------------
                    # step 2, denoiser
                    # --------------------------------
                    if x8:
                        x = util.augment_img_tensor4(x, i % 8)

                    if 'drunet' in model_name:
                        x = torch.cat((x, sigmas[i].float().repeat(1, 1, x.shape[2], x.shape[3])), dim=1)
                        x = utils_model.test_mode(model, x, mode=2, refield=32, min_size=256, modulo=16)
                    elif 'ircnn' in model_name:
                        x = model(x)

                    if x8:
                        if i % 8 == 3 or i % 8 == 5:
                            x = util.augment_img_tensor4(x, 8 - i % 8)
                        else:
                            x = util.augment_img_tensor4(x, i % 8)

                # --------------------------------
                # (5) img_E
                # --------------------------------
                img_E = util.tensor2uint(x)

                if save_E:
                    util.imsave(img_E, os.path.join(E_path, img_name+'_x'+str(sf)+'_k'+str(k_index)+'_'+model_name+'.png'))

                if n_channels == 1:
                    img_H = img_H.squeeze()

                # --------------------------------
                # (6) img_LEH
                # --------------------------------
                img_L = util.single2uint(img_L).squeeze()

                if save_LEH:
                    k_v = k/np.max(k)*1.0
                    if n_channels == 1:
                        k_v = util.single2uint(k_v)
                    else:
                        k_v = util.single2uint(np.tile(k_v[..., np.newaxis], [1, 1, n_channels]))
                    k_v = cv2.resize(k_v, (3*k_v.shape[1], 3*k_v.shape[0]), interpolation=cv2.INTER_NEAREST)
                    img_I = cv2.resize(img_L, (sf*img_L.shape[1], sf*img_L.shape[0]), interpolation=cv2.INTER_NEAREST)
                    img_I[:k_v.shape[0], -k_v.shape[1]:, ...] = k_v
                    img_I[:img_L.shape[0], :img_L.shape[1], ...] = img_L
                    util.imshow(np.concatenate([img_I, img_E, img_H], axis=1),
                                title='LR / Recovered / Ground-truth') if show_img else None
                    util.imsave(np.concatenate([img_I, img_E, img_H], axis=1),
                                os.path.join(E_path, img_name+'_x'+str(sf)+'_k'+str(k_index)+'_LEH.png'))

                if save_L:
                    util.imsave(img_L, os.path.join(E_path, img_name+'_x'+str(sf)+'_k'+str(k_index)+'_LR.png'))

                psnr = util.calculate_psnr(img_E, img_H, border=border)
                test_results['psnr'].append(psnr)
                logger.info('{:->4d}--> {:>10s} -- sf:{:>1d} --k:{:>2d} PSNR: {:.2f}dB'.format(
                    idx+1, img_name+ext, sf, k_index, psnr))

                if n_channels == 3:
                    img_E_y = util.rgb2ycbcr(img_E, only_y=True)
                    img_H_y = util.rgb2ycbcr(img_H, only_y=True)
                    psnr_y = util.calculate_psnr(img_E_y, img_H_y, border=border)
                    test_results['psnr_y'].append(psnr_y)

            # --------------------------------
            # Average PSNR for this kernel
            # --------------------------------
            ave_psnr_k = sum(test_results['psnr']) / len(test_results['psnr'])
            logger.info('------> Average PSNR(RGB) of ({}) scale factor: ({}), kernel: ({}) sigma: ({:.2f}): {:.2f} dB'.format(
                testset_name, sf, k_index, noise_level_model, ave_psnr_k))
            test_results_ave['psnr_sf_k'].append(ave_psnr_k)

            if n_channels == 3:  # RGB image
                ave_psnr_y_k = sum(test_results['psnr_y']) / len(test_results['psnr_y'])
                logger.info('------> Average PSNR(Y) of ({}) scale factor: ({}), kernel: ({}) sigma: ({:.2f}): {:.2f} dB'.format(
                    testset_name, sf, k_index, noise_level_model, ave_psnr_y_k))
                test_results_ave['psnr_y_sf_k'].append(ave_psnr_y_k)

    # ---------------------------------------
    # Average PSNR for all sf and kernels
    # ---------------------------------------
    ave_psnr_sf_k = sum(test_results_ave['psnr_sf_k']) / len(test_results_ave['psnr_sf_k'])
    logger.info('------> Average PSNR of ({}) {:.2f} dB'.format(testset_name, ave_psnr_sf_k))
    if n_channels == 3:
        ave_psnr_y_sf_k = sum(test_results_ave['psnr_y_sf_k']) / len(test_results_ave['psnr_y_sf_k'])
        logger.info('------> Average PSNR(Y) of ({}) {:.2f} dB'.format(testset_name, ave_psnr_y_sf_k))
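
# --------------------------------------------------------------------------------
# sr.classical_degradation above synthesises the LR image under the classical SISR
# model: blur the HR image with kernel k, downsample by the scale factor sf, then
# add Gaussian noise. The helper below is a simplified stand-in with a hypothetical
# name; its subsampling offset and border handling may differ from the repo's
# implementation.
# --------------------------------------------------------------------------------
import numpy as np
from scipy import ndimage


def classical_degradation_sketch(img_H, k, sf, noise_level=0.0):
    """Blur an HxWxC float image channel-wise with kernel k, subsample by sf, add AWGN."""
    img_blur = ndimage.convolve(img_H, k[..., None], mode='wrap')  # channel-wise blur
    img_L = img_blur[::sf, ::sf, ...]                              # sf-fold subsampling
    if noise_level > 0:
        img_L = img_L + np.random.normal(0, noise_level, img_L.shape)
    return img_L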