def main():
    """Benchmark USRNet/USRGAN on classically degraded LR images.

    For each scale factor and each test kernel in ``kernels_12.mat``, every
    HR image in the test set is blurred (circular convolution), s-fold
    downsampled and (optionally) noised, restored by the network, and the
    per-kernel average PSNR is logged.

    Relies on module-level imports/helpers defined elsewhere in this file:
    ``os``, ``logging``, ``np``, ``torch``, ``cv2``, ``loadmat``,
    ``ndimage``, ``util``, ``sr``, ``utils_logger`` and the model class
    ``net``.
    """

    # ----------------------------------------
    # Preparation
    # ----------------------------------------
    model_name = 'usrnet'  # 'usrgan' | 'usrnet' | 'usrgan_tiny' | 'usrnet_tiny'
    testset_name = 'set5'  # test set, 'set5' | 'srbsd68'
    # GAN variants are evaluated at x4 only; PSNR-oriented models at x2/x3/x4.
    test_sf = [4] if 'gan' in model_name else [2, 3, 4]  # scale factor, from {1,2,3,4}
    show_img = False  # default: False
    save_L = True     # save LR image
    save_E = True     # save estimated image
    save_LEH = False  # save zoomed LR, E and H images

    # ----------------------------------------
    # load testing kernels
    # ----------------------------------------
    # kernels = hdf5storage.loadmat(os.path.join('kernels', 'kernels.mat'))['kernels']
    kernels = loadmat(os.path.join('kernels', 'kernels_12.mat'))['kernels']

    n_channels = 1 if 'gray' in model_name else 3  # 3 for color image, 1 for grayscale image
    model_pool = 'model_zoo'  # fixed
    testsets = 'testsets'     # fixed
    results = 'results'       # fixed
    noise_level_img = 0       # fixed: 0, noise level for LR image
    noise_level_model = noise_level_img  # fixed, noise level of model, default 0
    result_name = testset_name + '_' + model_name
    model_path = os.path.join(model_pool, model_name + '.pth')

    # ----------------------------------------
    # L_path = H_path, E_path, logger
    # ----------------------------------------
    L_path = os.path.join(testsets, testset_name)  # L_path and H_path, fixed, for Low-quality images
    E_path = os.path.join(results, result_name)    # E_path, fixed, for Estimated images
    util.mkdir(E_path)

    logger_name = result_name
    utils_logger.logger_info(logger_name,
                             log_path=os.path.join(E_path, logger_name + '.log'))
    logger = logging.getLogger(logger_name)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # ----------------------------------------
    # load model
    # ----------------------------------------
    if 'tiny' in model_name:
        model = net(n_iter=6, h_nc=32, in_nc=4, out_nc=3, nc=[16, 32, 64, 64],
                    nb=2, act_mode="R", downsample_mode='strideconv',
                    upsample_mode="convtranspose")
    else:
        model = net(n_iter=8, h_nc=64, in_nc=4, out_nc=3, nc=[64, 128, 256, 512],
                    nb=2, act_mode="R", downsample_mode='strideconv',
                    upsample_mode="convtranspose")
    model.load_state_dict(torch.load(model_path), strict=True)
    model.eval()
    # Inference only: freeze all parameters.
    for key, v in model.named_parameters():
        v.requires_grad = False
    number_parameters = sum(map(lambda x: x.numel(), model.parameters()))
    model = model.to(device)
    logger.info('Model path: {:s}'.format(model_path))
    logger.info('Params number: {}'.format(number_parameters))
    logger.info('Model_name:{}, image sigma:{}'.format(model_name, noise_level_img))
    logger.info(L_path)
    L_paths = util.get_image_paths(L_path)

    # --------------------------------
    # read images
    # --------------------------------
    test_results_ave = OrderedDict()
    test_results_ave['psnr_sf_k'] = []  # average PSNR per (sf, kernel) pair

    for sf in test_sf:
        for k_index in range(kernels.shape[1]):
            test_results = OrderedDict()
            test_results['psnr'] = []
            kernel = kernels[0, k_index].astype(np.float64)

            ## other kernels
            # kernel = utils_deblur.blurkernel_synthesis(h=25)  # motion kernel
            # kernel = utils_deblur.fspecial('gaussian', 25, 1.6)  # Gaussian kernel
            # kernel = sr.shift_pixel(kernel, sf)  # pixel shift; optional
            # kernel /= np.sum(kernel)

            util.surf(kernel) if show_img else None
            idx = 0

            for img in L_paths:

                # --------------------------------
                # (1) classical degradation, img_L
                # --------------------------------
                idx += 1
                img_name, ext = os.path.splitext(os.path.basename(img))
                img_H = util.imread_uint(img, n_channels=n_channels)  # HR image, uint8
                img_H = util.modcrop(img_H, np.lcm(sf, 8))  # modcrop

                # generate degraded LR image
                # FIX: the ndimage.filters alias is deprecated and removed in
                # recent SciPy; ndimage.convolve is the same function.
                img_L = ndimage.convolve(img_H, kernel[..., np.newaxis],
                                         mode='wrap')  # blur
                img_L = sr.downsample_np(img_L, sf, center=False)  # downsample, standard s-fold downsampler
                img_L = util.uint2single(img_L)  # uint2single
                np.random.seed(seed=0)  # for reproducibility
                img_L += np.random.normal(0, noise_level_img, img_L.shape)  # add AWGN

                util.imshow(util.single2uint(img_L)) if show_img else None

                x = util.single2tensor4(img_L)
                k = util.single2tensor4(kernel[..., np.newaxis])
                sigma = torch.tensor(noise_level_model).float().view([1, 1, 1, 1])
                [x, k, sigma] = [el.to(device) for el in [x, k, sigma]]

                # --------------------------------
                # (2) inference
                # --------------------------------
                x = model(x, k, sf, sigma)

                # --------------------------------
                # (3) img_E
                # --------------------------------
                img_E = util.tensor2uint(x)

                if save_E:
                    util.imsave(
                        img_E,
                        os.path.join(
                            E_path, img_name + '_x' + str(sf) + '_k' +
                            str(k_index + 1) + '_' + model_name + '.png'))

                # --------------------------------
                # (4) img_LEH
                # --------------------------------
                img_L = util.single2uint(img_L)
                if save_LEH:
                    # Visualize the kernel as a 3x-zoomed RGB patch pasted into
                    # the nearest-neighbor upscaled LR image.
                    k_v = kernel / np.max(kernel) * 1.2
                    k_v = util.single2uint(np.tile(k_v[..., np.newaxis], [1, 1, 3]))
                    k_v = cv2.resize(k_v, (3 * k_v.shape[1], 3 * k_v.shape[0]),
                                     interpolation=cv2.INTER_NEAREST)
                    img_I = cv2.resize(img_L,
                                       (sf * img_L.shape[1], sf * img_L.shape[0]),
                                       interpolation=cv2.INTER_NEAREST)
                    img_I[:k_v.shape[0], -k_v.shape[1]:, :] = k_v
                    img_I[:img_L.shape[0], :img_L.shape[1], :] = img_L
                    util.imshow(np.concatenate([img_I, img_E, img_H], axis=1),
                                title='LR / Recovered / Ground-truth'
                                ) if show_img else None
                    util.imsave(
                        np.concatenate([img_I, img_E, img_H], axis=1),
                        os.path.join(
                            E_path, img_name + '_x' + str(sf) + '_k' +
                            str(k_index + 1) + '_LEH.png'))

                if save_L:
                    util.imsave(
                        img_L,
                        os.path.join(
                            E_path, img_name + '_x' + str(sf) + '_k' +
                            str(k_index + 1) + '_LR.png'))

                psnr = util.calculate_psnr(
                    img_E, img_H, border=sf**2)  # change with your own border
                test_results['psnr'].append(psnr)
                # NOTE(review): saved filenames use k_index+1 while this log
                # prints the 0-based k_index — off by one in the log only.
                logger.info(
                    '{:->4d}--> {:>10s} -- x{:>2d} --k{:>2d} PSNR: {:.2f}dB'.
                    format(idx, img_name + ext, sf, k_index, psnr))

            ave_psnr_k = sum(test_results['psnr']) / len(test_results['psnr'])
            logger.info(
                '------> Average PSNR(RGB) of ({}) scale factor: ({}), kernel: ({}) sigma: ({}): {:.2f} dB'
                .format(testset_name, sf, k_index + 1, noise_level_model,
                        ave_psnr_k))
            test_results_ave['psnr_sf_k'].append(ave_psnr_k)

    logger.info(test_results_ave['psnr_sf_k'])
def main():
    """Benchmark USRNet/USRGAN on bicubicly downsampled LR images.

    Uses an approximated bicubic kernel (``kernels_bicubicx234.mat``) so the
    same network can restore standard bicubic degradation.  PSNR/SSIM
    (RGB and Y channel) are averaged over the test set and logged.

    Relies on module-level imports/helpers defined elsewhere in this file:
    ``os``, ``logging``, ``np``, ``torch``, ``cv2``, ``loadmat``, ``util``,
    ``sr``, ``utils_deblur`` and ``utils_logger``.
    """

    # ----------------------------------------
    # Preparation
    # ----------------------------------------
    model_name = 'usrnet'  # 'usrgan' | 'usrnet' | 'usrgan_tiny' | 'usrnet_tiny'
    testset_name = 'set5'  # test set, 'set5' | 'srbsd68'
    need_degradation = True  # default: True
    sf = 4  # scale factor, only from {2, 3, 4}
    show_img = False  # default: False
    save_L = True  # save LR image
    save_E = True  # save estimated image

    # load approximated bicubic kernels
    #kernels = hdf5storage.loadmat(os.path.join('kernels', 'kernels_bicubicx234.mat'))['kernels']
    kernels = loadmat(os.path.join('kernels', 'kernels_bicubicx234.mat'))['kernels']
    # Kernel table holds x2/x3/x4 kernels, hence index sf-2.
    kernel = kernels[0, sf - 2].astype(np.float64)
    kernel = util.single2tensor4(kernel[..., np.newaxis])

    task_current = 'sr'  # fixed, 'sr' for super-resolution
    n_channels = 3  # fixed, 3 for color image
    model_pool = 'model_zoo'  # fixed
    testsets = 'testsets'  # fixed
    results = 'results'  # fixed
    noise_level_img = 0  # fixed: 0, noise level for LR image
    noise_level_model = noise_level_img  # fixed, noise level of model, default 0
    result_name = testset_name + '_' + model_name + '_bicubic'
    border = sf if task_current == 'sr' else 0  # shave border to calculate PSNR and SSIM
    model_path = os.path.join(model_pool, model_name + '.pth')

    # ----------------------------------------
    # L_path, E_path, H_path
    # ----------------------------------------
    L_path = os.path.join(
        testsets, testset_name)  # L_path, fixed, for Low-quality images
    H_path = L_path  # H_path, 'None' | L_path, for High-quality images
    E_path = os.path.join(results,
                          result_name)  # E_path, fixed, for Estimated images
    util.mkdir(E_path)

    # When LR and HR share a folder, the LR inputs must be synthesized here.
    if H_path == L_path:
        need_degradation = True
    logger_name = result_name
    utils_logger.logger_info(logger_name,
                             log_path=os.path.join(E_path,
                                                   logger_name + '.log'))
    logger = logging.getLogger(logger_name)

    need_H = True if H_path is not None else False
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # ----------------------------------------
    # load model
    # ----------------------------------------
    from models.network_usrnet import USRNet as net  # for pytorch version <= 1.7.1
    # from models.network_usrnet_v1 import USRNet as net  # for pytorch version >=1.8.1

    if 'tiny' in model_name:
        model = net(n_iter=6, h_nc=32, in_nc=4, out_nc=3, nc=[16, 32, 64, 64],
                    nb=2, act_mode="R", downsample_mode='strideconv',
                    upsample_mode="convtranspose")
    else:
        model = net(n_iter=8, h_nc=64, in_nc=4, out_nc=3,
                    nc=[64, 128, 256, 512], nb=2, act_mode="R",
                    downsample_mode='strideconv',
                    upsample_mode="convtranspose")
    model.load_state_dict(torch.load(model_path), strict=True)
    model.eval()
    # Inference only: freeze all parameters.
    for key, v in model.named_parameters():
        v.requires_grad = False
    number_parameters = sum(map(lambda x: x.numel(), model.parameters()))
    logger.info('Params number: {}'.format(number_parameters))
    model = model.to(device)
    logger.info('Model path: {:s}'.format(model_path))

    test_results = OrderedDict()
    test_results['psnr'] = []
    test_results['ssim'] = []
    test_results['psnr_y'] = []  # Y-channel metrics (RGB inputs only)
    test_results['ssim_y'] = []

    logger.info('model_name:{}, image sigma:{}'.format(model_name,
                                                       noise_level_img))
    logger.info(L_path)
    L_paths = util.get_image_paths(L_path)
    H_paths = util.get_image_paths(H_path) if need_H else None

    for idx, img in enumerate(L_paths):

        # ------------------------------------
        # (1) img_L
        # ------------------------------------
        img_name, ext = os.path.splitext(os.path.basename(img))
        logger.info('{:->4d}--> {:>10s}'.format(idx + 1, img_name + ext))
        img_L = util.imread_uint(img, n_channels=n_channels)
        img_L = util.uint2single(img_L)

        # degradation process, bicubic downsampling
        if need_degradation:
            img_L = util.modcrop(img_L, sf)
            img_L = util.imresize_np(img_L, 1 / sf)
            # img_L = util.uint2single(util.single2uint(img_L))
            # np.random.seed(seed=0)  # for reproducibility
            # img_L += np.random.normal(0, noise_level_img/255., img_L.shape)

        # NOTE: w is shape[0] (rows) and h is shape[1] (cols) here; the
        # cv2.resize calls below account for the swapped naming.
        w, h = img_L.shape[:2]

        if save_L:
            util.imsave(
                util.single2uint(img_L),
                os.path.join(E_path, img_name + '_LR_x' + str(sf) + '.png'))

        # Boundary handling: pad to a multiple of 8 (plus margin) via
        # wrap_boundary_liu, then re-downsample; the top-left region keeps the
        # true LR content. Note: 'img' is rebound from the path string here.
        img = cv2.resize(img_L, (sf * h, sf * w),
                         interpolation=cv2.INTER_NEAREST)
        img = utils_deblur.wrap_boundary_liu(img, [
            int(np.ceil(sf * w / 8 + 2) * 8),
            int(np.ceil(sf * h / 8 + 2) * 8)
        ])
        img_wrap = sr.downsample_np(img, sf, center=False)
        img_wrap[:w, :h, :] = img_L
        img_L = img_wrap

        util.imshow(util.single2uint(img_L),
                    title='LR image with noise level {}'.format(
                        noise_level_img)) if show_img else None

        img_L = util.single2tensor4(img_L)
        img_L = img_L.to(device)

        # ------------------------------------
        # (2) img_E
        # ------------------------------------
        sigma = torch.tensor(noise_level_model).float().view([1, 1, 1, 1])
        [img_L, kernel, sigma] = [el.to(device) for el in [img_L, kernel, sigma]]

        img_E = model(img_L, kernel, sf, sigma)
        img_E = util.tensor2uint(img_E)
        # Crop away the padded boundary region.
        img_E = img_E[:sf * w, :sf * h, :]

        if need_H:

            # --------------------------------
            # (3) img_H
            # --------------------------------
            img_H = util.imread_uint(H_paths[idx], n_channels=n_channels)
            img_H = img_H.squeeze()
            img_H = util.modcrop(img_H, sf)

            # --------------------------------
            # PSNR and SSIM
            # --------------------------------
            psnr = util.calculate_psnr(img_E, img_H, border=border)
            ssim = util.calculate_ssim(img_E, img_H, border=border)
            test_results['psnr'].append(psnr)
            test_results['ssim'].append(ssim)
            logger.info('{:s} - PSNR: {:.2f} dB; SSIM: {:.4f}.'.format(
                img_name + ext, psnr, ssim))
            util.imshow(np.concatenate([img_E, img_H], axis=1),
                        title='Recovered / Ground-truth') if show_img else None

            if np.ndim(img_H) == 3:  # RGB image
                img_E_y = util.rgb2ycbcr(img_E, only_y=True)
                img_H_y = util.rgb2ycbcr(img_H, only_y=True)
                psnr_y = util.calculate_psnr(img_E_y, img_H_y, border=border)
                ssim_y = util.calculate_ssim(img_E_y, img_H_y, border=border)
                test_results['psnr_y'].append(psnr_y)
                test_results['ssim_y'].append(ssim_y)

        # ------------------------------------
        # save results
        # ------------------------------------
        if save_E:
            util.imsave(
                img_E,
                os.path.join(
                    E_path,
                    img_name + '_x' + str(sf) + '_' + model_name + '.png'))

    if need_H:
        ave_psnr = sum(test_results['psnr']) / len(test_results['psnr'])
        ave_ssim = sum(test_results['ssim']) / len(test_results['ssim'])
        logger.info(
            'Average PSNR/SSIM(RGB) - {} - x{} --PSNR: {:.2f} dB; SSIM: {:.4f}'
            .format(result_name, sf, ave_psnr, ave_ssim))
        if np.ndim(img_H) == 3:
            ave_psnr_y = sum(test_results['psnr_y']) / len(
                test_results['psnr_y'])
            ave_ssim_y = sum(test_results['ssim_y']) / len(
                test_results['ssim_y'])
            logger.info(
                'Average PSNR/SSIM( Y ) - {} - x{} - PSNR: {:.2f} dB; SSIM: {:.4f}'
                .format(result_name, sf, ave_psnr_y, ave_ssim_y))
def test_usrnet(self):
    """Convert USRNet to OpenVINO IR and compare its output to PyTorch.

    Builds the reference model, runs a fixed random input through it, then
    converts with mo_pytorch and checks the max absolute difference between
    the PyTorch and OpenVINO outputs stays within 1e-4.
    Assumes ``self.ie`` is an OpenVINO Core/IECore instance — set up by the
    enclosing test class (not visible here).
    """
    sys.path.append('USRNet')
    from models.network_usrnet import USRNet as net  # for pytorch version <= 1.7.1
    from utils import utils_deblur
    from utils import utils_sisr as sr
    from utils import utils_image as util

    # Fixed seeds for a reproducible reference tensor.
    np.random.seed(324)
    torch.manual_seed(32)

    inp = np.random.standard_normal([1, 3, 56, 112]).astype(np.float32)
    sf = 4

    # source: https://github.com/cszn/USRNet/blob/master/main_test_realapplication.py
    def get_kernel_sigma():
        # Build the Gaussian blur kernel and noise sigma exactly as in the
        # upstream real-application demo.
        noise_level_img = 2  # noise level for LR image, 0.5~3 for clean images
        kernel_width_default_x1234 = [
            0.4, 0.7, 1.5, 2.0
        ]  # default Gaussian kernel widths of clean/sharp images for x1, x2, x3, x4
        noise_level_model = noise_level_img / 255.  # noise level of model
        kernel_width = kernel_width_default_x1234[sf - 1]

        k = utils_deblur.fspecial('gaussian', 25, kernel_width)
        k = sr.shift_pixel(k, sf)  # shift the kernel
        k /= np.sum(k)

        kernel = util.single2tensor4(k[..., np.newaxis])
        sigma = torch.tensor(noise_level_model).float().view([1, 1, 1, 1])
        return kernel, sigma

    kernel, sigma = get_kernel_sigma()

    model = net(n_iter=8, h_nc=64, in_nc=4, out_nc=3, nc=[64, 128, 256, 512],
                nb=2, act_mode="R", downsample_mode='strideconv',
                upsample_mode="convtranspose")
    model.eval()

    ref = model(torch.tensor(inp), kernel, sf, sigma)

    # Forward random input through the model to check that nothing got stuck from reference data
    dummy_inp = torch.randn(inp.shape)
    dummy_kernel = torch.randn(kernel.shape)
    dummy_sigma = torch.randn(sigma.shape)
    model(dummy_inp, dummy_kernel, sf, dummy_sigma)

    # Generate OpenVINO IR
    mo_pytorch.convert(
        model,
        input_shape='[1, 3, 56, 112],[1, 1, 25, 25],[1, 1, 1, 1],[1]',
        input='x{f32},k{f32},sigma{f32},sf{f32}->4',
        model_name='model')

    # Run model with OpenVINO and compare outputs
    # NOTE: 'net' is rebound here — it was the model class above, now it is
    # the parsed IR network.
    net = self.ie.read_network('model.xml')
    exec_net = self.ie.load_network(net, 'CPU')
    out = exec_net.infer({'x': inp, 'k': kernel, 'sigma': sigma})
    out = next(iter(out.values()))

    diff = np.max(np.abs(ref.detach().numpy() - out))
    self.assertLessEqual(diff, 1e-4)
def main():
    """Run USRNet on a single real-world LR image (no ground truth).

    Estimates the degradation as an isotropic Gaussian blur whose width
    depends on the scale factor, pads the LR image with wrap_boundary_liu to
    suppress border artifacts, restores it, and saves the result (optionally
    with a kernel-annotated LR/estimate side-by-side image).

    Relies on module-level imports/helpers defined elsewhere in this file:
    ``os``, ``logging``, ``np``, ``torch``, ``cv2``, ``util``, ``sr``,
    ``utils_deblur``, ``utils_logger`` and the model class ``net``.
    """

    # ----------------------------------------
    # Preparation
    # ----------------------------------------
    model_name = 'usrnet'  # 'usrgan' | 'usrnet' | 'usrgan_tiny' | 'usrnet_tiny'
    testset_name = 'set_real'  # test set, 'set_real'
    test_image = 'chip.png'  # 'chip.png', 'comic.png'
    #test_image = 'comic.png'

    sf = 4  # scale factor, only from {1, 2, 3, 4}
    show_img = False  # default: False
    save_E = True  # save estimated image
    save_LE = True  # save zoomed LR, Estimated images

    # ----------------------------------------
    # set noise level and kernel
    # ----------------------------------------
    # The chip image is noisy, so a higher noise level and wider kernels are
    # assumed than for clean/sharp images.
    if 'chip' in test_image:
        noise_level_img = 15  # noise level for LR image, 15 for chip
        kernel_width_default_x1234 = [0.6, 0.9, 1.7, 2.2]  # Gaussian kernel widths for x1, x2, x3, x4
    else:
        noise_level_img = 2  # noise level for LR image, 0.5~3 for clean images
        kernel_width_default_x1234 = [0.4, 0.7, 1.5, 2.0]  # default Gaussian kernel widths of clean/sharp images for x1, x2, x3, x4

    noise_level_model = noise_level_img/255.  # noise level of model
    kernel_width = kernel_width_default_x1234[sf-1]

    # set your own kernel width
    # kernel_width = 2.2

    k = utils_deblur.fspecial('gaussian', 25, kernel_width)
    k = sr.shift_pixel(k, sf)  # shift the kernel
    k /= np.sum(k)
    util.surf(k) if show_img else None
    # scio.savemat('kernel_realapplication.mat', {'kernel':k})

    # load approximated bicubic kernels
    #kernels = hdf5storage.loadmat(os.path.join('kernels', 'kernel_bicubicx234.mat'))['kernels']
    # kernels = loadmat(os.path.join('kernels', 'kernel_bicubicx234.mat'))['kernels']
    # kernel = kernels[0, sf-2].astype(np.float64)

    kernel = util.single2tensor4(k[..., np.newaxis])

    n_channels = 1 if 'gray' in model_name else 3  # 3 for color image, 1 for grayscale image
    model_pool = 'model_zoo'  # fixed
    testsets = 'testsets'  # fixed
    results = 'results'  # fixed
    result_name = testset_name + '_' + model_name
    model_path = os.path.join(model_pool, model_name+'.pth')

    # ----------------------------------------
    # L_path, E_path
    # ----------------------------------------
    L_path = os.path.join(testsets, testset_name)  # L_path, fixed, for Low-quality images
    E_path = os.path.join(results, result_name)  # E_path, fixed, for Estimated images
    util.mkdir(E_path)

    logger_name = result_name
    utils_logger.logger_info(logger_name, log_path=os.path.join(E_path, logger_name+'.log'))
    logger = logging.getLogger(logger_name)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # ----------------------------------------
    # load model
    # ----------------------------------------
    if 'tiny' in model_name:
        model = net(n_iter=6, h_nc=32, in_nc=4, out_nc=3, nc=[16, 32, 64, 64],
                    nb=2, act_mode="R", downsample_mode='strideconv',
                    upsample_mode="convtranspose")
    else:
        model = net(n_iter=8, h_nc=64, in_nc=4, out_nc=3, nc=[64, 128, 256, 512],
                    nb=2, act_mode="R", downsample_mode='strideconv',
                    upsample_mode="convtranspose")
    model.load_state_dict(torch.load(model_path), strict=True)
    model.eval()
    # Inference only: freeze all parameters.
    for key, v in model.named_parameters():
        v.requires_grad = False
    number_parameters = sum(map(lambda x: x.numel(), model.parameters()))
    logger.info('Params number: {}'.format(number_parameters))
    model = model.to(device)
    logger.info('Model path: {:s}'.format(model_path))
    logger.info('model_name:{}, image sigma:{}'.format(model_name, noise_level_img))
    logger.info(L_path)

    img = os.path.join(L_path, test_image)

    # ------------------------------------
    # (1) img_L
    # ------------------------------------
    img_name, ext = os.path.splitext(os.path.basename(img))
    img_L = util.imread_uint(img, n_channels=n_channels)
    img_L = util.uint2single(img_L)
    util.imshow(img_L) if show_img else None
    # NOTE: w is shape[0] (rows) and h is shape[1] (cols); the cv2.resize
    # calls below account for the swapped naming.
    w, h = img_L.shape[:2]
    logger.info('{:>10s}--> ({:>4d}x{:<4d})'.format(img_name+ext, w, h))

    # boundary handling
    # (sic: variable name 'boarder' is kept as in the original)
    boarder = 8  # default setting for kernel size 25x25
    img = cv2.resize(img_L, (sf*h, sf*w), interpolation=cv2.INTER_NEAREST)
    img = utils_deblur.wrap_boundary_liu(img, [int(np.ceil(sf*w/boarder+2)*boarder), int(np.ceil(sf*h/boarder+2)*boarder)])
    img_wrap = sr.downsample_np(img, sf, center=False)
    img_wrap[:w, :h, :] = img_L
    img_L = img_wrap

    util.imshow(util.single2uint(img_L), title='LR image with noise level {}'.format(noise_level_img)) if show_img else None

    img_L = util.single2tensor4(img_L)
    img_L = img_L.to(device)

    # ------------------------------------
    # (2) img_E
    # ------------------------------------
    sigma = torch.tensor(noise_level_model).float().view([1, 1, 1, 1])
    [img_L, kernel, sigma] = [el.to(device) for el in [img_L, kernel, sigma]]
    img_E = model(img_L, kernel, sf, sigma)
    # Crop away the padded boundary region.
    img_E = util.tensor2uint(img_E)[:sf*w, :sf*h, ...]

    if save_E:
        util.imsave(img_E, os.path.join(E_path, img_name+'_x'+str(sf)+'_'+model_name+'.png'))

    # --------------------------------
    # (3) save img_LE
    # --------------------------------
    if save_LE:
        # Paste a 3x-zoomed visualization of the kernel into the upscaled LR
        # image, then save LR/estimate side by side.
        k_v = k/np.max(k)*1.2
        k_v = util.single2uint(np.tile(k_v[..., np.newaxis], [1, 1, 3]))
        k_factor = 3
        k_v = cv2.resize(k_v, (k_factor*k_v.shape[1], k_factor*k_v.shape[0]), interpolation=cv2.INTER_NEAREST)
        img_L = util.tensor2uint(img_L)[:w, :h, ...]
        img_I = cv2.resize(img_L, (sf*img_L.shape[1], sf*img_L.shape[0]), interpolation=cv2.INTER_NEAREST)
        img_I[:k_v.shape[0], :k_v.shape[1], :] = k_v
        util.imshow(np.concatenate([img_I, img_E], axis=1), title='LR / Recovered') if show_img else None
        util.imsave(np.concatenate([img_I, img_E], axis=1), os.path.join(E_path, img_name+'_x'+str(sf)+'_'+model_name+'_LE.png'))
def main():
    """Benchmark USRNet(-tiny) on a blurred SR test set (set5/srbsd68/srcvte).

    LR inputs are read pre-degraded from disk (the degradation code is kept
    commented for reference); the network restores them with a fixed
    Gaussian kernel (or kernels from ``kernels_12.mat`` when
    ``load_kernels`` is True) and PSNR/SSIM against HR images are logged.
    For the 'srcvte' video set, outputs are grouped into per-video folders.

    Relies on module-level imports/helpers defined elsewhere in this file:
    ``os``, ``logging``, ``np``, ``torch``, ``loadmat``, ``util``, ``sr``,
    ``utils_deblur``, ``utils_logger`` and the model class ``net``.
    """

    # ----------------------------------------
    # Preparation
    # ----------------------------------------
    model_name = 'usrnet_tiny'  # 'usrgan' | 'usrnet' | 'usrgan_tiny' | 'usrnet_tiny'
    testset_name = 'srcvte'  # test set, 'set5' | 'srbsd68' | 'srcvte'
    test_sf = [4]  # if 'gan' in model_name else [2, 3, 4]  # scale factor, from {1,2,3,4}
    load_kernels = False  # True: use kernels_12.mat; False: single Gaussian kernel
    show_img = False  # default: False
    save_L = False  # save LR image
    save_E = True  # save estimated image
    save_LEH = False  # save zoomed LR, E and H images

    # ----------------------------------------
    # load testing kernels
    # ----------------------------------------
    # kernels = hdf5storage.loadmat(os.path.join('kernels', 'kernels.mat'))['kernels']
    kernels = loadmat(os.path.join(
        'kernels', 'kernels_12.mat'))['kernels'] if load_kernels else None

    n_channels = 1 if 'gray' in model_name else 3  # 3 for color image, 1 for grayscale image
    model_pool = '/home/dengzeshuai/pretrained_models/USRnet/'  # fixed
    testsets = '/home/datasets/sr/'  # fixed
    results = 'results'  # fixed
    noise_level_img = 0  # fixed: 0, noise level for LR image
    noise_level_model = noise_level_img  # fixed, noise level of model, default 0
    result_name = testset_name + '_' + model_name + '_blur'
    model_path = os.path.join(model_pool, model_name + '.pth')

    # ----------------------------------------
    # L_path = H_path, E_path, logger
    # ----------------------------------------
    L_path = os.path.join(
        testsets, testset_name)  # L_path and H_path, fixed, for Low-quality images
    # FIX: H_path and video_name were previously assigned only inside the
    # 'srcvte' branch, so every other test set crashed with NameError (at
    # 'need_H = ...' and in the per-image log call). Default to the shared
    # folder layout (HR alongside LR) and an empty video tag.
    H_path = L_path
    video_name = ''
    if testset_name == 'srcvte':
        L_path = os.path.join(testsets, testset_name, 'LR_val')
        H_path = os.path.join(testsets, testset_name, 'HR_val')
        video_names = os.listdir(H_path)

    E_path = os.path.join(results,
                          result_name)  # E_path, fixed, for Estimated images
    util.mkdir(E_path)

    logger_name = result_name
    utils_logger.logger_info(logger_name,
                             log_path=os.path.join(E_path,
                                                   logger_name + '.log'))
    logger = logging.getLogger(logger_name)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # ----------------------------------------
    # load model
    # ----------------------------------------
    if 'tiny' in model_name:
        model = net(n_iter=6, h_nc=32, in_nc=4, out_nc=3, nc=[16, 32, 64, 64],
                    nb=2, act_mode="R", downsample_mode='strideconv',
                    upsample_mode="convtranspose")
    else:
        model = net(n_iter=8, h_nc=64, in_nc=4, out_nc=3,
                    nc=[64, 128, 256, 512], nb=2, act_mode="R",
                    downsample_mode='strideconv',
                    upsample_mode="convtranspose")
    model.load_state_dict(torch.load(model_path), strict=True)
    model.eval()
    # Inference only: freeze all parameters.
    for key, v in model.named_parameters():
        v.requires_grad = False
    number_parameters = sum(map(lambda x: x.numel(), model.parameters()))
    model = model.to(device)
    logger.info('Model path: {:s}'.format(model_path))
    logger.info('Params number: {}'.format(number_parameters))
    logger.info('Model_name:{}, image sigma:{}'.format(model_name,
                                                       noise_level_img))
    logger.info(L_path)
    L_paths = util.get_image_paths(L_path)
    need_H = True if H_path is not None else False
    H_paths = util.get_image_paths(H_path) if need_H else None

    # --------------------------------
    # read images
    # --------------------------------
    test_results_ave = OrderedDict()
    test_results_ave['psnr_sf_k'] = []
    test_results_ave['ssim_sf_k'] = []
    test_results_ave['psnr_y_sf_k'] = []  # Y-channel averages (RGB inputs only)
    test_results_ave['ssim_y_sf_k'] = []

    for sf in test_sf:
        loop = kernels.shape[1] if load_kernels else 1
        for k_index in range(loop):
            test_results = OrderedDict()
            test_results['psnr'] = []
            test_results['ssim'] = []
            test_results['psnr_y'] = []
            test_results['ssim_y'] = []

            if load_kernels:
                kernel = kernels[0, k_index].astype(np.float64)
            else:
                ## other kernels
                # kernel = utils_deblur.blurkernel_synthesis(h=25)  # motion kernel
                kernel = utils_deblur.fspecial('gaussian', 25, 1.6)  # Gaussian kernel
                kernel = sr.shift_pixel(kernel, sf)  # pixel shift; optional
                kernel /= np.sum(kernel)

            util.surf(kernel) if show_img else None
            # idx = 0

            for idx, img in enumerate(L_paths):

                # --------------------------------
                # (1) classical degradation, img_L
                # --------------------------------
                img_name, ext = os.path.splitext(os.path.basename(img))
                if testset_name == 'srcvte':
                    # Frames are stored as <video_name>/<frame>.png.
                    video_name = os.path.basename(os.path.dirname(img))

                img_L = util.imread_uint(img, n_channels=n_channels)
                img_L = util.uint2single(img_L)

                # generate degraded LR image
                # img_L = ndimage.filters.convolve(img_H, kernel[..., np.newaxis], mode='wrap')  # blur
                # img_L = sr.downsample_np(img_L, sf, center=False)  # downsample, standard s-fold downsampler
                # img_L = util.uint2single(img_L)  # uint2single
                # np.random.seed(seed=0)  # for reproducibility
                # img_L += np.random.normal(0, noise_level_img, img_L.shape)  # add AWGN

                util.imshow(util.single2uint(img_L)) if show_img else None

                x = util.single2tensor4(img_L)
                k = util.single2tensor4(kernel[..., np.newaxis])
                sigma = torch.tensor(noise_level_model).float().view(
                    [1, 1, 1, 1])
                [x, k, sigma] = [el.to(device) for el in [x, k, sigma]]

                # --------------------------------
                # (2) inference
                # --------------------------------
                x = model(x, k, sf, sigma)

                # --------------------------------
                # (3) img_E
                # --------------------------------
                img_E = util.tensor2uint(x)

                if save_E:
                    if testset_name == 'srcvte':
                        save_path = os.path.join(E_path, video_name)
                        util.mkdir(save_path)
                        # util.imsave(img_E, os.path.join(save_path, img_name+'_k'+str(k_index+1)+'.png'))
                        util.imsave(img_E,
                                    os.path.join(save_path, img_name + '.png'))
                    else:
                        util.imsave(
                            img_E,
                            os.path.join(
                                E_path, img_name + '_x' + str(sf) + '_k' +
                                str(k_index + 1) + '_' + model_name + '.png'))

                # --------------------------------
                # (4) img_H
                # --------------------------------
                if need_H:
                    img_H = util.imread_uint(H_paths[idx],
                                             n_channels=n_channels)
                    img_H = img_H.squeeze()
                    img_H = util.modcrop(img_H, sf)

                    psnr = util.calculate_psnr(
                        img_E, img_H, border=sf)  # change with your own border
                    ssim = util.calculate_ssim(img_E, img_H, border=sf)
                    test_results['psnr'].append(psnr)
                    test_results['ssim'].append(ssim)

                    if np.ndim(img_H) == 3:  # RGB image
                        img_E_y = util.rgb2ycbcr(img_E, only_y=True)
                        img_H_y = util.rgb2ycbcr(img_H, only_y=True)
                        psnr_y = util.calculate_psnr(img_E_y, img_H_y,
                                                     border=sf)
                        ssim_y = util.calculate_ssim(img_E_y, img_H_y,
                                                     border=sf)
                        test_results['psnr_y'].append(psnr_y)
                        test_results['ssim_y'].append(ssim_y)
                        logger.info(
                            '{:->4d} --> {:>4s}--> {:>10s} -- x{:>2d} --k{:>2d} PSNR: {:.2f}dB SSIM: {:.4f}'
                            .format(idx, video_name, img_name + ext, sf,
                                    k_index, psnr_y, ssim_y))
                    else:
                        logger.info(
                            '{:->4d} --> {:>4s}--> {:>10s} -- x{:>2d} --k{:>2d} PSNR: {:.2f}dB SSIM: {:.4f}'
                            .format(idx, video_name, img_name + ext, sf,
                                    k_index, psnr, ssim))

            if need_H:
                ave_psnr = sum(test_results['psnr']) / len(
                    test_results['psnr'])
                ave_ssim = sum(test_results['ssim']) / len(
                    test_results['ssim'])
                logger.info(
                    'Average PSNR/SSIM(RGB) - {} - x{} --PSNR: {:.2f} dB; SSIM: {:.4f}'
                    .format(result_name, sf, ave_psnr, ave_ssim))
                logger.info(
                    '------> Average PSNR(RGB) - {} - x{}, kernel:{} sigma:{} --PSNR: {:.2f} dB; SSIM: {:.4f}'
                    .format(testset_name, sf, k_index + 1, noise_level_model,
                            ave_psnr, ave_ssim))
                if np.ndim(img_H) == 3:
                    ave_psnr_y = sum(test_results['psnr_y']) / len(
                        test_results['psnr_y'])
                    ave_ssim_y = sum(test_results['ssim_y']) / len(
                        test_results['ssim_y'])
                    logger.info(
                        '------> Average PSNR(Y) - {} - x{}, kernel:{} sigma:{} --PSNR: {:.2f} dB; SSIM: {:.4f}'
                        .format(testset_name, sf, k_index + 1,
                                noise_level_model, ave_psnr_y, ave_ssim_y))
                test_results_ave['psnr_sf_k'].append(ave_psnr)
                test_results_ave['ssim_sf_k'].append(ave_ssim)
                if np.ndim(img_H) == 3:
                    test_results_ave['psnr_y_sf_k'].append(ave_psnr_y)
                    test_results_ave['ssim_y_sf_k'].append(ave_ssim_y)

    logger.info(test_results_ave['psnr_sf_k'])
    logger.info(test_results_ave['ssim_sf_k'])
    if np.ndim(img_H) == 3:
        logger.info(test_results_ave['psnr_y_sf_k'])
        logger.info(test_results_ave['ssim_y_sf_k'])