Example #1
def show_pca(x):
    """
    x: PCA projection matrix, e.g., 15x225
    """
    for i in range(x.shape[0]):
        xc = np.reshape(x[i, :], (int(np.sqrt(x.shape[1])), -1), order="F")
        util.surf(xc)
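A hypothetical usage sketch for the helper above, assuming `numpy` is imported as `np` and the same `util.surf` plotting helper is available at module level:

import numpy as np

# Hypothetical 15x225 projection matrix: 15 components, each of which
# show_pca reshapes into a 15x15 patch (sqrt(225) = 15) before plotting.
x = np.random.randn(15, 225)
show_pca(x)  # draws one surface plot per component via util.surf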
Example #2
def main():
    """
    # ----------------------------------------------------------------------------------
    # In real applications, set proper values for
    # - "noise_level_img": from [3, 25]; use 3 for a clean image, try 15 for very noisy LR images
    # - "k" (or "kernel_width"): the blur kernel matters a lot! kernel_width from [0.6, 3.0]
    # to get the best performance.
    # ----------------------------------------------------------------------------------
    """
    ##############################################################################

    testset_name = 'Set3C'  # set test set, e.g., 'Set3C' | 'set5' | 'srbsd68'
    noise_level_img = 3  # set noise level of image, from [3, 25], set 3 for clean image
    model_name = 'drunet_color'  # set denoiser: 'drunet_color' | 'drunet_gray' | 'ircnn_color' | 'ircnn_gray'
    sf = 2  # set scale factor, 1, 2, 3, 4
    iter_num = 24  # set number of iterations, default: 24 for SISR

    # --------------------------------
    # set blur kernel
    # --------------------------------
    kernel_width_default_x1234 = [
        0.6, 0.9, 1.7, 2.2
    ]  # Gaussian kernel widths for x1, x2, x3, x4
    noise_level_model = noise_level_img / 255.  # noise level of model
    kernel_width = kernel_width_default_x1234[sf - 1]
    """
    # set your own kernel width !!!!!!!!!!
    """
    # kernel_width = 1.0

    k = utils_deblur.fspecial('gaussian', 25, kernel_width)
    k = sr.shift_pixel(k, sf)  # shift the kernel
    k /= np.sum(k)

    ##############################################################################

    show_img = False
    util.surf(k) if show_img else None
    x8 = True  # default: False, x8 to boost performance
    modelSigma1 = 49  # set sigma_1, default: 49
    modelSigma2 = max(sf, noise_level_model * 255.)
    classical_degradation = True  # set classical degradation or bicubic degradation

    task_current = 'sr'  # 'sr' for super-resolution
    n_channels = 1 if 'gray' in model_name else 3  # fixed
    model_zoo = 'model_zoo'  # fixed
    testsets = 'testsets'  # fixed
    results = 'results'  # fixed
    result_name = testset_name + '_realapplications_' + task_current + '_' + model_name
    model_path = os.path.join(model_zoo, model_name + '.pth')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    torch.cuda.empty_cache()

    # ----------------------------------------
    # L_path, E_path, H_path
    # ----------------------------------------
    L_path = os.path.join(testsets,
                          testset_name)  # L_path, for Low-quality images
    E_path = os.path.join(results, result_name)  # E_path, for Estimated images
    util.mkdir(E_path)

    logger_name = result_name
    utils_logger.logger_info(logger_name,
                             log_path=os.path.join(E_path,
                                                   logger_name + '.log'))
    logger = logging.getLogger(logger_name)

    # ----------------------------------------
    # load model
    # ----------------------------------------
    if 'drunet' in model_name:
        from models.network_unet import UNetRes as net
        model = net(in_nc=n_channels + 1,
                    out_nc=n_channels,
                    nc=[64, 128, 256, 512],
                    nb=4,
                    act_mode='R',
                    downsample_mode="strideconv",
                    upsample_mode="convtranspose")
        model.load_state_dict(torch.load(model_path), strict=True)
        model.eval()
        for _, v in model.named_parameters():
            v.requires_grad = False
        model = model.to(device)
    elif 'ircnn' in model_name:
        from models.network_dncnn import IRCNN as net
        model = net(in_nc=n_channels, out_nc=n_channels, nc=64)
        model25 = torch.load(model_path)
        former_idx = 0

    logger.info('model_name:{}, image sigma:{:.3f}, model sigma:{:.3f}'.format(
        model_name, noise_level_img, noise_level_model))
    logger.info('Model path: {:s}'.format(model_path))
    logger.info(L_path)
    L_paths = util.get_image_paths(L_path)

    for idx, img in enumerate(L_paths):

        # --------------------------------
        # (1) get img_L
        # --------------------------------
        logger.info('Model path: {:s} Image: {:s}'.format(model_path, img))
        img_name, ext = os.path.splitext(os.path.basename(img))
        img_L = util.imread_uint(img, n_channels=n_channels)
        img_L = util.uint2single(img_L)
        img_L = util.modcrop(img_L, 8)  # modcrop

        # --------------------------------
        # (2) get rhos and sigmas
        # --------------------------------
        rhos, sigmas = pnp.get_rho_sigma(sigma=max(0.255 / 255.,
                                                   noise_level_model),
                                         iter_num=iter_num,
                                         modelSigma1=modelSigma1,
                                         modelSigma2=modelSigma2,
                                         w=1)
        rhos, sigmas = torch.tensor(rhos).to(device), torch.tensor(sigmas).to(
            device)

        # --------------------------------
        # (3) initialize x, and pre-calculation
        # --------------------------------
        x = cv2.resize(img_L, (img_L.shape[1] * sf, img_L.shape[0] * sf),
                       interpolation=cv2.INTER_CUBIC)

        if np.ndim(x) == 2:
            x = x[..., None]

        if classical_degradation:
            x = sr.shift_pixel(x, sf)
        x = util.single2tensor4(x).to(device)

        img_L_tensor, k_tensor = util.single2tensor4(
            img_L), util.single2tensor4(np.expand_dims(k, 2))
        [k_tensor, img_L_tensor] = util.todevice([k_tensor, img_L_tensor],
                                                 device)
        FB, FBC, F2B, FBFy = sr.pre_calculate(img_L_tensor, k_tensor, sf)

        # --------------------------------
        # (4) main iterations
        # --------------------------------
        for i in range(iter_num):

            print('Iter: {} / {}'.format(i, iter_num))

            # --------------------------------
            # step 1, FFT
            # --------------------------------
            tau = rhos[i].float().repeat(1, 1, 1, 1)
            x = sr.data_solution(x, FB, FBC, F2B, FBFy, tau, sf)

            if 'ircnn' in model_name:
                current_idx = int(
                    np.ceil(sigmas[i].cpu().numpy() * 255. / 2.) - 1)  # np.int was removed in NumPy >= 1.24

                if current_idx != former_idx:
                    model.load_state_dict(model25[str(current_idx)],
                                          strict=True)
                    model.eval()
                    for _, v in model.named_parameters():
                        v.requires_grad = False
                    model = model.to(device)
                former_idx = current_idx

            # --------------------------------
            # step 2, denoiser
            # --------------------------------
            if x8:
                x = util.augment_img_tensor4(x, i % 8)

            if 'drunet' in model_name:
                x = torch.cat(
                    (x, sigmas[i].repeat(1, 1, x.shape[2], x.shape[3])), dim=1)
                x = utils_model.test_mode(model,
                                          x,
                                          mode=2,
                                          refield=64,
                                          min_size=256,
                                          modulo=16)
            elif 'ircnn' in model_name:
                x = model(x)

            if x8:
                if i % 8 == 3 or i % 8 == 5:
                    x = util.augment_img_tensor4(x, 8 - i % 8)
                else:
                    x = util.augment_img_tensor4(x, i % 8)

        # --------------------------------
        # (3) img_E
        # --------------------------------
        img_E = util.tensor2uint(x)
        util.imsave(
            img_E,
            os.path.join(E_path, img_name + '_x' + str(sf) + '_' + model_name +
                         '.png'))
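For reference, a minimal NumPy-only sketch of the isotropic Gaussian blur kernel that `utils_deblur.fspecial('gaussian', 25, kernel_width)` is assumed to produce (the exact fspecial implementation may differ in small details):

import numpy as np

def gaussian_kernel(size=25, sigma=2.2):
    # Isotropic 2-D Gaussian, normalized to sum to 1; a rough stand-in
    # for utils_deblur.fspecial('gaussian', size, sigma).
    ax = np.arange(size) - (size - 1) / 2.0
    xx, yy = np.meshgrid(ax, ax)
    k = np.exp(-(xx ** 2 + yy ** 2) / (2.0 * sigma ** 2))
    return k / k.sum()

k = gaussian_kernel(25, 2.2)  # e.g. the default width used for x4 above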
Example #3
def main():

    # ----------------------------------------
    # Preparation
    # ----------------------------------------
    model_name = 'usrnet'  # 'usrgan' | 'usrnet' | 'usrgan_tiny' | 'usrnet_tiny'
    testset_name = 'set5'  # test set,  'set5' | 'srbsd68'
    test_sf = [4] if 'gan' in model_name else [
        2, 3, 4
    ]  # scale factor, from {1,2,3,4}

    show_img = False  # default: False
    save_L = True  # save LR image
    save_E = True  # save estimated image
    save_LEH = False  # save zoomed LR, E and H images

    # ----------------------------------------
    # load testing kernels
    # ----------------------------------------
    # kernels = hdf5storage.loadmat(os.path.join('kernels', 'kernels.mat'))['kernels']
    kernels = loadmat(os.path.join('kernels', 'kernels_12.mat'))['kernels']

    n_channels = 1 if 'gray' in model_name else 3  # 3 for color image, 1 for grayscale image
    model_pool = 'model_zoo'  # fixed
    testsets = 'testsets'  # fixed
    results = 'results'  # fixed
    noise_level_img = 0  # fixed: 0, noise level for LR image
    noise_level_model = noise_level_img  # fixed, noise level of model, default 0
    result_name = testset_name + '_' + model_name
    model_path = os.path.join(model_pool, model_name + '.pth')

    # ----------------------------------------
    # L_path = H_path, E_path, logger
    # ----------------------------------------
    L_path = os.path.join(
        testsets,
        testset_name)  # L_path and H_path, fixed, for Low-quality images
    E_path = os.path.join(results,
                          result_name)  # E_path, fixed, for Estimated images
    util.mkdir(E_path)

    logger_name = result_name
    utils_logger.logger_info(logger_name,
                             log_path=os.path.join(E_path,
                                                   logger_name + '.log'))
    logger = logging.getLogger(logger_name)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # ----------------------------------------
    # load model
    # ----------------------------------------
    if 'tiny' in model_name:
        model = net(n_iter=6,
                    h_nc=32,
                    in_nc=4,
                    out_nc=3,
                    nc=[16, 32, 64, 64],
                    nb=2,
                    act_mode="R",
                    downsample_mode='strideconv',
                    upsample_mode="convtranspose")
    else:
        model = net(n_iter=8,
                    h_nc=64,
                    in_nc=4,
                    out_nc=3,
                    nc=[64, 128, 256, 512],
                    nb=2,
                    act_mode="R",
                    downsample_mode='strideconv',
                    upsample_mode="convtranspose")

    model.load_state_dict(torch.load(model_path), strict=True)
    model.eval()
    for key, v in model.named_parameters():
        v.requires_grad = False
    number_parameters = sum(map(lambda x: x.numel(), model.parameters()))
    model = model.to(device)

    logger.info('Model path: {:s}'.format(model_path))
    logger.info('Params number: {}'.format(number_parameters))
    logger.info('Model_name:{}, image sigma:{}'.format(model_name,
                                                       noise_level_img))
    logger.info(L_path)
    L_paths = util.get_image_paths(L_path)

    # --------------------------------
    # read images
    # --------------------------------
    test_results_ave = OrderedDict()
    test_results_ave['psnr_sf_k'] = []

    for sf in test_sf:

        for k_index in range(kernels.shape[1]):

            test_results = OrderedDict()
            test_results['psnr'] = []
            kernel = kernels[0, k_index].astype(np.float64)

            ## other kernels
            # kernel = utils_deblur.blurkernel_synthesis(h=25)  # motion kernel
            # kernel = utils_deblur.fspecial('gaussian', 25, 1.6) # Gaussian kernel
            # kernel = sr.shift_pixel(kernel, sf)  # pixel shift; optional
            # kernel /= np.sum(kernel)

            util.surf(kernel) if show_img else None
            idx = 0

            for img in L_paths:

                # --------------------------------
                # (1) classical degradation, img_L
                # --------------------------------
                idx += 1
                img_name, ext = os.path.splitext(os.path.basename(img))
                img_H = util.imread_uint(
                    img, n_channels=n_channels)  # HR image, int8
                img_H = util.modcrop(img_H, np.lcm(sf, 8))  # modcrop

                # generate degraded LR image
                img_L = ndimage.convolve(img_H,
                                         kernel[..., np.newaxis],
                                         mode='wrap')  # blur; scipy.ndimage.filters is deprecated
                img_L = sr.downsample_np(
                    img_L, sf,
                    center=False)  # downsample, standard s-fold downsampler
                img_L = util.uint2single(img_L)  # uint2single

                np.random.seed(seed=0)  # for reproducibility
                img_L += np.random.normal(0, noise_level_img,
                                          img_L.shape)  # add AWGN

                util.imshow(util.single2uint(img_L)) if show_img else None

                x = util.single2tensor4(img_L)
                k = util.single2tensor4(kernel[..., np.newaxis])
                sigma = torch.tensor(noise_level_model).float().view(
                    [1, 1, 1, 1])
                [x, k, sigma] = [el.to(device) for el in [x, k, sigma]]

                # --------------------------------
                # (2) inference
                # --------------------------------
                x = model(x, k, sf, sigma)

                # --------------------------------
                # (3) img_E
                # --------------------------------
                img_E = util.tensor2uint(x)

                if save_E:
                    util.imsave(
                        img_E,
                        os.path.join(
                            E_path, img_name + '_x' + str(sf) + '_k' +
                            str(k_index + 1) + '_' + model_name + '.png'))

                # --------------------------------
                # (4) img_LEH
                # --------------------------------
                img_L = util.single2uint(img_L)
                if save_LEH:
                    k_v = kernel / np.max(kernel) * 1.2
                    k_v = util.single2uint(
                        np.tile(k_v[..., np.newaxis], [1, 1, 3]))
                    k_v = cv2.resize(k_v, (3 * k_v.shape[1], 3 * k_v.shape[0]),
                                     interpolation=cv2.INTER_NEAREST)
                    img_I = cv2.resize(
                        img_L, (sf * img_L.shape[1], sf * img_L.shape[0]),
                        interpolation=cv2.INTER_NEAREST)
                    img_I[:k_v.shape[0], -k_v.shape[1]:, :] = k_v
                    img_I[:img_L.shape[0], :img_L.shape[1], :] = img_L
                    util.imshow(np.concatenate([img_I, img_E, img_H], axis=1),
                                title='LR / Recovered / Ground-truth'
                                ) if show_img else None
                    util.imsave(
                        np.concatenate([img_I, img_E, img_H], axis=1),
                        os.path.join(
                            E_path, img_name + '_x' + str(sf) + '_k' +
                            str(k_index + 1) + '_LEH.png'))

                if save_L:
                    util.imsave(
                        img_L,
                        os.path.join(
                            E_path, img_name + '_x' + str(sf) + '_k' +
                            str(k_index + 1) + '_LR.png'))

                psnr = util.calculate_psnr(
                    img_E, img_H, border=sf**2)  # change with your own border
                test_results['psnr'].append(psnr)
                logger.info(
                    '{:->4d}--> {:>10s} -- x{:>2d} --k{:>2d} PSNR: {:.2f}dB'.
                    format(idx, img_name + ext, sf, k_index, psnr))

            ave_psnr_k = sum(test_results['psnr']) / len(test_results['psnr'])
            logger.info(
                '------> Average PSNR(RGB) of ({}) scale factor: ({}), kernel: ({}) sigma: ({}): {:.2f} dB'
                .format(testset_name, sf, k_index + 1, noise_level_model,
                        ave_psnr_k))
            test_results_ave['psnr_sf_k'].append(ave_psnr_k)
    logger.info(test_results_ave['psnr_sf_k'])
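A self-contained sketch of the classical degradation in step (1) above (blur, standard s-fold downsampling, AWGN); the downsampler is assumed to keep every sf-th pixel starting at index 0, matching `sr.downsample_np(..., center=False)`:

import numpy as np
from scipy import ndimage

def degrade_classical(img_H, kernel, sf, noise_level=0.0, seed=0):
    # img_H: HxWxC float image in [0, 1]; kernel: 2-D blur kernel summing to 1.
    img_L = ndimage.convolve(img_H, kernel[..., np.newaxis], mode='wrap')  # blur
    img_L = img_L[0::sf, 0::sf, ...]  # standard s-fold downsampler
    rng = np.random.default_rng(seed)  # fixed seed for reproducibility
    return img_L + rng.normal(0.0, noise_level, img_L.shape)  # add AWGN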
Example #4
def main():

    # ----------------------------------------
    # Preparation
    # ----------------------------------------
    model_name = 'usrnet'      # 'usrgan' | 'usrnet' | 'usrgan_tiny' | 'usrnet_tiny'
    testset_name = 'set_real'  # test set,  'set_real'
    test_image = 'chip.png'    # 'chip.png', 'comic.png'
    #test_image = 'comic.png'

    sf = 4                     # scale factor, only from {1, 2, 3, 4}
    show_img = False           # default: False
    save_E = True              # save estimated image
    save_LE = True             # save zoomed LR, Estimated images

    # ----------------------------------------
    # set noise level and kernel
    # ----------------------------------------
    if 'chip' in test_image:
        noise_level_img = 15       # noise level for LR image, 15 for chip
        kernel_width_default_x1234 = [0.6, 0.9, 1.7, 2.2] # Gaussian kernel widths for x1, x2, x3, x4
    else:
        noise_level_img = 2       # noise level for LR image, 0.5~3 for clean images
        kernel_width_default_x1234 = [0.4, 0.7, 1.5, 2.0] # default Gaussian kernel widths of clean/sharp images for x1, x2, x3, x4

    noise_level_model = noise_level_img/255.  # noise level of model
    kernel_width = kernel_width_default_x1234[sf-1]

    # set your own kernel width
    # kernel_width = 2.2

    k = utils_deblur.fspecial('gaussian', 25, kernel_width)
    k = sr.shift_pixel(k, sf)  # shift the kernel
    k /= np.sum(k)
    util.surf(k) if show_img else None
    # scio.savemat('kernel_realapplication.mat', {'kernel':k})

    # load approximated bicubic kernels
    # kernels = hdf5storage.loadmat(os.path.join('kernels', 'kernel_bicubicx234.mat'))['kernels']
    # kernels = loadmat(os.path.join('kernels', 'kernel_bicubicx234.mat'))['kernels']
    # kernel = kernels[0, sf-2].astype(np.float64)

    kernel = util.single2tensor4(k[..., np.newaxis])


    n_channels = 1 if 'gray' in model_name else 3  # 3 for color image, 1 for grayscale image
    model_pool = 'model_zoo'  # fixed
    testsets = 'testsets'     # fixed
    results = 'results'       # fixed
    result_name = testset_name + '_' + model_name
    model_path = os.path.join(model_pool, model_name+'.pth')

    # ----------------------------------------
    # L_path, E_path
    # ----------------------------------------
    L_path = os.path.join(testsets, testset_name) # L_path, fixed, for Low-quality images
    E_path = os.path.join(results, result_name)   # E_path, fixed, for Estimated images
    util.mkdir(E_path)

    logger_name = result_name
    utils_logger.logger_info(logger_name, log_path=os.path.join(E_path, logger_name+'.log'))
    logger = logging.getLogger(logger_name)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # ----------------------------------------
    # load model
    # ----------------------------------------
    if 'tiny' in model_name:
        model = net(n_iter=6, h_nc=32, in_nc=4, out_nc=3, nc=[16, 32, 64, 64],
                    nb=2, act_mode="R", downsample_mode='strideconv', upsample_mode="convtranspose")
    else:
        model = net(n_iter=8, h_nc=64, in_nc=4, out_nc=3, nc=[64, 128, 256, 512],
                    nb=2, act_mode="R", downsample_mode='strideconv', upsample_mode="convtranspose")

    model.load_state_dict(torch.load(model_path), strict=True)
    model.eval()
    for key, v in model.named_parameters():
        v.requires_grad = False

    number_parameters = sum(map(lambda x: x.numel(), model.parameters()))
    logger.info('Params number: {}'.format(number_parameters))
    model = model.to(device)
    logger.info('Model path: {:s}'.format(model_path))

    logger.info('model_name:{}, image sigma:{}'.format(model_name, noise_level_img))
    logger.info(L_path)

    img = os.path.join(L_path, test_image)
    # ------------------------------------
    # (1) img_L
    # ------------------------------------
    img_name, ext = os.path.splitext(os.path.basename(img))
    img_L = util.imread_uint(img, n_channels=n_channels)
    img_L = util.uint2single(img_L)

    util.imshow(img_L) if show_img else None
    w, h = img_L.shape[:2]
    logger.info('{:>10s}--> ({:>4d}x{:<4d})'.format(img_name+ext, w, h))

    # boundary handling
    border = 8      # default setting for kernel size 25x25
    img = cv2.resize(img_L, (sf*h, sf*w), interpolation=cv2.INTER_NEAREST)
    img = utils_deblur.wrap_boundary_liu(img, [int(np.ceil(sf*w/border+2)*border), int(np.ceil(sf*h/border+2)*border)])
    img_wrap = sr.downsample_np(img, sf, center=False)
    img_wrap[:w, :h, :] = img_L
    img_L = img_wrap

    util.imshow(util.single2uint(img_L), title='LR image with noise level {}'.format(noise_level_img)) if show_img else None

    img_L = util.single2tensor4(img_L)
    img_L = img_L.to(device)

    # ------------------------------------
    # (2) img_E
    # ------------------------------------
    sigma = torch.tensor(noise_level_model).float().view([1, 1, 1, 1])
    [img_L, kernel, sigma] = [el.to(device) for el in [img_L, kernel, sigma]]

    img_E = model(img_L, kernel, sf, sigma)

    img_E = util.tensor2uint(img_E)[:sf*w, :sf*h, ...]

    if save_E:
        util.imsave(img_E, os.path.join(E_path, img_name+'_x'+str(sf)+'_'+model_name+'.png'))

    # --------------------------------
    # (3) save img_LE
    # --------------------------------
    if save_LE:
        k_v = k/np.max(k)*1.2
        k_v = util.single2uint(np.tile(k_v[..., np.newaxis], [1, 1, 3]))
        k_factor = 3
        k_v = cv2.resize(k_v, (k_factor*k_v.shape[1], k_factor*k_v.shape[0]), interpolation=cv2.INTER_NEAREST)
        img_L = util.tensor2uint(img_L)[:w, :h, ...]
        img_I = cv2.resize(img_L, (sf*img_L.shape[1], sf*img_L.shape[0]), interpolation=cv2.INTER_NEAREST)
        img_I[:k_v.shape[0], :k_v.shape[1], :] = k_v
        util.imshow(np.concatenate([img_I, img_E], axis=1), title='LR / Recovered') if show_img else None
        util.imsave(np.concatenate([img_I, img_E], axis=1), os.path.join(E_path, img_name+'_x'+str(sf)+'_'+model_name+'_LE.png'))
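The boundary handling above pads the nearest-neighbour upsampled image to a multiple of the border size (with roughly two extra blocks of margin) before calling `wrap_boundary_liu`; a small sketch of that size computation, under the same assumptions as the code above:

import numpy as np

def padded_size(n, sf, border=8):
    # Target side length: sf*n rounded up to a multiple of `border`,
    # plus two extra blocks of margin, as in the example above.
    return int(np.ceil(sf * n / border + 2) * border)

# e.g. a 123x89 LR image upscaled x4 with the default border of 8:
print(padded_size(123, 4), padded_size(89, 4))  # 512 376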
Example #5
def main():

    # ----------------------------------------
    # Preparation
    # ----------------------------------------

    noise_level_img = 0/255.0            # set AWGN noise level for LR image, default: 0
    noise_level_model = noise_level_img  # set noise level of model, default: 0
    model_name = 'drunet_color'          # set denoiser, | 'drunet_color' | 'ircnn_gray' | 'drunet_gray' | 'ircnn_color'
    testset_name = 'srbsd68'             # set test set,  'set5' | 'srbsd68'
    x8 = True                            # default: False, x8 to boost performance
    test_sf = [2]                        # set scale factor, default: [2, 3, 4], [2], [3], [4]
    iter_num = 24                        # set number of iterations, default: 24 for SISR
    modelSigma1 = 49                     # set sigma_1, default: 49
    classical_degradation = True         # set classical degradation or bicubic degradation

    show_img = False                     # default: False
    save_L = True                        # save LR image
    save_E = True                        # save estimated image
    save_LEH = False                     # save zoomed LR, E and H images

    task_current = 'sr'                  # 'sr' for super-resolution
    n_channels = 1 if 'gray' in model_name else 3  # fixed
    model_zoo = 'model_zoo'              # fixed
    testsets = 'testsets'                # fixed
    results = 'results'                  # fixed
    result_name = testset_name + '_' + task_current + '_' + model_name
    model_path = os.path.join(model_zoo, model_name+'.pth')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    torch.cuda.empty_cache()

    # ----------------------------------------
    # L_path, E_path, H_path
    # ----------------------------------------

    L_path = os.path.join(testsets, testset_name) # L_path, for Low-quality images
    E_path = os.path.join(results, result_name)   # E_path, for Estimated images
    util.mkdir(E_path)

    logger_name = result_name
    utils_logger.logger_info(logger_name, log_path=os.path.join(E_path, logger_name+'.log'))
    logger = logging.getLogger(logger_name)

    # ----------------------------------------
    # load model
    # ----------------------------------------

    if 'drunet' in model_name:
        from models.network_unet import UNetRes as net
        model = net(in_nc=n_channels+1, out_nc=n_channels, nc=[64, 128, 256, 512], nb=4, act_mode='R', downsample_mode="strideconv", upsample_mode="convtranspose")
        model.load_state_dict(torch.load(model_path), strict=True)
        model.eval()
        for _, v in model.named_parameters():
            v.requires_grad = False
        model = model.to(device)
    elif 'ircnn' in model_name:
        from models.network_dncnn import IRCNN as net
        model = net(in_nc=n_channels, out_nc=n_channels, nc=64)
        model25 = torch.load(model_path)
        former_idx = 0

    logger.info('model_name:{}, image sigma:{:.3f}, model sigma:{:.3f}'.format(model_name, noise_level_img, noise_level_model))
    logger.info('Model path: {:s}'.format(model_path))
    logger.info(L_path)
    L_paths = util.get_image_paths(L_path)

    # --------------------------------
    # load kernel
    # --------------------------------

    # kernels = hdf5storage.loadmat(os.path.join('kernels', 'Levin09.mat'))['kernels']
    if classical_degradation:
        kernels = hdf5storage.loadmat(os.path.join('kernels', 'kernels_12.mat'))['kernels']
    else:
        kernels = hdf5storage.loadmat(os.path.join('kernels', 'kernel_bicubicx234.mat'))['kernels']

    test_results_ave = OrderedDict()
    test_results_ave['psnr_sf_k'] = []
    test_results_ave['psnr_y_sf_k'] = []

    for sf in test_sf:
        border = sf
        modelSigma2 = max(sf, noise_level_model*255.)
        k_num = 8 if classical_degradation else 1

        for k_index in range(k_num):
            logger.info('--------- sf:{:>1d} --k:{:>2d} ---------'.format(sf, k_index))
            test_results = OrderedDict()
            test_results['psnr'] = []
            test_results['psnr_y'] = []

            if not classical_degradation:  # for bicubic degradation
                k_index = sf-2
            k = kernels[0, k_index].astype(np.float64)

            util.surf(k) if show_img else None

            for idx, img in enumerate(L_paths):

                # --------------------------------
                # (1) get img_L
                # --------------------------------

                img_name, ext = os.path.splitext(os.path.basename(img))
                img_H = util.imread_uint(img, n_channels=n_channels)
                img_H = util.modcrop(img_H, sf)  # modcrop

                if classical_degradation:
                    img_L = sr.classical_degradation(img_H, k, sf)
                    util.imshow(img_L) if show_img else None
                    img_L = util.uint2single(img_L)
                else:
                    img_L = util.imresize_np(util.uint2single(img_H), 1/sf)

                np.random.seed(seed=0)  # for reproducibility
                img_L += np.random.normal(0, noise_level_img, img_L.shape) # add AWGN

                # --------------------------------
                # (2) get rhos and sigmas
                # --------------------------------

                rhos, sigmas = pnp.get_rho_sigma(sigma=max(0.255/255., noise_level_model), iter_num=iter_num, modelSigma1=modelSigma1, modelSigma2=modelSigma2, w=1)
                rhos, sigmas = torch.tensor(rhos).to(device), torch.tensor(sigmas).to(device)

                # --------------------------------
                # (3) initialize x, and pre-calculation
                # --------------------------------

                x = cv2.resize(img_L, (img_L.shape[1]*sf, img_L.shape[0]*sf), interpolation=cv2.INTER_CUBIC)
                if np.ndim(x)==2:
                    x = x[..., None]

                if classical_degradation:
                    x = sr.shift_pixel(x, sf)
                x = util.single2tensor4(x).to(device)

                img_L_tensor, k_tensor = util.single2tensor4(img_L), util.single2tensor4(np.expand_dims(k, 2))
                [k_tensor, img_L_tensor] = util.todevice([k_tensor, img_L_tensor], device)
                FB, FBC, F2B, FBFy = sr.pre_calculate(img_L_tensor, k_tensor, sf)

                # --------------------------------
                # (4) main iterations
                # --------------------------------

                for i in range(iter_num):

                    # --------------------------------
                    # step 1, FFT
                    # --------------------------------

                    tau = rhos[i].float().repeat(1, 1, 1, 1)
                    x = sr.data_solution(x.float(), FB, FBC, F2B, FBFy, tau, sf)

                    if 'ircnn' in model_name:
                        current_idx = int(np.ceil(sigmas[i].cpu().numpy()*255./2.)-1)  # np.int was removed in NumPy >= 1.24
            
                        if current_idx != former_idx:
                            model.load_state_dict(model25[str(current_idx)], strict=True)
                            model.eval()
                            for _, v in model.named_parameters():
                                v.requires_grad = False
                            model = model.to(device)
                        former_idx = current_idx

                    # --------------------------------
                    # step 2, denoiser
                    # --------------------------------

                    if x8:
                        x = util.augment_img_tensor4(x, i % 8)
                        
                    if 'drunet' in model_name:
                        x = torch.cat((x, sigmas[i].float().repeat(1, 1, x.shape[2], x.shape[3])), dim=1)
                        x = utils_model.test_mode(model, x, mode=2, refield=32, min_size=256, modulo=16)
                    elif 'ircnn' in model_name:
                        x = model(x)

                    if x8:
                        if i % 8 == 3 or i % 8 == 5:
                            x = util.augment_img_tensor4(x, 8 - i % 8)
                        else:
                            x = util.augment_img_tensor4(x, i % 8)

                # --------------------------------
                # (3) img_E
                # --------------------------------

                img_E = util.tensor2uint(x)

                if save_E:
                    util.imsave(img_E, os.path.join(E_path, img_name+'_x'+str(sf)+'_k'+str(k_index)+'_'+model_name+'.png'))

                if n_channels == 1:
                    img_H = img_H.squeeze()

                # --------------------------------
                # (4) img_LEH
                # --------------------------------

                img_L = util.single2uint(img_L).squeeze()

                if save_LEH:
                    k_v = k/np.max(k)*1.0
                    if n_channels==1:
                        k_v = util.single2uint(k_v)
                    else:
                        k_v = util.single2uint(np.tile(k_v[..., np.newaxis], [1, 1, n_channels]))
                    k_v = cv2.resize(k_v, (3*k_v.shape[1], 3*k_v.shape[0]), interpolation=cv2.INTER_NEAREST)
                    img_I = cv2.resize(img_L, (sf*img_L.shape[1], sf*img_L.shape[0]), interpolation=cv2.INTER_NEAREST)
                    img_I[:k_v.shape[0], -k_v.shape[1]:, ...] = k_v
                    img_I[:img_L.shape[0], :img_L.shape[1], ...] = img_L
                    util.imshow(np.concatenate([img_I, img_E, img_H], axis=1), title='LR / Recovered / Ground-truth') if show_img else None
                    util.imsave(np.concatenate([img_I, img_E, img_H], axis=1), os.path.join(E_path, img_name+'_x'+str(sf)+'_k'+str(k_index)+'_LEH.png'))

                if save_L:
                    util.imsave(img_L, os.path.join(E_path, img_name+'_x'+str(sf)+'_k'+str(k_index)+'_LR.png'))

                psnr = util.calculate_psnr(img_E, img_H, border=border)
                test_results['psnr'].append(psnr)
                logger.info('{:->4d}--> {:>10s} -- sf:{:>1d} --k:{:>2d} PSNR: {:.2f}dB'.format(idx+1, img_name+ext, sf, k_index, psnr))

                if n_channels == 3:
                    img_E_y = util.rgb2ycbcr(img_E, only_y=True)
                    img_H_y = util.rgb2ycbcr(img_H, only_y=True)
                    psnr_y = util.calculate_psnr(img_E_y, img_H_y, border=border)
                    test_results['psnr_y'].append(psnr_y)

            # --------------------------------
            # Average PSNR for all kernels
            # --------------------------------

            ave_psnr_k = sum(test_results['psnr']) / len(test_results['psnr'])
            logger.info('------> Average PSNR(RGB) of ({}) scale factor: ({}), kernel: ({}) sigma: ({:.2f}): {:.2f} dB'.format(testset_name, sf, k_index, noise_level_model, ave_psnr_k))
            test_results_ave['psnr_sf_k'].append(ave_psnr_k)

            if n_channels == 3:  # RGB image
                ave_psnr_y_k = sum(test_results['psnr_y']) / len(test_results['psnr_y'])
                logger.info('------> Average PSNR(Y) of ({}) scale factor: ({}), kernel: ({}) sigma: ({:.2f}): {:.2f} dB'.format(testset_name, sf, k_index, noise_level_model, ave_psnr_y_k))
                test_results_ave['psnr_y_sf_k'].append(ave_psnr_y_k)

    # ---------------------------------------
    # Average PSNR for all sf and kernels
    # ---------------------------------------

    ave_psnr_sf_k = sum(test_results_ave['psnr_sf_k']) / len(test_results_ave['psnr_sf_k'])
    logger.info('------> Average PSNR of ({}) {:.2f} dB'.format(testset_name, ave_psnr_sf_k))
    if n_channels == 3:
        ave_psnr_y_sf_k = sum(test_results_ave['psnr_y_sf_k']) / len(test_results_ave['psnr_y_sf_k'])
        logger.info('------> Average PSNR of ({}) {:.2f} dB'.format(testset_name, ave_psnr_y_sf_k))
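A minimal sketch of the PSNR computation that `util.calculate_psnr` is assumed to perform for uint8 images (crop the border, then 20*log10(255/sqrt(MSE))); this is an illustrative stand-in, not the library's own code:

import numpy as np

def psnr_uint8(img_E, img_H, border=0):
    # Crop `border` pixels on every side, then compute PSNR on the remainder.
    if border > 0:
        img_E = img_E[border:-border, border:-border, ...]
        img_H = img_H[border:-border, border:-border, ...]
    mse = np.mean((img_E.astype(np.float64) - img_H.astype(np.float64)) ** 2)
    return float('inf') if mse == 0 else 20.0 * np.log10(255.0 / np.sqrt(mse))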
Example #6
def main():

    # ----------------------------------------
    # Preparation
    # ----------------------------------------
    model_name = 'usrnet_tiny'  # 'usrgan' | 'usrnet' | 'usrgan_tiny' | 'usrnet_tiny'
    testset_name = 'srcvte'  # test set,  'set5' | 'srbsd68' | 'srcvte'
    test_sf = [
        4
    ]  # if 'gan' in model_name else [2, 3, 4]  # scale factor, from {1,2,3,4}

    load_kernels = False
    show_img = False  # default: False
    save_L = False  # save LR image
    save_E = True  # save estimated image
    save_LEH = False  # save zoomed LR, E and H images

    # ----------------------------------------
    # load testing kernels
    # ----------------------------------------
    # kernels = hdf5storage.loadmat(os.path.join('kernels', 'kernels.mat'))['kernels']
    kernels = loadmat(os.path.join(
        'kernels', 'kernels_12.mat'))['kernels'] if load_kernels else None

    n_channels = 1 if 'gray' in model_name else 3  # 3 for color image, 1 for grayscale image
    model_pool = '/home/dengzeshuai/pretrained_models/USRnet/'  # fixed
    testsets = '/home/datasets/sr/'  # fixed
    results = 'results'  # fixed
    noise_level_img = 0  # fixed: 0, noise level for LR image
    noise_level_model = noise_level_img  # fixed, noise level of model, default 0
    result_name = testset_name + '_' + model_name + '_blur'
    model_path = os.path.join(model_pool, model_name + '.pth')

    # ----------------------------------------
    # L_path = H_path, E_path, logger
    # ----------------------------------------
    L_path = os.path.join(
        testsets,
        testset_name)  # L_path and H_path, fixed, for Low-quality images
    if testset_name == 'srcvte':
        L_path = os.path.join(testsets, testset_name, 'LR_val')
        H_path = os.path.join(testsets, testset_name, 'HR_val')
        video_names = os.listdir(H_path)
    else:
        H_path = None  # avoid a NameError in the need_H check below
    E_path = os.path.join(results,
                          result_name)  # E_path, fixed, for Estimated images
    util.mkdir(E_path)

    logger_name = result_name
    utils_logger.logger_info(logger_name,
                             log_path=os.path.join(E_path,
                                                   logger_name + '.log'))
    logger = logging.getLogger(logger_name)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # ----------------------------------------
    # load model
    # ----------------------------------------
    if 'tiny' in model_name:
        model = net(n_iter=6,
                    h_nc=32,
                    in_nc=4,
                    out_nc=3,
                    nc=[16, 32, 64, 64],
                    nb=2,
                    act_mode="R",
                    downsample_mode='strideconv',
                    upsample_mode="convtranspose")
    else:
        model = net(n_iter=8,
                    h_nc=64,
                    in_nc=4,
                    out_nc=3,
                    nc=[64, 128, 256, 512],
                    nb=2,
                    act_mode="R",
                    downsample_mode='strideconv',
                    upsample_mode="convtranspose")

    model.load_state_dict(torch.load(model_path), strict=True)
    model.eval()
    for key, v in model.named_parameters():
        v.requires_grad = False
    number_parameters = sum(map(lambda x: x.numel(), model.parameters()))
    model = model.to(device)

    logger.info('Model path: {:s}'.format(model_path))
    logger.info('Params number: {}'.format(number_parameters))
    logger.info('Model_name:{}, image sigma:{}'.format(model_name,
                                                       noise_level_img))
    logger.info(L_path)
    L_paths = util.get_image_paths(L_path)
    need_H = H_path is not None
    H_paths = util.get_image_paths(H_path) if need_H else None

    # --------------------------------
    # read images
    # --------------------------------
    test_results_ave = OrderedDict()
    test_results_ave['psnr_sf_k'] = []
    test_results_ave['ssim_sf_k'] = []
    test_results_ave['psnr_y_sf_k'] = []
    test_results_ave['ssim_y_sf_k'] = []

    for sf in test_sf:
        loop = kernels.shape[1] if load_kernels else 1
        for k_index in range(loop):

            test_results = OrderedDict()
            test_results['psnr'] = []
            test_results['ssim'] = []
            test_results['psnr_y'] = []
            test_results['ssim_y'] = []

            if load_kernels:
                kernel = kernels[0, k_index].astype(np.float64)
            else:
                ## other kernels
                # kernel = utils_deblur.blurkernel_synthesis(h=25)  # motion kernel
                kernel = utils_deblur.fspecial('gaussian', 25,
                                               1.6)  # Gaussian kernel
                kernel = sr.shift_pixel(kernel, sf)  # pixel shift; optional
                kernel /= np.sum(kernel)

            util.surf(kernel) if show_img else None
            # idx = 0

            for idx, img in enumerate(L_paths):

                # --------------------------------
                # (1) classical degradation, img_L
                # --------------------------------

                img_name, ext = os.path.splitext(os.path.basename(img))
                if testset_name == 'srcvte':
                    video_name = os.path.basename(os.path.dirname(img))
                img_L = util.imread_uint(img, n_channels=n_channels)
                img_L = util.uint2single(img_L)

                # generate degraded LR image
                # img_L = ndimage.filters.convolve(img_H, kernel[..., np.newaxis], mode='wrap')  # blur
                # img_L = sr.downsample_np(img_L, sf, center=False)  # downsample, standard s-fold downsampler
                # img_L = util.uint2single(img_L)  # uint2single

                # np.random.seed(seed=0)  # for reproducibility
                # img_L += np.random.normal(0, noise_level_img, img_L.shape) # add AWGN

                util.imshow(util.single2uint(img_L)) if show_img else None

                x = util.single2tensor4(img_L)
                k = util.single2tensor4(kernel[..., np.newaxis])
                sigma = torch.tensor(noise_level_model).float().view(
                    [1, 1, 1, 1])
                [x, k, sigma] = [el.to(device) for el in [x, k, sigma]]

                # --------------------------------
                # (2) inference
                # --------------------------------
                x = model(x, k, sf, sigma)

                # --------------------------------
                # (3) img_E
                # --------------------------------
                img_E = util.tensor2uint(x)

                if save_E:
                    if testset_name == 'srcvte':
                        save_path = os.path.join(E_path, video_name)
                        util.mkdir(save_path)
                        # util.imsave(img_E, os.path.join(save_path, img_name+'_k'+str(k_index+1)+'.png'))
                        util.imsave(img_E,
                                    os.path.join(save_path, img_name + '.png'))
                    else:
                        util.imsave(
                            img_E,
                            os.path.join(
                                E_path, img_name + '_x' + str(sf) + '_k' +
                                str(k_index + 1) + '_' + model_name + '.png'))

                # --------------------------------
                # (4) img_H
                # --------------------------------
                if need_H:
                    img_H = util.imread_uint(H_paths[idx],
                                             n_channels=n_channels)
                    img_H = img_H.squeeze()
                    img_H = util.modcrop(img_H, sf)

                    psnr = util.calculate_psnr(
                        img_E, img_H, border=sf)  # change with your own border
                    ssim = util.calculate_ssim(img_E, img_H, border=sf)
                    test_results['psnr'].append(psnr)
                    test_results['ssim'].append(ssim)

                    if np.ndim(img_H) == 3:  # RGB image
                        img_E_y = util.rgb2ycbcr(img_E, only_y=True)
                        img_H_y = util.rgb2ycbcr(img_H, only_y=True)
                        psnr_y = util.calculate_psnr(img_E_y,
                                                     img_H_y,
                                                     border=sf)
                        ssim_y = util.calculate_ssim(img_E_y,
                                                     img_H_y,
                                                     border=sf)
                        test_results['psnr_y'].append(psnr_y)
                        test_results['ssim_y'].append(ssim_y)
                        logger.info(
                            '{:->4d} --> {:>4s}--> {:>10s} -- x{:>2d} --k{:>2d} PSNR: {:.2f}dB SSIM: {:.4f}'
                            .format(idx, video_name, img_name + ext, sf,
                                    k_index, psnr_y, ssim_y))
                    else:
                        logger.info(
                            '{:->4d} --> {:>4s}--> {:>10s} -- x{:>2d} --k{:>2d} PSNR: {:.2f}dB SSIM: {:.4f}'
                            .format(idx, video_name, img_name + ext, sf,
                                    k_index, psnr, ssim))

            if need_H:
                ave_psnr = sum(test_results['psnr']) / len(
                    test_results['psnr'])
                ave_ssim = sum(test_results['ssim']) / len(
                    test_results['ssim'])
                logger.info(
                    'Average PSNR/SSIM(RGB) - {} - x{} --PSNR: {:.2f} dB; SSIM: {:.4f}'
                    .format(result_name, sf, ave_psnr, ave_ssim))
                logger.info(
                    '------> Average PSNR(RGB) - {} - x{}, kernel:{} sigma:{} --PSNR: {:.2f} dB; SSIM: {:.4f}'
                    .format(testset_name, sf, k_index + 1, noise_level_model,
                            ave_psnr, ave_ssim))
                if np.ndim(img_H) == 3:
                    ave_psnr_y = sum(test_results['psnr_y']) / len(
                        test_results['psnr_y'])
                    ave_ssim_y = sum(test_results['ssim_y']) / len(
                        test_results['ssim_y'])
                    logger.info(
                        '------> Average PSNR(Y) - {} - x{}, kernel:{} sigma:{} --PSNR: {:.2f} dB; SSIM: {:.4f}'
                        .format(testset_name, sf, k_index + 1,
                                noise_level_model, ave_psnr_y, ave_ssim_y))

                test_results_ave['psnr_sf_k'].append(ave_psnr)
                test_results_ave['ssim_sf_k'].append(ave_ssim)
                if np.ndim(img_H) == 3:
                    test_results_ave['psnr_y_sf_k'].append(ave_psnr_y)
                    test_results_ave['ssim_y_sf_k'].append(ave_ssim_y)

    logger.info(test_results_ave['psnr_sf_k'])
    logger.info(test_results_ave['ssim_sf_k'])
    if np.ndim(img_H) == 3:
        logger.info(test_results_ave['psnr_y_sf_k'])
        logger.info(test_results_ave['ssim_y_sf_k'])
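For the Y-channel metrics, a sketch of the BT.601 luma extraction that `util.rgb2ycbcr(img, only_y=True)` is assumed to perform for uint8 RGB input (the exact scaling convention in the library may differ):

import numpy as np

def rgb_to_y_bt601(img_uint8):
    # ITU-R BT.601 luma in the [16, 235] range, computed from RGB scaled to [0, 1].
    img = img_uint8.astype(np.float64) / 255.0
    return 16.0 + 65.481 * img[..., 0] + 128.553 * img[..., 1] + 24.966 * img[..., 2]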