Code example #1
def main():

    # ----------------------------------------
    # Preparation
    # ----------------------------------------
    model_name = 'usrnet'  # 'usrgan' | 'usrnet' | 'usrgan_tiny' | 'usrnet_tiny'
    testset_name = 'set5'  # test set,  'set5' | 'srbsd68'
    need_degradation = True  # default: True
    sf = 4  # scale factor, only from {2, 3, 4}
    show_img = False  # default: False
    save_L = True  # save LR image
    save_E = True  # save estimated image

    # load approximated bicubic kernels
    #kernels = hdf5storage.loadmat(os.path.join('kernels', 'kernels_bicubicx234.mat'))['kernels']
    kernels = loadmat(os.path.join('kernels',
                                   'kernels_bicubicx234.mat'))['kernels']
    kernel = kernels[0, sf - 2].astype(np.float64)
    kernel = util.single2tensor4(kernel[..., np.newaxis])

    task_current = 'sr'  # fixed, 'sr' for super-resolution
    n_channels = 3  # fixed, 3 for color image
    model_pool = 'model_zoo'  # fixed
    testsets = 'testsets'  # fixed
    results = 'results'  # fixed
    noise_level_img = 0  # fixed: 0, noise level for LR image
    noise_level_model = noise_level_img  # fixed, noise level of model, default 0
    result_name = testset_name + '_' + model_name + '_bicubic'
    border = sf if task_current == 'sr' else 0  # shave border to calculate PSNR and SSIM
    model_path = os.path.join(model_pool, model_name + '.pth')

    # ----------------------------------------
    # L_path, E_path, H_path
    # ----------------------------------------
    L_path = os.path.join(
        testsets, testset_name)  # L_path, fixed, for Low-quality images
    H_path = L_path  # H_path, 'None' | L_path, for High-quality images
    E_path = os.path.join(results,
                          result_name)  # E_path, fixed, for Estimated images
    util.mkdir(E_path)

    if H_path == L_path:
        need_degradation = True
    logger_name = result_name
    utils_logger.logger_info(logger_name,
                             log_path=os.path.join(E_path,
                                                   logger_name + '.log'))
    logger = logging.getLogger(logger_name)

    need_H = True if H_path is not None else False
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # ----------------------------------------
    # load model
    # ----------------------------------------
    from models.network_usrnet import USRNet as net  # for pytorch version <= 1.7.1
    # from models.network_usrnet_v1 import USRNet as net  # for pytorch version >=1.8.1

    if 'tiny' in model_name:
        model = net(n_iter=6,
                    h_nc=32,
                    in_nc=4,
                    out_nc=3,
                    nc=[16, 32, 64, 64],
                    nb=2,
                    act_mode="R",
                    downsample_mode='strideconv',
                    upsample_mode="convtranspose")
    else:
        model = net(n_iter=8,
                    h_nc=64,
                    in_nc=4,
                    out_nc=3,
                    nc=[64, 128, 256, 512],
                    nb=2,
                    act_mode="R",
                    downsample_mode='strideconv',
                    upsample_mode="convtranspose")

    model.load_state_dict(torch.load(model_path), strict=True)
    model.eval()
    for key, v in model.named_parameters():
        v.requires_grad = False

    number_parameters = sum(map(lambda x: x.numel(), model.parameters()))
    logger.info('Params number: {}'.format(number_parameters))
    model = model.to(device)
    logger.info('Model path: {:s}'.format(model_path))

    test_results = OrderedDict()
    test_results['psnr'] = []
    test_results['ssim'] = []
    test_results['psnr_y'] = []
    test_results['ssim_y'] = []

    logger.info('model_name:{}, image sigma:{}'.format(model_name,
                                                       noise_level_img))
    logger.info(L_path)
    L_paths = util.get_image_paths(L_path)
    H_paths = util.get_image_paths(H_path) if need_H else None

    for idx, img in enumerate(L_paths):

        # ------------------------------------
        # (1) img_L
        # ------------------------------------
        img_name, ext = os.path.splitext(os.path.basename(img))
        logger.info('{:->4d}--> {:>10s}'.format(idx + 1, img_name + ext))
        img_L = util.imread_uint(img, n_channels=n_channels)
        img_L = util.uint2single(img_L)

        # degradation process, bicubic downsampling
        if need_degradation:
            img_L = util.modcrop(img_L, sf)
            img_L = util.imresize_np(img_L, 1 / sf)

            # img_L = util.uint2single(util.single2uint(img_L))
            # np.random.seed(seed=0)  # for reproducibility
            # img_L += np.random.normal(0, noise_level_img/255., img_L.shape)

        w, h = img_L.shape[:2]

        if save_L:
            util.imsave(
                util.single2uint(img_L),
                os.path.join(E_path, img_name + '_LR_x' + str(sf) + '.png'))

        img = cv2.resize(img_L, (sf * h, sf * w),
                         interpolation=cv2.INTER_NEAREST)
        img = utils_deblur.wrap_boundary_liu(img, [
            int(np.ceil(sf * w / 8 + 2) * 8),
            int(np.ceil(sf * h / 8 + 2) * 8)
        ])
        img_wrap = sr.downsample_np(img, sf, center=False)
        img_wrap[:w, :h, :] = img_L
        img_L = img_wrap

        util.imshow(util.single2uint(img_L),
                    title='LR image with noise level {}'.format(
                        noise_level_img)) if show_img else None

        img_L = util.single2tensor4(img_L)
        img_L = img_L.to(device)

        # ------------------------------------
        # (2) img_E
        # ------------------------------------
        sigma = torch.tensor(noise_level_model).float().view([1, 1, 1, 1])
        [img_L, kernel,
         sigma] = [el.to(device) for el in [img_L, kernel, sigma]]

        img_E = model(img_L, kernel, sf, sigma)

        img_E = util.tensor2uint(img_E)
        img_E = img_E[:sf * w, :sf * h, :]

        if need_H:

            # --------------------------------
            # (3) img_H
            # --------------------------------
            img_H = util.imread_uint(H_paths[idx], n_channels=n_channels)
            img_H = img_H.squeeze()
            img_H = util.modcrop(img_H, sf)

            # --------------------------------
            # PSNR and SSIM
            # --------------------------------
            psnr = util.calculate_psnr(img_E, img_H, border=border)
            ssim = util.calculate_ssim(img_E, img_H, border=border)
            test_results['psnr'].append(psnr)
            test_results['ssim'].append(ssim)
            logger.info('{:s} - PSNR: {:.2f} dB; SSIM: {:.4f}.'.format(
                img_name + ext, psnr, ssim))
            util.imshow(np.concatenate([img_E, img_H], axis=1),
                        title='Recovered / Ground-truth') if show_img else None

            if np.ndim(img_H) == 3:  # RGB image
                img_E_y = util.rgb2ycbcr(img_E, only_y=True)
                img_H_y = util.rgb2ycbcr(img_H, only_y=True)
                psnr_y = util.calculate_psnr(img_E_y, img_H_y, border=border)
                ssim_y = util.calculate_ssim(img_E_y, img_H_y, border=border)
                test_results['psnr_y'].append(psnr_y)
                test_results['ssim_y'].append(ssim_y)

        # ------------------------------------
        # save results
        # ------------------------------------
        if save_E:
            util.imsave(
                img_E,
                os.path.join(
                    E_path,
                    img_name + '_x' + str(sf) + '_' + model_name + '.png'))

    if need_H:
        ave_psnr = sum(test_results['psnr']) / len(test_results['psnr'])
        ave_ssim = sum(test_results['ssim']) / len(test_results['ssim'])
        logger.info(
            'Average PSNR/SSIM(RGB) - {} - x{} --PSNR: {:.2f} dB; SSIM: {:.4f}'
            .format(result_name, sf, ave_psnr, ave_ssim))
        if np.ndim(img_H) == 3:
            ave_psnr_y = sum(test_results['psnr_y']) / len(
                test_results['psnr_y'])
            ave_ssim_y = sum(test_results['ssim_y']) / len(
                test_results['ssim_y'])
            logger.info(
                'Average PSNR/SSIM( Y ) - {} - x{} - PSNR: {:.2f} dB; SSIM: {:.4f}'
                .format(result_name, sf, ave_psnr_y, ave_ssim_y))
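These snippets are test functions excerpted from their source files, so the module-level imports are not shown. Below is a minimal import header and entry point that would make code example #1 runnable; it assumes a KAIR-style repository layout, so the exact module paths are an assumption rather than part of the original snippet.

# Assumed imports for code example #1 (sketch only; adjust the paths to your checkout).
import logging
import os.path
from collections import OrderedDict

import cv2
import numpy as np
import torch
from scipy.io import loadmat

from utils import utils_deblur
from utils import utils_image as util
from utils import utils_logger
from utils import utils_sisr as sr


if __name__ == '__main__':
    main()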
Code example #2
def main():

    # ----------------------------------------
    # Preparation
    # ----------------------------------------

    model_name = 'dncnn3'  # 'dncnn3' can be used for blind Gaussian denoising, JPEG deblocking (quality factor 5-100) and super-resolution (x2/x3/x4)

    # important!
    testset_name = 'bsd68'  # test set, low-quality grayscale/color JPEG images
    n_channels = 1  # set 1 for grayscale image, set 3 for color image

    x8 = False  # default: False, x8 to boost performance
    testsets = 'testsets'  # fixed
    results = 'results'  # fixed
    result_name = testset_name + '_' + model_name  # fixed
    L_path = os.path.join(
        testsets, testset_name
    )  # L_path, for Low-quality grayscale/Y-channel JPEG images
    E_path = os.path.join(results, result_name)  # E_path, for Estimated images
    util.mkdir(E_path)

    model_pool = 'model_zoo'  # fixed
    model_path = os.path.join(model_pool, model_name + '.pth')
    logger_name = result_name
    utils_logger.logger_info(logger_name,
                             log_path=os.path.join(E_path,
                                                   logger_name + '.log'))
    logger = logging.getLogger(logger_name)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # ----------------------------------------
    # load model
    # ----------------------------------------

    from models.network_dncnn import DnCNN as net
    model = net(in_nc=1, out_nc=1, nc=64, nb=20, act_mode='R')
    model.load_state_dict(torch.load(model_path), strict=True)
    model.eval()
    for k, v in model.named_parameters():
        v.requires_grad = False
    model = model.to(device)
    logger.info('Model path: {:s}'.format(model_path))
    number_parameters = sum(map(lambda x: x.numel(), model.parameters()))
    logger.info('Params number: {}'.format(number_parameters))

    logger.info(L_path)
    L_paths = util.get_image_paths(L_path)

    for idx, img in enumerate(L_paths):

        # ------------------------------------
        # (1) img_L
        # ------------------------------------
        img_name, ext = os.path.splitext(os.path.basename(img))
        logger.info('{:->4d}--> {:>10s}'.format(idx + 1, img_name + ext))
        img_L = util.imread_uint(img, n_channels=n_channels)
        img_L = util.uint2single(img_L)
        if n_channels == 3:
            ycbcr = util.rgb2ycbcr(img_L, False)
            img_L = ycbcr[..., 0:1]
        img_L = util.single2tensor4(img_L)
        img_L = img_L.to(device)

        # ------------------------------------
        # (2) img_E
        # ------------------------------------
        if not x8:
            img_E = model(img_L)
        else:
            img_E = utils_model.test_mode(model, img_L, mode=3)

        img_E = util.tensor2single(img_E)
        if n_channels == 3:
            ycbcr[..., 0] = img_E
            img_E = util.ycbcr2rgb(ycbcr)
        img_E = util.single2uint(img_E)

        # ------------------------------------
        # save results
        # ------------------------------------
        util.imsave(img_E, os.path.join(E_path, img_name + '.png'))
Code example #3
File: main_test_ffdnet.py  Project: 13952522076/PAIR
def main():

    # ----------------------------------------
    # Preparation
    # ----------------------------------------

    noise_level_img = 30  # noise level for noisy image
    noise_level_model = noise_level_img  # noise level for model
    model_name = 'ffdnet_color'  # 'ffdnet_gray' | 'ffdnet_color' | 'ffdnet_color_clip' | 'ffdnet_gray_clip'
    testset_name = 'CBSD68'  # test set,  'bsd68' | 'cbsd68' | 'set12'
    need_degradation = True  # default: True
    show_img = False  # default: False

    task_current = 'dn'  # 'dn' for denoising | 'sr' for super-resolution
    sf = 1  # unused for denoising
    if 'color' in model_name:
        n_channels = 3  # setting for color image
        nc = 96  # setting for color image
        nb = 12  # setting for color image
    else:
        n_channels = 1  # setting for grayscale image
        nc = 64  # setting for grayscale image
        nb = 15  # setting for grayscale image
    if 'clip' in model_name:
        use_clip = True  # clip the intensities into range of [0, 1]
    else:
        use_clip = False
    model_pool = 'model_zoo'  # fixed
    testsets = 'testsets'  # fixed
    results = 'results'  # fixed
    result_name = testset_name + '_' + model_name
    border = sf if task_current == 'sr' else 0  # shave border to calculate PSNR and SSIM
    model_path = os.path.join(model_pool, model_name + '.pth')

    # ----------------------------------------
    # L_path, E_path, H_path
    # ----------------------------------------

    L_path = os.path.join(testsets,
                          testset_name)  # L_path, for Low-quality images
    H_path = L_path  # H_path, for High-quality images
    E_path = os.path.join(results, result_name)  # E_path, for Estimated images
    util.mkdir(E_path)

    if H_path == L_path:
        need_degradation = True
    logger_name = result_name
    utils_logger.logger_info(logger_name,
                             log_path=os.path.join(E_path,
                                                   logger_name + '.log'))
    logger = logging.getLogger(logger_name)

    need_H = True if H_path is not None else False
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # ----------------------------------------
    # load model
    # ----------------------------------------

    from models.network_ffdnet import FFDNet as net
    model = net(in_nc=n_channels,
                out_nc=n_channels,
                nc=nc,
                nb=nb,
                act_mode='R')
    model.load_state_dict(torch.load(model_path), strict=True)
    model.eval()
    for k, v in model.named_parameters():
        v.requires_grad = False
    model = model.to(device)
    logger.info('Model path: {:s}'.format(model_path))

    test_results = OrderedDict()
    test_results['psnr'] = []
    test_results['ssim'] = []

    logger.info('model_name:{}, model sigma:{}, image sigma:{}'.format(
        model_name, noise_level_img, noise_level_model))
    logger.info(L_path)
    L_paths = util.get_image_paths(L_path)
    H_paths = util.get_image_paths(H_path) if need_H else None

    for idx, img in enumerate(L_paths):

        # ------------------------------------
        # (1) img_L
        # ------------------------------------

        img_name, ext = os.path.splitext(os.path.basename(img))
        # logger.info('{:->4d}--> {:>10s}'.format(idx+1, img_name+ext))
        img_L = util.imread_uint(img, n_channels=n_channels)
        img_L = util.uint2single(img_L)

        if need_degradation:  # degradation process
            np.random.seed(seed=0)  # for reproducibility
            img_L += np.random.normal(0, noise_level_img / 255., img_L.shape)
            if use_clip:
                img_L = util.uint2single(util.single2uint(img_L))

        util.imshow(util.single2uint(img_L),
                    title='Noisy image with noise level {}'.format(
                        noise_level_img)) if show_img else None

        img_L = util.single2tensor4(img_L)
        img_L = img_L.to(device)

        sigma = torch.full((1, 1, 1, 1),
                           noise_level_model / 255.).type_as(img_L)

        # ------------------------------------
        # (2) img_E
        # ------------------------------------

        img_E = model(img_L, sigma)
        img_E = util.tensor2uint(img_E)

        if need_H:

            # --------------------------------
            # (3) img_H
            # --------------------------------
            img_H = util.imread_uint(H_paths[idx], n_channels=n_channels)
            img_H = img_H.squeeze()

            # --------------------------------
            # PSNR and SSIM
            # --------------------------------

            psnr = util.calculate_psnr(img_E, img_H, border=border)
            ssim = util.calculate_ssim(img_E, img_H, border=border)
            test_results['psnr'].append(psnr)
            test_results['ssim'].append(ssim)
            logger.info('{:s} - PSNR: {:.2f} dB; SSIM: {:.4f}.'.format(
                img_name + ext, psnr, ssim))
            util.imshow(np.concatenate([img_E, img_H], axis=1),
                        title='Recovered / Ground-truth') if show_img else None

        # ------------------------------------
        # save results
        # ------------------------------------

        util.imsave(img_E, os.path.join(E_path, img_name + ext))

    if need_H:
        ave_psnr = sum(test_results['psnr']) / len(test_results['psnr'])
        ave_ssim = sum(test_results['ssim']) / len(test_results['ssim'])
        logger.info(
            'Average PSNR/SSIM(RGB) - {} - PSNR: {:.2f} dB; SSIM: {:.4f}'.
            format(result_name, ave_psnr, ave_ssim))
Code example #4
File: test_demo.py  Project: pppLang/AIM2019
def main(model=None, model_path=None):

    utils_logger.logger_info('AIM-track',
                             log_path=os.path.join(model_path,
                                                   'AIM-track.log'))
    logger = logging.getLogger('AIM-track')

    # --------------------------------
    # basic settings
    # --------------------------------
    testsets = 'DIV2K'  # DIV2K root path
    testset_L = 'DIV2K_test_LR_bicubic'  # test image folder name

    torch.cuda.current_device()
    torch.cuda.empty_cache()
    torch.backends.cudnn.benchmark = True
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # --------------------------------
    # load model
    # --------------------------------
    # model_path = os.path.join('MSRResNetx4_model', 'MSRResNetx4.pth')
    if model is None:
        model_path = 'MSRResNetx4_model'
        model = MSRResNet(in_nc=3, out_nc=3, nf=64, nb=16, upscale=4)
        model.load_state_dict(torch.load(os.path.join(model_path,
                                                      'model.pth')),
                              strict=True)
    model.eval()
    """ for k, v in model.named_parameters():
        v.requires_grad = False
    model = model.to(device) """

    # number of parameters
    number_parameters = sum(map(lambda x: x.numel(), model.parameters()))
    logger.info('Params number: {}'.format(number_parameters))
    print('Params number: {}'.format(number_parameters))

    # --------------------------------
    # read image
    # --------------------------------
    L_folder = os.path.join(testsets, testset_L)
    assert os.path.isdir(L_folder)  # check the test images path
    E_folder = os.path.join(model_path, 'results')
    util.mkdir(E_folder)

    # record PSNR, runtime
    test_results = OrderedDict()
    test_results['runtime'] = []

    logger.info(L_folder)
    logger.info(E_folder)
    idx = 0

    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)

    for img in util.get_image_paths(L_folder):

        # --------------------------------
        # (1) img_L
        # --------------------------------
        idx += 1
        img_name, ext = os.path.splitext(os.path.basename(img))
        logger.info('{:->4d}--> {:>10s}'.format(idx, img_name + ext))

        img_L = util.imread_uint(img, n_channels=3)
        img_L = util.uint2tensor4(img_L)
        img_L = img_L.to(device)

        start.record()
        img_E = model(img_L)
        end.record()
        torch.cuda.synchronize()
        test_results['runtime'].append(start.elapsed_time(end))  # milliseconds

        #        torch.cuda.synchronize()
        #        start = time.time()
        #        img_E = model(img_L)
        #        torch.cuda.synchronize()
        #        end = time.time()
        #        test_results['runtime'].append(end-start)  # seconds

        # --------------------------------
        # (2) img_E
        # --------------------------------
        img_E = util.tensor2uint(img_E)

        util.imsave(img_E, os.path.join(E_folder, img_name + ext))
    ave_runtime = sum(test_results['runtime']) / len(
        test_results['runtime']) / 1000.0
    logger.info('------> Average runtime of ({}) is : {:.6f} seconds'.format(
        L_folder, ave_runtime))
    print('------> Average runtime of ({}) is : {:.6f} seconds'.format(
        L_folder, ave_runtime))
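The torch.cuda.Event pattern above is the standard way to time GPU inference: elapsed_time() returns milliseconds and is only meaningful after a synchronize. A small helper that wraps the same idea and falls back to wall-clock time on CPU (the helper is a sketch and not part of the original script):

import time

import torch


def timed_forward(model, x):
    # Run model(x) and return (output, runtime in seconds). Sketch only.
    if torch.cuda.is_available():
        start = torch.cuda.Event(enable_timing=True)
        end = torch.cuda.Event(enable_timing=True)
        start.record()
        y = model(x)
        end.record()
        torch.cuda.synchronize()  # wait for the kernels to finish before reading the timer
        return y, start.elapsed_time(end) / 1000.0  # elapsed_time() is in milliseconds
    t0 = time.time()
    y = model(x)
    return y, time.time() - t0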
Code example #5
def main():

    # ----------------------------------------
    # Preparation
    # ----------------------------------------

    noise_level_img = 0  # default: 0, noise level for LR image
    noise_level_model = noise_level_img  # noise level for model
    model_name = 'srmdnf_x4'  # 'srmd_x2' | 'srmd_x3' | 'srmd_x4' | 'srmdnf_x2' | 'srmdnf_x3' | 'srmdnf_x4'
    testset_name = 'set5'  # test set,  'set5' | 'srbsd68'
    sf = [int(s) for s in re.findall(r'\d+', model_name)][0]  # scale factor
    x8 = False  # default: False, x8 to boost performance
    need_degradation = True  # default: True, use degradation model to generate LR image
    show_img = False  # default: False

    srmd_pca_path = os.path.join('kernels', 'srmd_pca_matlab.mat')
    task_current = 'sr'  # 'dn' for denoising | 'sr' for super-resolution
    n_channels = 3  # fixed
    in_nc = 18 if 'nf' in model_name else 19
    nc = 128  # fixed, number of channels
    nb = 12  # fixed, number of conv layers
    model_pool = 'model_zoo'  # fixed
    testsets = 'testsets'  # fixed
    results = 'results'  # fixed
    result_name = testset_name + '_' + model_name
    border = sf if task_current == 'sr' else 0  # shave border to calculate PSNR and SSIM
    model_path = os.path.join(model_pool, model_name + '.pth')

    # ----------------------------------------
    # L_path, E_path, H_path
    # ----------------------------------------

    L_path = os.path.join(testsets,
                          testset_name)  # L_path, for Low-quality images
    H_path = L_path  # H_path, for High-quality images
    E_path = os.path.join(results, result_name)  # E_path, for Estimated images
    util.mkdir(E_path)

    if H_path == L_path:
        need_degradation = True
    logger_name = result_name
    utils_logger.logger_info(logger_name,
                             log_path=os.path.join(E_path,
                                                   logger_name + '.log'))
    logger = logging.getLogger(logger_name)

    need_H = True if H_path is not None else False
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # ----------------------------------------
    # load model
    # ----------------------------------------

    from models.network_srmd import SRMD as net
    model = net(in_nc=in_nc,
                out_nc=n_channels,
                nc=nc,
                nb=nb,
                upscale=sf,
                act_mode='R',
                upsample_mode='pixelshuffle')
    model.load_state_dict(torch.load(model_path), strict=False)
    model.eval()
    for k, v in model.named_parameters():
        v.requires_grad = False
    model = model.to(device)
    logger.info('Model path: {:s}'.format(model_path))
    number_parameters = sum(map(lambda x: x.numel(), model.parameters()))
    logger.info('Params number: {}'.format(number_parameters))

    test_results = OrderedDict()
    test_results['psnr'] = []
    test_results['ssim'] = []
    test_results['psnr_y'] = []
    test_results['ssim_y'] = []

    logger.info('model_name:{}, model sigma:{}, image sigma:{}'.format(
        model_name, noise_level_img, noise_level_model))
    logger.info(L_path)
    L_paths = util.get_image_paths(L_path)
    H_paths = util.get_image_paths(H_path) if need_H else None

    # ----------------------------------------
    # kernel and PCA reduced feature
    # ----------------------------------------

    # kernel = sr.anisotropic_Gaussian(ksize=15, theta=np.pi, l1=4, l2=4)
    kernel = utils_deblur.fspecial(
        'gaussian', 15, 0.01)  # 15x15 Gaussian kernel with sigma 0.01, i.e. approximately a delta kernel

    P = loadmat(srmd_pca_path)['P']
    degradation_vector = np.dot(P, np.reshape(kernel, (-1), order="F"))
    if 'nf' not in model_name:  # noise-free SR
        degradation_vector = np.append(degradation_vector,
                                       noise_level_model / 255.)
    degradation_vector = torch.from_numpy(degradation_vector).view(
        1, -1, 1, 1).float()

    for idx, img in enumerate(L_paths):

        # ------------------------------------
        # (1) img_L
        # ------------------------------------

        img_name, ext = os.path.splitext(os.path.basename(img))
        # logger.info('{:->4d}--> {:>10s}'.format(idx+1, img_name+ext))
        img_L = util.imread_uint(img, n_channels=n_channels)
        img_L = util.uint2single(img_L)

        # degradation process, blur + bicubic downsampling + Gaussian noise
        if need_degradation:
            img_L = util.modcrop(img_L, sf)
            img_L = sr.srmd_degradation(
                img_L, kernel, sf
            )  # equivalent to bicubic degradation if kernel is a delta kernel
            np.random.seed(seed=0)  # for reproducibility
            img_L += np.random.normal(0, noise_level_img / 255., img_L.shape)

        util.imshow(util.single2uint(img_L),
                    title='LR image with noise level {}'.format(
                        noise_level_img)) if show_img else None

        img_L = util.single2tensor4(img_L)
        degradation_map = degradation_vector.repeat(1, 1, img_L.size(-2),
                                                    img_L.size(-1))
        img_L = torch.cat((img_L, degradation_map), dim=1)
        img_L = img_L.to(device)

        # ------------------------------------
        # (2) img_E
        # ------------------------------------

        if not x8:
            img_E = model(img_L)
        else:
            img_E = utils_model.test_mode(model, img_L, mode=3, sf=sf)

        img_E = util.tensor2uint(img_E)

        if need_H:

            # --------------------------------
            # (3) img_H
            # --------------------------------

            img_H = util.imread_uint(H_paths[idx], n_channels=n_channels)
            img_H = img_H.squeeze()
            img_H = util.modcrop(img_H, sf)

            # --------------------------------
            # PSNR and SSIM
            # --------------------------------

            psnr = util.calculate_psnr(img_E, img_H, border=border)
            ssim = util.calculate_ssim(img_E, img_H, border=border)
            test_results['psnr'].append(psnr)
            test_results['ssim'].append(ssim)
            logger.info('{:s} - PSNR: {:.2f} dB; SSIM: {:.4f}.'.format(
                img_name + ext, psnr, ssim))
            util.imshow(np.concatenate([img_E, img_H], axis=1),
                        title='Recovered / Ground-truth') if show_img else None

            if np.ndim(img_H) == 3:  # RGB image
                img_E_y = util.rgb2ycbcr(img_E, only_y=True)
                img_H_y = util.rgb2ycbcr(img_H, only_y=True)
                psnr_y = util.calculate_psnr(img_E_y, img_H_y, border=border)
                ssim_y = util.calculate_ssim(img_E_y, img_H_y, border=border)
                test_results['psnr_y'].append(psnr_y)
                test_results['ssim_y'].append(ssim_y)

        # ------------------------------------
        # save results
        # ------------------------------------

        util.imsave(img_E, os.path.join(E_path, img_name + '.png'))

    if need_H:
        ave_psnr = sum(test_results['psnr']) / len(test_results['psnr'])
        ave_ssim = sum(test_results['ssim']) / len(test_results['ssim'])
        logger.info(
            'Average PSNR/SSIM(RGB) - {} - x{} --PSNR: {:.2f} dB; SSIM: {:.4f}'
            .format(result_name, sf, ave_psnr, ave_ssim))
        if np.ndim(img_H) == 3:
            ave_psnr_y = sum(test_results['psnr_y']) / len(
                test_results['psnr_y'])
            ave_ssim_y = sum(test_results['ssim_y']) / len(
                test_results['ssim_y'])
            logger.info(
                'Average PSNR/SSIM( Y ) - {} - x{} - PSNR: {:.2f} dB; SSIM: {:.4f}'
                .format(result_name, sf, ave_psnr_y, ave_ssim_y))
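The PCA step in this example projects the flattened 15x15 blur kernel onto a low-dimensional feature (15 values, using the projection matrix P stored in srmd_pca_matlab.mat), and for the noise-handling 'srmd_x*' models the noise level is appended as one more value; this is why in_nc is 18 (15 + 3 image channels) for the 'srmdnf' variants and 19 otherwise. A short shape check under that assumption (P is replaced by a placeholder here):

import numpy as np

# Sketch: shapes of the SRMD degradation feature (P assumed to be 15 x 225).
kernel = np.ones((15, 15)) / 225.            # any 15x15 blur kernel, here a uniform one
P = np.zeros((15, 15 * 15))                  # placeholder for loadmat(srmd_pca_path)['P']
v = P.dot(kernel.reshape(-1, order='F'))     # (15,) PCA-reduced kernel feature
v_noisy = np.append(v, 25 / 255.)            # (16,) with the noise level appended ('srmd_x*' models)
print(v.shape, v_noisy.shape)                # (15,) (16,)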
Code example #6
def main():

    # ----------------------------------------
    # Preparation
    # ----------------------------------------

    noise_level_img = 0 / 255.0  # set AWGN noise level for LR image, default: 0
    noise_level_model = noise_level_img  # set noise level of model, default: 0
    model_name = 'ircnn_color'  # set denoiser, 'drunet_color' | 'ircnn_color'
    testset_name = 'Set18'  # set testing set,  'set18' | 'set24'
    x8 = True  # set PGSE to boost performance, default: True
    iter_num = 40  # set number of iterations, default: 40 for demosaicing
    modelSigma1 = 49  # set sigma_1, default: 49
    modelSigma2 = max(0.6, noise_level_model * 255.)  # set sigma_2
    matlab_init = True

    show_img = False  # default: False
    save_L = True  # save LR image
    save_E = True  # save estimated image
    save_LEH = False  # save zoomed LR, E and H images
    border = 10  # default 10 for demosaicing

    task_current = 'dm'  # 'dm' for demosaicing
    n_channels = 3  # fixed
    model_zoo = 'model_zoo'  # fixed
    testsets = 'testsets'  # fixed
    results = 'results'  # fixed
    result_name = testset_name + '_' + task_current + '_' + model_name
    model_path = os.path.join(model_zoo, model_name + '.pth')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    torch.cuda.empty_cache()

    # ----------------------------------------
    # L_path, E_path, H_path
    # ----------------------------------------

    L_path = os.path.join(testsets,
                          testset_name)  # L_path, for Low-quality images
    E_path = os.path.join(results, result_name)  # E_path, for Estimated images
    util.mkdir(E_path)

    logger_name = result_name
    utils_logger.logger_info(logger_name,
                             log_path=os.path.join(E_path,
                                                   logger_name + '.log'))
    logger = logging.getLogger(logger_name)

    # ----------------------------------------
    # load model
    # ----------------------------------------

    if 'drunet' in model_name:
        from models.network_unet import UNetRes as net
        model = net(in_nc=n_channels + 1,
                    out_nc=n_channels,
                    nc=[64, 128, 256, 512],
                    nb=4,
                    act_mode='R',
                    downsample_mode="strideconv",
                    upsample_mode="convtranspose")
        model.load_state_dict(torch.load(model_path), strict=True)
        model.eval()
        for _, v in model.named_parameters():
            v.requires_grad = False
        model = model.to(device)
    elif 'ircnn' in model_name:
        from models.network_dncnn import IRCNN as net
        model = net(in_nc=n_channels, out_nc=n_channels, nc=64)
        model25 = torch.load(model_path)
        former_idx = 0

    logger.info('model_name:{}, image sigma:{:.3f}, model sigma:{:.3f}'.format(
        model_name, noise_level_img, noise_level_model))
    logger.info('Model path: {:s}'.format(model_path))
    logger.info(L_path)
    L_paths = util.get_image_paths(L_path)

    test_results = OrderedDict()
    test_results['psnr'] = []

    for idx, img in enumerate(L_paths):

        # --------------------------------
        # (1) get img_H and img_L
        # --------------------------------

        idx += 1
        img_name, ext = os.path.splitext(os.path.basename(img))
        img_H = util.imread_uint(img, n_channels=n_channels)
        CFA, CFA4, mosaic, mask = utils_mosaic.mosaic_CFA_Bayer(img_H)

        # --------------------------------
        # (2) initialize x
        # --------------------------------

        if matlab_init:  # matlab demosaicing for initialization
            CFA4 = util.uint2tensor4(CFA4).to(device)
            x = utils_mosaic.dm_matlab(CFA4)
        else:
            x = cv2.cvtColor(CFA, cv2.COLOR_BAYER_BG2RGB_EA)
            x = util.uint2tensor4(x).to(device)

        img_L = util.tensor2uint(x)
        y = util.uint2tensor4(mosaic).to(device)

        util.imshow(img_L) if show_img else None
        mask = util.single2tensor4(mask.astype(np.float32)).to(device)

        # --------------------------------
        # (3) get rhos and sigmas
        # --------------------------------

        rhos, sigmas = pnp.get_rho_sigma(sigma=max(0.255 / 255.,
                                                   noise_level_img),
                                         iter_num=iter_num,
                                         modelSigma1=modelSigma1,
                                         modelSigma2=modelSigma2,
                                         w=1.0)
        rhos, sigmas = torch.tensor(rhos).to(device), torch.tensor(sigmas).to(
            device)

        # --------------------------------
        # (4) main iterations
        # --------------------------------

        for i in range(iter_num):

            # --------------------------------
            # step 1, closed-form solution
            # --------------------------------

            x = (y + rhos[i].float() * x).div(mask + rhos[i])

            # --------------------------------
            # step 2, denoiser
            # --------------------------------

            if 'ircnn' in model_name:
                current_idx = int(
                    np.ceil(sigmas[i].cpu().numpy() * 255. / 2.) - 1)
                if current_idx != former_idx:
                    model.load_state_dict(model25[str(current_idx)],
                                          strict=True)
                    model.eval()
                    for _, v in model.named_parameters():
                        v.requires_grad = False
                    model = model.to(device)
                former_idx = current_idx

            x = torch.clamp(x, 0, 1)
            if x8:
                x = util.augment_img_tensor4(x, i % 8)

            if 'drunet' in model_name:
                x = torch.cat((x, sigmas[i].float().repeat(
                    1, 1, x.shape[2], x.shape[3])),
                              dim=1)
                x = utils_model.test_mode(model,
                                          x,
                                          mode=2,
                                          refield=32,
                                          min_size=256,
                                          modulo=16)
                # x = model(x)
            elif 'ircnn' in model_name:
                x = model(x)

            if x8:
                if i % 8 == 3 or i % 8 == 5:
                    x = util.augment_img_tensor4(x, 8 - i % 8)
                else:
                    x = util.augment_img_tensor4(x, i % 8)

        x[mask.to(torch.bool)] = y[mask.to(torch.bool)]

        # --------------------------------
        # (4) img_E
        # --------------------------------

        img_E = util.tensor2uint(x)
        psnr = util.calculate_psnr(img_E, img_H, border=border)
        test_results['psnr'].append(psnr)
        logger.info('{:->4d}--> {:>10s} -- PSNR: {:.2f}dB'.format(
            idx, img_name + ext, psnr))

        if save_E:
            util.imsave(
                img_E,
                os.path.join(E_path, img_name + '_' + model_name + '.png'))

        if save_L:
            util.imsave(img_L, os.path.join(E_path, img_name + '_L.png'))

        if save_LEH:
            util.imsave(
                np.concatenate([img_L, img_E, img_H], axis=1),
                os.path.join(E_path, img_name + model_name + '_LEH.png'))

    ave_psnr = sum(test_results['psnr']) / len(test_results['psnr'])
    logger.info('------> Average PSNR(RGB) of ({}) is : {:.2f} dB'.format(
        testset_name, ave_psnr))
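For reference, step 1 of the main loop above is the elementwise closed-form solution of the data subproblem: with a binary sampling mask M, minimizing ||M*x - y||^2 + rho*||x - z||^2 gives x = (y + rho*z) / (M + rho), which is what (y + rhos[i] * x).div(mask + rhos[i]) computes, since y is already masked. A tiny standalone check of that identity (a sketch, independent of the original script):

import torch

# Sketch: verify the elementwise closed form used in the demosaicing data step.
torch.manual_seed(0)
mask = (torch.rand(1, 3, 8, 8) > 0.5).float()   # stand-in for the Bayer mask
y = torch.rand(1, 3, 8, 8) * mask               # observed (masked) measurements
z = torch.rand(1, 3, 8, 8)                      # current denoiser output
rho = 0.3
x = (y + rho * z) / (mask + rho)                # closed-form minimizer
grad = mask * (mask * x - y) + rho * (x - z)    # gradient of the objective at x
print(torch.allclose(grad, torch.zeros_like(grad), atol=1e-6))  # True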
Code example #7
def main():

    # --------------------------------
    # let's start!
    # --------------------------------
    utils_logger.logger_info('test_srresnetplus',
                             log_path='test_srresnetplus.log')
    logger = logging.getLogger('test_srresnetplus')

    # basic setting
    # ================================================

    sf = 4  # scale factor
    noise_level_img = 0 / 255.0  # noise level of L image
    noise_level_model = noise_level_img
    show_img = True

    use_srganplus = True  # 'True' for SRGAN+ (x4) and 'False' for SRResNet+ (x2,x3,x4)
    testsets = 'testsets'
    testset_current = 'Set5'
    n_channels = 3  # only color images, fixed
    border = sf  # shave border to calculate PSNR and SSIM

    if use_srganplus and sf == 4:
        model_prefix = 'DPSRGAN'
        save_suffix = 'dpsrgan'
    else:
        model_prefix = 'DPSR'
        save_suffix = 'dpsr'

    model_path = os.path.join('DPSR_models', model_prefix + 'x%01d.pth' % (sf))

    # --------------------------------
    # L_folder, E_folder, H_folder
    # --------------------------------
    # --1--> L_folder, folder of Low-quality images
    testsubset_current = 'x%01d' % (sf)
    L_folder = os.path.join(testsets, testset_current, testsubset_current)

    # --2--> E_folder, folder of Estimated images
    E_folder = os.path.join(testsets, testset_current,
                            testsubset_current + '_' + save_suffix)
    util.mkdir(E_folder)

    # --3--> H_folder, folder of High-quality images
    H_folder = os.path.join(testsets, testset_current, 'GT')

    need_H = True if os.path.exists(H_folder) else False

    # ================================================

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # --------------------------------
    # load model
    # --------------------------------
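    # Note: SRResNet is assumed to be imported at module level from the DPSR repository's
    # model definitions (e.g. something like `from models.network_srresnet import SRResNet`);
    # the import is not part of this excerpt.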
    model = SRResNet(in_nc=4,
                     out_nc=3,
                     nc=96,
                     nb=16,
                     upscale=sf,
                     act_mode='R',
                     upsample_mode='pixelshuffle')
    model.load_state_dict(torch.load(model_path), strict=True)
    model.eval()
    for k, v in model.named_parameters():
        v.requires_grad = False
    model = model.to(device)
    logger.info('Model path {:s}. \nTesting...'.format(model_path))

    test_results = OrderedDict()
    test_results['psnr'] = []
    test_results['ssim'] = []
    test_results['psnr_y'] = []
    test_results['ssim_y'] = []

    idx = 0

    logger.info(L_folder)

    for im in os.listdir(os.path.join(L_folder)):
        if im.endswith('.jpg') or im.endswith('.bmp') or im.endswith('.png'):

            logger.info('{:->4d}--> {:>10s}'.format(
                idx, im)) if not need_H else None

            # --------------------------------
            # (1) img_L
            # --------------------------------
            idx += 1
            img_name, ext = os.path.splitext(im)
            img = util.imread_uint(os.path.join(L_folder, im),
                                   n_channels=n_channels)

            np.random.seed(seed=0)  # for reproducibility
            img = util.uint2single(img) + np.random.normal(
                0, noise_level_img, img.shape)

            util.imshow(img,
                        title='Low-resolution image') if show_img else None

            img_L = util.single2tensor4(img)
            noise_level_map = torch.ones(
                (1, 1, img_L.size(2), img_L.size(3)),
                dtype=torch.float).mul_(noise_level_model)
            img_L = torch.cat((img_L, noise_level_map), dim=1)
            img_L = img_L.to(device)

            # --------------------------------
            # (2) img_E
            # --------------------------------
            img_E = model(img_L)
            img_E = util.tensor2single(img_E)
            img_E = util.single2uint(img_E)  # np.uint8((z * 255.0).round())

            if need_H:

                # --------------------------------
                # (3) img_H
                # --------------------------------
                img_H = util.imread_uint(os.path.join(H_folder, im),
                                         n_channels=n_channels)
                img_H = util.modcrop(img_H, scale=sf)

                # --------------------------------
                # PSNR and SSIM
                # --------------------------------
                psnr = util.calculate_psnr(img_E, img_H, border=border)
                ssim = util.calculate_ssim(img_E, img_H, border=border)
                test_results['psnr'].append(psnr)
                test_results['ssim'].append(ssim)

                if np.ndim(img_H) == 3:  # RGB image

                    img_E_y = util.rgb2ycbcr(img_E, only_y=True)
                    img_H_y = util.rgb2ycbcr(img_H, only_y=True)
                    psnr_y = util.calculate_psnr(img_E_y,
                                                 img_H_y,
                                                 border=border)
                    ssim_y = util.calculate_ssim(img_E_y,
                                                 img_H_y,
                                                 border=border)
                    test_results['psnr_y'].append(psnr_y)
                    test_results['ssim_y'].append(ssim_y)

                    logger.info(
                        '{:->20s} - PSNR: {:.2f} dB; SSIM: {:.4f}; PSNR_Y: {:.2f} dB; SSIM_Y: {:.4f}.'
                        .format(im, psnr, ssim, psnr_y, ssim_y))
                else:
                    logger.info(
                        '{:20s} - PSNR: {:.2f} dB; SSIM: {:.4f}.'.format(
                            im, psnr, ssim))

            # --------------------------------
            # save results
            # --------------------------------
            util.imshow(np.concatenate([img_E, img_H], axis=1),
                        title='Recovered / Ground-truth') if show_img else None
            util.imsave(
                img_E,
                os.path.join(E_folder, img_name + '_x{}'.format(sf) + ext))

    if need_H:

        ave_psnr = sum(test_results['psnr']) / len(test_results['psnr'])
        ave_ssim = sum(test_results['ssim']) / len(test_results['ssim'])
        logger.info(
            'PSNR/SSIM(RGB) - {} - x{} -- PSNR: {:.2f} dB; SSIM: {:.4f}'.
            format(testset_current, sf, ave_psnr, ave_ssim))
        if np.ndim(img_H) == 3:
            ave_psnr_y = sum(test_results['psnr_y']) / len(
                test_results['psnr_y'])
            ave_ssim_y = sum(test_results['ssim_y']) / len(
                test_results['ssim_y'])
            logger.info(
                'PSNR/SSIM( Y ) - {} - x{} -- PSNR: {:.2f} dB; SSIM: {:.4f}'.
                format(testset_current, sf, ave_psnr_y, ave_ssim_y))
Code example #8
File: main_test_table1.py  Project: zjucmx/USRNet
def main():

    # ----------------------------------------
    # Preparation
    # ----------------------------------------
    model_name = 'usrnet'  # 'usrgan' | 'usrnet' | 'usrgan_tiny' | 'usrnet_tiny'
    testset_name = 'set5'  # test set,  'set5' | 'srbsd68'
    test_sf = [4] if 'gan' in model_name else [
        2, 3, 4
    ]  # scale factor, from {1,2,3,4}

    show_img = False  # default: False
    save_L = True  # save LR image
    save_E = True  # save estimated image
    save_LEH = False  # save zoomed LR, E and H images

    # ----------------------------------------
    # load testing kernels
    # ----------------------------------------
    # kernels = hdf5storage.loadmat(os.path.join('kernels', 'kernels.mat'))['kernels']
    kernels = loadmat(os.path.join('kernels', 'kernels_12.mat'))['kernels']

    n_channels = 1 if 'gray' in model_name else 3  # 3 for color image, 1 for grayscale image
    model_pool = 'model_zoo'  # fixed
    testsets = 'testsets'  # fixed
    results = 'results'  # fixed
    noise_level_img = 0  # fixed: 0, noise level for LR image
    noise_level_model = noise_level_img  # fixed, noise level of model, default 0
    result_name = testset_name + '_' + model_name
    model_path = os.path.join(model_pool, model_name + '.pth')

    # ----------------------------------------
    # L_path = H_path, E_path, logger
    # ----------------------------------------
    L_path = os.path.join(
        testsets,
        testset_name)  # L_path and H_path, fixed, for Low-quality images
    E_path = os.path.join(results,
                          result_name)  # E_path, fixed, for Estimated images
    util.mkdir(E_path)

    logger_name = result_name
    utils_logger.logger_info(logger_name,
                             log_path=os.path.join(E_path,
                                                   logger_name + '.log'))
    logger = logging.getLogger(logger_name)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # ----------------------------------------
    # load model
    # ----------------------------------------
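    # Note: `net` is assumed to be the USRNet class, imported as in code example #1
    # (from models.network_usrnet import USRNet as net); the import is not part of this excerpt.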
    if 'tiny' in model_name:
        model = net(n_iter=6,
                    h_nc=32,
                    in_nc=4,
                    out_nc=3,
                    nc=[16, 32, 64, 64],
                    nb=2,
                    act_mode="R",
                    downsample_mode='strideconv',
                    upsample_mode="convtranspose")
    else:
        model = net(n_iter=8,
                    h_nc=64,
                    in_nc=4,
                    out_nc=3,
                    nc=[64, 128, 256, 512],
                    nb=2,
                    act_mode="R",
                    downsample_mode='strideconv',
                    upsample_mode="convtranspose")

    model.load_state_dict(torch.load(model_path), strict=True)
    model.eval()
    for key, v in model.named_parameters():
        v.requires_grad = False
    number_parameters = sum(map(lambda x: x.numel(), model.parameters()))
    model = model.to(device)

    logger.info('Model path: {:s}'.format(model_path))
    logger.info('Params number: {}'.format(number_parameters))
    logger.info('Model_name:{}, image sigma:{}'.format(model_name,
                                                       noise_level_img))
    logger.info(L_path)
    L_paths = util.get_image_paths(L_path)

    # --------------------------------
    # read images
    # --------------------------------
    test_results_ave = OrderedDict()
    test_results_ave['psnr_sf_k'] = []

    for sf in test_sf:

        for k_index in range(kernels.shape[1]):

            test_results = OrderedDict()
            test_results['psnr'] = []
            kernel = kernels[0, k_index].astype(np.float64)

            ## other kernels
            # kernel = utils_deblur.blurkernel_synthesis(h=25)  # motion kernel
            # kernel = utils_deblur.fspecial('gaussian', 25, 1.6) # Gaussian kernel
            # kernel = sr.shift_pixel(kernel, sf)  # pixel shift; optional
            # kernel /= np.sum(kernel)

            util.surf(kernel) if show_img else None
            idx = 0

            for img in L_paths:

                # --------------------------------
                # (1) classical degradation, img_L
                # --------------------------------
                idx += 1
                img_name, ext = os.path.splitext(os.path.basename(img))
                img_H = util.imread_uint(
                    img, n_channels=n_channels)  # HR image, int8
                img_H = util.modcrop(img_H, np.lcm(sf, 8))  # modcrop

                # generate degraded LR image
                img_L = ndimage.convolve(
                    img_H, kernel[..., np.newaxis], mode='wrap')  # blur
                img_L = sr.downsample_np(
                    img_L, sf,
                    center=False)  # downsample, standard s-fold downsampler
                img_L = util.uint2single(img_L)  # uint2single

                np.random.seed(seed=0)  # for reproducibility
                img_L += np.random.normal(0, noise_level_img,
                                          img_L.shape)  # add AWGN

                util.imshow(util.single2uint(img_L)) if show_img else None

                x = util.single2tensor4(img_L)
                k = util.single2tensor4(kernel[..., np.newaxis])
                sigma = torch.tensor(noise_level_model).float().view(
                    [1, 1, 1, 1])
                [x, k, sigma] = [el.to(device) for el in [x, k, sigma]]

                # --------------------------------
                # (2) inference
                # --------------------------------
                x = model(x, k, sf, sigma)

                # --------------------------------
                # (3) img_E
                # --------------------------------
                img_E = util.tensor2uint(x)

                if save_E:
                    util.imsave(
                        img_E,
                        os.path.join(
                            E_path, img_name + '_x' + str(sf) + '_k' +
                            str(k_index + 1) + '_' + model_name + '.png'))

                # --------------------------------
                # (4) img_LEH
                # --------------------------------
                img_L = util.single2uint(img_L)
                if save_LEH:
                    k_v = kernel / np.max(kernel) * 1.2
                    k_v = util.single2uint(
                        np.tile(k_v[..., np.newaxis], [1, 1, 3]))
                    k_v = cv2.resize(k_v, (3 * k_v.shape[1], 3 * k_v.shape[0]),
                                     interpolation=cv2.INTER_NEAREST)
                    img_I = cv2.resize(
                        img_L, (sf * img_L.shape[1], sf * img_L.shape[0]),
                        interpolation=cv2.INTER_NEAREST)
                    img_I[:k_v.shape[0], -k_v.shape[1]:, :] = k_v
                    img_I[:img_L.shape[0], :img_L.shape[1], :] = img_L
                    util.imshow(np.concatenate([img_I, img_E, img_H], axis=1),
                                title='LR / Recovered / Ground-truth'
                                ) if show_img else None
                    util.imsave(
                        np.concatenate([img_I, img_E, img_H], axis=1),
                        os.path.join(
                            E_path, img_name + '_x' + str(sf) + '_k' +
                            str(k_index + 1) + '_LEH.png'))

                if save_L:
                    util.imsave(
                        img_L,
                        os.path.join(
                            E_path, img_name + '_x' + str(sf) + '_k' +
                            str(k_index + 1) + '_LR.png'))

                psnr = util.calculate_psnr(
                    img_E, img_H, border=sf**2)  # change with your own border
                test_results['psnr'].append(psnr)
                logger.info(
                    '{:->4d}--> {:>10s} -- x{:>2d} --k{:>2d} PSNR: {:.2f}dB'.
                    format(idx, img_name + ext, sf, k_index, psnr))

            ave_psnr_k = sum(test_results['psnr']) / len(test_results['psnr'])
            logger.info(
                '------> Average PSNR(RGB) of ({}) scale factor: ({}), kernel: ({}) sigma: ({}): {:.2f} dB'
                .format(testset_name, sf, k_index + 1, noise_level_model,
                        ave_psnr_k))
            test_results_ave['psnr_sf_k'].append(ave_psnr_k)
    logger.info(test_results_ave['psnr_sf_k'])
Code example #9
File: main_test_bsrgan.py  Project: zzloop/BSRGAN
def main():

    utils_logger.logger_info('blind_sr_log', log_path='blind_sr_log.log')
    logger = logging.getLogger('blind_sr_log')

#    print(torch.__version__)               # pytorch version
#    print(torch.version.cuda)              # cuda version
#    print(torch.backends.cudnn.version())  # cudnn version

    testsets = 'testsets'       # fixed, set path of testsets
    testset_Ls = ['RealSRSet']  # ['RealSRSet','DPED']

    model_names = ['RRDB','ESRGAN','FSSR_DPED','FSSR_JPEG','RealSR_DPED','RealSR_JPEG']
    model_names = ['BSRGAN']

    save_results = True
    sf = 4
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    for model_name in model_names:

        model_path = os.path.join('model_zoo', model_name+'.pth')          # set model path
        logger.info('{:>16s} : {:s}'.format('Model Name', model_name))

        # torch.cuda.set_device(0)      # set GPU ID
        logger.info('{:>16s} : {:<d}'.format('GPU ID', torch.cuda.current_device()))
        torch.cuda.empty_cache()

        # --------------------------------
        # define network and load model
        # --------------------------------
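        # Note: `net` is not imported in this excerpt; in the BSRGAN repository it is assumed
        # to be the RRDB generator (e.g. from models.network_rrdbnet import RRDBNet as net).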
        model = net(in_nc=3, out_nc=3, nf=64, nb=23, gc=32)  # define network

#            model_old = torch.load(model_path)
#            state_dict = model.state_dict()
#            for ((key, param),(key2, param2)) in zip(model_old.items(), state_dict.items()):
#                state_dict[key2] = param
#            model.load_state_dict(state_dict, strict=True)

        model.load_state_dict(torch.load(model_path), strict=True)
        model.eval()
        for k, v in model.named_parameters():
            v.requires_grad = False
        model = model.to(device)
        torch.cuda.empty_cache()

        for testset_L in testset_Ls:

            L_path = os.path.join(testsets, testset_L)
            #E_path = os.path.join(testsets, testset_L+'_'+model_name)
            E_path = os.path.join(testsets, testset_L+'_results_x'+str(sf))
            util.mkdir(E_path)

            logger.info('{:>16s} : {:s}'.format('Input Path', L_path))
            logger.info('{:>16s} : {:s}'.format('Output Path', E_path))
            idx = 0

            for img in util.get_image_paths(L_path):

                # --------------------------------
                # (1) img_L
                # --------------------------------
                idx += 1
                img_name, ext = os.path.splitext(os.path.basename(img))
                logger.info('{:->4d} --> {:<s} --> x{:<d}--> {:<s}'.format(idx, model_name, sf, img_name+ext))

                img_L = util.imread_uint(img, n_channels=3)
                img_L = util.uint2tensor4(img_L)
                img_L = img_L.to(device)

                # --------------------------------
                # (2) inference
                # --------------------------------
                img_E = model(img_L)

                # --------------------------------
                # (3) img_E
                # --------------------------------
                img_E = util.tensor2uint(img_E)
                if save_results:
                    util.imsave(img_E, os.path.join(E_path, img_name+'_'+model_name+'.png'))
Code example #10
def main():

    # --------------------------------
    # let's start!
    # --------------------------------
    utils_logger.logger_info('test_srresnetplus_real', log_path='test_srresnetplus_real.log')
    logger = logging.getLogger('test_srresnetplus_real')

    # basic setting
    # ================================================

    sf = 4  # from 2, 3 and 4
    noise_level_img = 14./255.  # noise level of low-quality image
    testsets = 'testsets'
    testset_current = 'real_imgs'
    use_srganplus = True  # 'True' for SRGAN+ (x4) and 'False' for SRResNet+ (x2,x3,x4)

    im = 'frog.png'  # frog.png

    if 'frog' in im:
        noise_level_img = 14./255.

    noise_level_model = noise_level_img  # noise level of model

    if use_srganplus and sf == 4:
        model_prefix = 'DPSRGAN'
        save_suffix = 'srganplus'
    else:
        model_prefix = 'DPSR'
        save_suffix = 'srresnet'

    model_path = os.path.join('DPSR_models', model_prefix+'x%01d.pth' % (sf))
    show_img = True
    n_channels = 3  # only color images, fixed

    # ================================================

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # --------------------------------
    # (1) load trained model
    # --------------------------------

    model = SRResNet(in_nc=4, out_nc=3, nc=96, nb=16, upscale=sf, act_mode='R', upsample_mode='pixelshuffle')
    model.load_state_dict(torch.load(model_path), strict=True)
    model.eval()
    for k, v in model.named_parameters():
        v.requires_grad = False
    model = model.to(device)
    logger.info('Model path {:s}. Testing...'.format(model_path))

    # --------------------------------
    # (2) L_folder, E_folder
    # --------------------------------
    # --1--> L_folder, folder of Low-quality images
    L_folder = os.path.join(testsets, testset_current, 'LR')  # L: Low quality

    # --2--> E_folder, folder of Estimated images
    E_folder = os.path.join(testsets, testset_current, 'x{:01d}_'.format(sf)+save_suffix)
    util.mkdir(E_folder)

    logger.info(L_folder)

    # for im in os.listdir(os.path.join(L_folder)):
    #   if (im.endswith('.jpg') or im.endswith('.bmp') or im.endswith('.png')) and 'kernel' not in im:

    # --------------------------------
    # (3) load low-resolution image
    # --------------------------------
    img_name, ext = os.path.splitext(im)
    img = util.imread_uint(os.path.join(L_folder, im), n_channels=n_channels)
    h, w = img.shape[:2]
    util.imshow(img, title='Low-resolution image') if show_img else None
    img = util.uint2single(img)
    img_L = util.single2tensor4(img)

    # --------------------------------
    # (4) do super-resolution
    # --------------------------------
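    # the SRResNet+ super-resolver expects a 4-channel input: RGB plus a spatially uniform noise-level map (hence in_nc=4 above)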
    noise_level_map = torch.ones((1, 1, img_L.size(2), img_L.size(3)), dtype=torch.float).mul_(noise_level_model)
    img_L = torch.cat((img_L, noise_level_map), dim=1)
    img_L = img_L.to(device)
    # with torch.no_grad():
    img_E = model(img_L)
    img_E = util.tensor2single(img_E)

    # --------------------------------
    # (5) img_E
    # --------------------------------
    img_E = util.single2uint(img_E[:h*sf, :w*sf])  # np.uint8((z[:h*sf, :w*sf] * 255.0).round())

    logger.info('saving: sf = {}, {}.'.format(sf, img_name+'_x{}'.format(sf)+ext))
    util.imsave(img_E, os.path.join(E_folder, img_name+'_x{}'.format(sf)+ext))

    util.imshow(img_E, title='Recovered image') if show_img else None
Code example #11
File: demo_test_dpsr_real.py Project: allenwu97/DPSR
def main():

    # --------------------------------
    # let's start!
    # --------------------------------
    utils_logger.logger_info('test_dpsr_real', log_path='test_dpsr_real.log')
    logger = logging.getLogger('test_dpsr_real')
    global arg
    arg = parser.parse_args()
    # basic setting
    # ================================================
    sf = arg.sf
    show_img = False
    noise_level_img = 8. / 255.
    #testsets = '/home/share2/wutong/DPSR/testsets/test/'

    #im = '0000115_01031_d_0000082.jpg'  # chip.png colour.png

    # if 'chip' in im:
    #   noise_level_img = 8./255.
    # elif 'colour' in im:
    #noise_level_img = 0.5/255.

    use_srganplus = False
    if use_srganplus and sf == 4:
        model_prefix = 'DPSRGAN'
        save_suffix = 'dpsrgan'
    else:
        model_prefix = 'DPSR'
        save_suffix = 'dpsr'

    model_path = os.path.join('DPSR_models', model_prefix + 'x%01d.pth' % (sf))

    iter_num = 15  # number of iterations
    n_channels = 3  # only color images, fixed

    # ================================================

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # --------------------------------
    # (1) load trained model
    # --------------------------------

    model = SRResNet(in_nc=4,
                     out_nc=3,
                     nc=96,
                     nb=16,
                     upscale=sf,
                     act_mode='R',
                     upsample_mode='pixelshuffle')
    model.load_state_dict(torch.load(model_path), strict=True)
    model.eval()
    for k, v in model.named_parameters():
        v.requires_grad = False
    model = model.to(device)
    logger.info('Model path {:s}. Testing...'.format(model_path))

    # --------------------------------
    # (2) L_folder, E_folder
    # --------------------------------
    # --1--> L_folder, folder of Low-quality images
    L_folder = os.path.join(arg.load)  # L: Low quality

    # --2--> E_folder, folder of Estimated images
    E_folder = os.path.join(arg.save)
    util.mkdir(E_folder)

    logger.info(L_folder)

    # for im in os.listdir(os.path.join(L_folder)):
    #   if (im.endswith('.jpg') or im.endswith('.bmp') or im.endswith('.png')) and 'kernel' not in im:

    # --------------------------------
    # (3) load low-resolution image
    # --------------------------------
    img_list = os.listdir(L_folder)
    for im in img_list:
        img_path, ext = os.path.splitext(im)
        img_name = img_path.split('/')[-1]
        img = util.imread_uint(os.path.join(L_folder, im),
                               n_channels=n_channels)
        h, w = img.shape[:2]
        util.imshow(img, title='Low-resolution image') if show_img else None
        img = util.unit2single(img)

        # --------------------------------
        # (4) load blur kernel
        # --------------------------------
        # if os.path.exists(os.path.join(L_folder, img_name+'_kernel.mat')):
        # k = loadmat(os.path.join(L_folder, img_name+'.mat'))['kernel']
        #  k = k.astype(np.float64)
        #  k /= k.sum()
        # elif os.path.exists(os.path.join(L_folder, img_name+'_kernel.png')):
        #   k = cv2.imread(os.path.join(L_folder, img_name+'_kernel.png'), 0)
        #    k = np.float64(k)  # float64 !
        #    k /= k.sum()
        #else:
        k = utils_deblur.fspecial('gaussian', 5, 0.25)
        iter_num = 5

        # --------------------------------
        # (5) handle boundary
        # --------------------------------
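        # pad to an FFT-friendly size (opt_fft_size) with a smooth boundary extension,
        # which suppresses ringing from the circular-convolution assumption of the FFT step below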
        img = utils_deblur.wrap_boundary_liu(
            img,
            utils_deblur.opt_fft_size(
                [img.shape[0] + k.shape[0] + 1,
                 img.shape[1] + k.shape[1] + 1]))

        # --------------------------------
        # (6) get upperleft, denominator
        # --------------------------------
        upperleft, denominator = utils_deblur.get_uperleft_denominator(img, k)

        # --------------------------------
        # (7) get rhos and sigmas
        # --------------------------------
        rhos, sigmas = utils_deblur.get_rho_sigma(sigma=max(
            0.255 / 255.0, noise_level_img),
                                                  iter_num=iter_num)

        # --------------------------------
        # (8) main iteration
        # --------------------------------
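        # plug-and-play loop: alternate the closed-form FFT deblurring step (Eq. 9)
        # with the CNN super-resolver (Eq. 12), following the rho/sigma schedule computed above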
        z = img
        rhos = np.float32(rhos)
        sigmas = np.float32(sigmas)

        for i in range(iter_num):

            logger.info('Iter: {:->4d}--> {}'.format(i + 1, im))
            # --------------------------------
            # step 1, Eq. (9) // FFT
            # --------------------------------
            rho = rhos[i]
            if i != 0:
                z = util.imresize_np(z, 1 / sf, True)

            z = np.real(
                np.fft.ifft2((upperleft + rho * np.fft.fft2(z, axes=(0, 1))) /
                             (denominator + rho),
                             axes=(0, 1)))

            # --------------------------------
            # step 2, Eq. (12) // super-resolver
            # --------------------------------
            sigma = torch.from_numpy(np.array(sigmas[i]))
            img_L = util.single2tensor4(z)

            noise_level_map = torch.ones((1, 1, img_L.size(2), img_L.size(3)),
                                         dtype=torch.float).mul_(sigma)
            img_L = torch.cat((img_L, noise_level_map), dim=1)
            img_L = img_L.to(device)
            # with torch.no_grad():
            z = model(img_L)
            z = util.tensor2single(z)

        # --------------------------------
        # (9) img_E
        # --------------------------------
        img_E = util.single2uint(
            z[:h * sf, :w * sf])  # np.uint8((z[:h*sf, :w*sf] * 255.0).round())

        logger.info('saving: sf = {}, {}.'.format(
            sf, img_name + '_x{}'.format(sf) + ext))
        util.imsave(img_E, os.path.join(E_folder, img_name + ext))

        util.imshow(img_E, title='Recovered image') if show_img else None
Code example #12
            # -------------------------------
            # 6) testing
            # -------------------------------
            if current_step % opt['train']['checkpoint_test'] == 0 and opt['rank'] == 0:

                avg_psnr = 0.0
                idx = 0

                for test_data in test_loader:
                    idx += 1
                    image_name_ext = os.path.basename(test_data['L_path'][0])
                    img_name, ext = os.path.splitext(image_name_ext)

                    img_dir = os.path.join(opt['path']['images'], img_name)
                    util.mkdir(img_dir)

                    model.feed_data(test_data)
                    model.test()

                    visuals = model.current_visuals()
                    E_img = util.tensor2uint(visuals['E'])
                    H_img = util.tensor2uint(visuals['H'])

                    # -----------------------
                    # save estimated image E
                    # -----------------------
                    save_img_path = os.path.join(img_dir, '{:s}_{:d}.png'.format(img_name, current_step))
                    util.imsave(E_img, save_img_path)

                    # -----------------------
Code example #13
def main():

    # ----------------------------------------
    # Preparation
    # ----------------------------------------
    if noise_level_model == -1:
        model_name = 'srmdnf_x' + str(sf)
    else:
        model_name = 'srmd_x' + str(sf)
    model_path = os.path.join(model_pool, model_name+'.pth')
    in_nc = 18 if 'nf' in model_name else 19

    # ----------------------------------------
    # L_path, E_path, H_path
    # ----------------------------------------

    L_path = sources  # L_path, for Low-quality images
    E_path = results   # E_path, for Estimated images
    if not os.path.splitext(E_path)[1]:
        util.mkdir(E_path)

    device = torch.device(using_device)

    # ----------------------------------------
    # load model
    # ----------------------------------------

    from utils.network_srmd import SRMD as net
    model = net(in_nc=in_nc, out_nc=n_channels, nc=nc, nb=nb,
                upscale=sf, act_mode='R', upsample_mode='pixelshuffle')
    model.load_state_dict(torch.load(model_path), strict=False)
    model.eval()
    for _, v in model.named_parameters():
        v.requires_grad = False
    model = model.to(device)

    if os.path.isfile(L_path):
        L_paths = [L_path]
    else:
        L_paths = util.get_image_paths(L_path)

    # ----------------------------------------
    # kernel and PCA reduced feature
    # ----------------------------------------

    # Gaussian kernel, delta kernel 0.01
    kernel = utils_deblur.fspecial('gaussian', 15, 0.01)

    P = loadmat(srmd_pca_path)['P']
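    # project the blur kernel onto the PCA basis P to obtain SRMD's compact degradation vector,
    # which is later stretched into a per-pixel degradation map and concatenated to the input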
    degradation_vector = np.dot(P, np.reshape(kernel, (-1), order="F"))
    if 'nf' not in model_name:  # noise-aware model: append the noise level to the degradation vector
        degradation_vector = np.append(
            degradation_vector, noise_level_model/255.)
    degradation_vector = torch.from_numpy(
        degradation_vector).view(1, -1, 1, 1).float()

    for _, img in enumerate(L_paths):
        img_name, _ = os.path.splitext(os.path.basename(img))
        try:
            # ------------------------------------
            # (1) img_L
            # ------------------------------------
            img_L, alpha = util.imread_uint_alpha(img, n_channels=n_channels)
            # bicubically upscale the alpha channel when the input image has one (transparent PNG)
            if alpha is not None and picture_format == "png":
                alpha = util.uint2tensor4(alpha)
                alpha = torch.nn.functional.interpolate(
                    alpha, scale_factor=sf, mode='bicubic', align_corners=False)
                alpha = alpha.to(device)
                alpha = torch.clamp(alpha, 0, 255)
                alpha = util.tensor2uint(alpha) 
            img_L = util.uint2tensor4(img_L)
            degradation_map = degradation_vector.repeat(
                1, 1, img_L.size(-2), img_L.size(-1))
            img_L = torch.cat((img_L, degradation_map), dim=1)
            img_L = img_L.to(device)

            # ------------------------------------
            # (2) img_E
            # ------------------------------------

            if not x8:
                img_E = model(img_L)
            else:
                img_E = utils_model.test_mode(model, img_L, mode=3, sf=sf)

            img_E = util.tensor2uint(img_E)
            if alpha is not None and picture_format == "png":
                alpha = alpha.reshape((alpha.shape[0], alpha.shape[1], 1))
                img_E = np.concatenate((img_E, alpha), axis=2)
            elif alpha is not None:
                print("Warning! You lost your alpha channel for this picture!")

            # ------------------------------------
            # save results
            # ------------------------------------
            if os.path.splitext(E_path)[1]:
                util.imsave(img_E, E_path)
            else:
                util.imsave(img_E, os.path.join(
                    E_path, img_name+'.' + picture_format))
            print(os.path.basename(img) + " successfully saved to disk!")
        except Exception:
            traceback.print_exc()
            print(os.path.basename(img) + " failed!")
Code example #14
def main():

    # ----------------------------------------
    # Preparation
    # ----------------------------------------

    noise_level_img = 15  # set AWGN noise level for noisy image
    noise_level_model = noise_level_img  # set noise level for model
    model_name = 'drunet_gray'  # set denoiser model, 'drunet_gray' | 'drunet_color'
    testset_name = 'bsd68'  # set test set,  'bsd68' | 'cbsd68' | 'set12'
    x8 = False  # default: False, x8 to boost performance
    show_img = False  # default: False
    border = 0  # shave border to calculate PSNR and SSIM

    if 'color' in model_name:
        n_channels = 3  # 3 for color image
    else:
        n_channels = 1  # 1 for grayscale image

    model_pool = 'model_zoo'  # fixed
    testsets = 'testsets'  # fixed
    results = 'results'  # fixed
    task_current = 'dn'  # 'dn' for denoising
    result_name = testset_name + '_' + task_current + '_' + model_name

    model_path = os.path.join(model_pool, model_name + '.pth')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    torch.cuda.empty_cache()

    # ----------------------------------------
    # L_path, E_path, H_path
    # ----------------------------------------

    L_path = os.path.join(testsets,
                          testset_name)  # L_path, for Low-quality images
    E_path = os.path.join(results, result_name)  # E_path, for Estimated images
    util.mkdir(E_path)

    logger_name = result_name
    utils_logger.logger_info(logger_name,
                             log_path=os.path.join(E_path,
                                                   logger_name + '.log'))
    logger = logging.getLogger(logger_name)

    # ----------------------------------------
    # load model
    # ----------------------------------------

    from models.network_unet import UNetRes as net
    model = net(in_nc=n_channels + 1,
                out_nc=n_channels,
                nc=[64, 128, 256, 512],
                nb=4,
                act_mode='R',
                downsample_mode="strideconv",
                upsample_mode="convtranspose")
    model.load_state_dict(torch.load(model_path), strict=True)
    model.eval()
    for k, v in model.named_parameters():
        v.requires_grad = False
    model = model.to(device)
    logger.info('Model path: {:s}'.format(model_path))
    number_parameters = sum(map(lambda x: x.numel(), model.parameters()))
    logger.info('Params number: {}'.format(number_parameters))

    test_results = OrderedDict()
    test_results['psnr'] = []
    test_results['ssim'] = []

    logger.info('model_name:{}, model sigma:{}, image sigma:{}'.format(
        model_name, noise_level_img, noise_level_model))
    logger.info(L_path)
    L_paths = util.get_image_paths(L_path)

    for idx, img in enumerate(L_paths):

        # ------------------------------------
        # (1) img_L
        # ------------------------------------

        img_name, ext = os.path.splitext(os.path.basename(img))
        # logger.info('{:->4d}--> {:>10s}'.format(idx+1, img_name+ext))
        img_H = util.imread_uint(img, n_channels=n_channels)
        img_L = util.uint2single(img_H)

        # Add noise without clipping
        np.random.seed(seed=0)  # for reproducibility
        img_L += np.random.normal(0, noise_level_img / 255., img_L.shape)

        util.imshow(util.single2uint(img_L),
                    title='Noisy image with noise level {}'.format(
                        noise_level_img)) if show_img else None

        img_L = util.single2tensor4(img_L)
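        # the denoiser takes the noise level as an extra constant input channel (in_nc = n_channels + 1 above)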
        img_L = torch.cat(
            (img_L, torch.FloatTensor([noise_level_model / 255.]).repeat(
                1, 1, img_L.shape[2], img_L.shape[3])),
            dim=1)
        img_L = img_L.to(device)

        # ------------------------------------
        # (2) img_E
        # ------------------------------------

        # use modulo to check divisibility by 8 (DRUNet downsamples three times)
        if not x8 and img_L.size(2) % 8 == 0 and img_L.size(3) % 8 == 0:
            img_E = model(img_L)
        elif not x8 and (img_L.size(2) % 8 != 0 or img_L.size(3) % 8 != 0):
            img_E = utils_model.test_mode(model, img_L, refield=64, mode=5)  # padded inference for other sizes
        elif x8:
            img_E = utils_model.test_mode(model, img_L, mode=3)  # x8 geometric self-ensemble

        img_E = util.tensor2uint(img_E)

        # --------------------------------
        # PSNR and SSIM
        # --------------------------------

        if n_channels == 1:
            img_H = img_H.squeeze()
        psnr = util.calculate_psnr(img_E, img_H, border=border)
        ssim = util.calculate_ssim(img_E, img_H, border=border)
        test_results['psnr'].append(psnr)
        test_results['ssim'].append(ssim)
        logger.info('{:s} - PSNR: {:.2f} dB; SSIM: {:.4f}.'.format(
            img_name + ext, psnr, ssim))

        # ------------------------------------
        # save results
        # ------------------------------------

        util.imsave(img_E, os.path.join(E_path, img_name + ext))

    ave_psnr = sum(test_results['psnr']) / len(test_results['psnr'])
    ave_ssim = sum(test_results['ssim']) / len(test_results['ssim'])
    logger.info(
        'Average PSNR/SSIM(RGB) - {} - PSNR: {:.2f} dB; SSIM: {:.4f}'.format(
            result_name, ave_psnr, ave_ssim))
Code example #15
def main():

    # ----------------------------------------
    # Preparation
    # ----------------------------------------
    noise_level_img = 50             # noise level for noisy image
    model_name = 'ircnn_gray'        # 'ircnn_gray' | 'ircnn_color'
    testset_name = 'set12'          # test set, 'bsd68' | 'set12'
    need_degradation = True          # default: True
    x8 = False                       # default: False, x8 to boost performance
    show_img = False                 # default: False
    current_idx = min(24, int(np.ceil(noise_level_img/2)-1))  # (current_idx+1)-th denoiser; int() instead of the removed np.int


    task_current = 'dn'       # fixed, 'dn' for denoising | 'sr' for super-resolution
    sf = 1                    # unused for denoising
    if 'color' in model_name:
        n_channels = 3        # fixed, 1 for grayscale image, 3 for color image 
    else:
        n_channels = 1        # fixed for grayscale image 

    model_pool = 'model_zoo'  # fixed
    testsets = 'testsets'     # fixed
    results = 'results'       # fixed
    result_name = testset_name + '_' + model_name     # fixed
    border = sf if task_current == 'sr' else 0        # shave border to calculate PSNR and SSIM
    model_path = os.path.join(model_pool, model_name+'.pth')

    # ----------------------------------------
    # L_path, E_path, H_path
    # ----------------------------------------
    L_path = os.path.join(testsets, testset_name) # L_path, for Low-quality images
    H_path = L_path                               # H_path, for High-quality images
    E_path = os.path.join(results, result_name)   # E_path, for Estimated images
    util.mkdir(E_path)

    if H_path == L_path:
        need_degradation = True
    logger_name = result_name
    utils_logger.logger_info(logger_name, log_path=os.path.join(E_path, logger_name+'.log'))
    logger = logging.getLogger(logger_name)

    need_H = True if H_path is not None else False
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # ----------------------------------------
    # load model
    # ----------------------------------------
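    # ircnn_gray.pth bundles 25 denoiser state_dicts keyed '0'..'24', one per noise level (2, 4, ..., 50);
    # current_idx selects the one closest to noise_level_img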
    model25 = torch.load(model_path)
    from models.network_dncnn import IRCNN as net
    model = net(in_nc=n_channels, out_nc=n_channels, nc=64)
    model.load_state_dict(model25[str(current_idx)], strict=True)
    model.eval()
    for _, v in model.named_parameters():
        v.requires_grad = False
    model = model.to(device)
    logger.info('Model path: {:s}'.format(model_path))
    number_parameters = sum(map(lambda x: x.numel(), model.parameters()))
    logger.info('Params number: {}'.format(number_parameters))

    test_results = OrderedDict()
    test_results['psnr'] = []
    test_results['ssim'] = []

    logger.info('model_name:{}, image sigma:{}'.format(model_name, noise_level_img))
    logger.info(L_path)
    L_paths = util.get_image_paths(L_path)
    H_paths = util.get_image_paths(H_path) if need_H else None

    for idx, img in enumerate(L_paths):

        # ------------------------------------
        # (1) img_L
        # ------------------------------------
        img_name, ext = os.path.splitext(os.path.basename(img))
        # logger.info('{:->4d}--> {:>10s}'.format(idx+1, img_name+ext))
        img_L = util.imread_uint(img, n_channels=n_channels)
        img_L = util.uint2single(img_L)

        if need_degradation:  # degradation process
            np.random.seed(seed=0)  # for reproducibility
            img_L += np.random.normal(0, noise_level_img/255., img_L.shape)

        util.imshow(util.single2uint(img_L), title='Noisy image with noise level {}'.format(noise_level_img)) if show_img else None

        img_L = util.single2tensor4(img_L)
        img_L = img_L.to(device)

        # ------------------------------------
        # (2) img_E
        # ------------------------------------
        if not x8:
            img_E = model(img_L)
        else:
            img_E = utils_model.test_mode(model, img_L, mode=3)

        img_E = util.tensor2uint(img_E)

        if need_H:

            # --------------------------------
            # (3) img_H
            # --------------------------------
            img_H = util.imread_uint(H_paths[idx], n_channels=n_channels)
            img_H = img_H.squeeze()

            # --------------------------------
            # PSNR and SSIM
            # --------------------------------
            psnr = util.calculate_psnr(img_E, img_H, border=border)
            ssim = util.calculate_ssim(img_E, img_H, border=border)
            test_results['psnr'].append(psnr)
            test_results['ssim'].append(ssim)
            logger.info('{:s} - PSNR: {:.2f} dB; SSIM: {:.4f}.'.format(img_name+ext, psnr, ssim))
            util.imshow(np.concatenate([img_E, img_H], axis=1), title='Recovered / Ground-truth') if show_img else None

        # ------------------------------------
        # save results
        # ------------------------------------
        util.imsave(img_E, os.path.join(E_path, img_name+ext))

    if need_H:
        ave_psnr = sum(test_results['psnr']) / len(test_results['psnr'])
        ave_ssim = sum(test_results['ssim']) / len(test_results['ssim'])
        logger.info('Average PSNR/SSIM(RGB) - {} - PSNR: {:.2f} dB; SSIM: {:.4f}'.format(result_name, ave_psnr, ave_ssim))
Code example #16
File: main_dpir_deblur.py Project: wuzhan11/DPIR
def main():

    # ----------------------------------------
    # Preparation
    # ----------------------------------------

    noise_level_img = 7.65 / 255.0  # noise level of the degraded (blurred) image
    noise_level_model = noise_level_img  # noise level of model, default 0
    model_name = 'drunet_gray'  # 'drunet_gray' | 'drunet_color' | 'ircnn_gray' | 'ircnn_color'
    testset_name = 'Set3C'  # test set
    x8 = True  # default: False, x8 to boost performance
    iter_num = 8  # number of iterations
    modelSigma1 = 49
    modelSigma2 = noise_level_model * 255.

    show_img = False  # default: False
    save_L = True  # save LR image
    save_E = True  # save estimated image
    save_LEH = False  # save zoomed LR, E and H images
    border = 0

    # --------------------------------
    # load kernel
    # --------------------------------

    kernels = hdf5storage.loadmat(os.path.join('kernels',
                                               'Levin09.mat'))['kernels']
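    # Levin09.mat holds the benchmark motion-blur kernels of Levin et al. (2009); the outer loop below runs once per kernel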

    sf = 1
    task_current = 'deblur'  # 'deblur' for deblurring
    n_channels = 3 if 'color' in model_name else 1  # fixed
    model_zoo = 'model_zoo'  # fixed
    testsets = 'testsets'  # fixed
    results = 'results'  # fixed
    result_name = testset_name + '_' + task_current + '_' + model_name
    model_path = os.path.join(model_zoo, model_name + '.pth')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    torch.cuda.empty_cache()

    # ----------------------------------------
    # L_path, E_path, H_path
    # ----------------------------------------

    L_path = os.path.join(testsets,
                          testset_name)  # L_path, for Low-quality images
    E_path = os.path.join(results, result_name)  # E_path, for Estimated images
    util.mkdir(E_path)

    logger_name = result_name
    utils_logger.logger_info(logger_name,
                             log_path=os.path.join(E_path,
                                                   logger_name + '.log'))
    logger = logging.getLogger(logger_name)

    # ----------------------------------------
    # load model
    # ----------------------------------------

    if 'drunet' in model_name:
        from models.network_unet import UNetRes as net
        model = net(in_nc=n_channels + 1,
                    out_nc=n_channels,
                    nc=[64, 128, 256, 512],
                    nb=4,
                    act_mode='R',
                    downsample_mode="strideconv",
                    upsample_mode="convtranspose")
        model.load_state_dict(torch.load(model_path), strict=True)
        model.eval()
        for _, v in model.named_parameters():
            v.requires_grad = False
        model = model.to(device)
    elif 'ircnn' in model_name:
        from models.network_dncnn import IRCNN as net
        model = net(in_nc=n_channels, out_nc=n_channels, nc=64)
        model25 = torch.load(model_path)
        former_idx = 0

    logger.info('model_name:{}, image sigma:{:.3f}, model sigma:{:.3f}'.format(
        model_name, noise_level_img, noise_level_model))
    logger.info('Model path: {:s}'.format(model_path))
    logger.info(L_path)
    L_paths = util.get_image_paths(L_path)

    test_results_ave = OrderedDict()
    test_results_ave['psnr'] = []  # record average PSNR for each kernel

    for k_index in range(kernels.shape[1]):

        logger.info('-------k:{:>2d} ---------'.format(k_index))
        test_results = OrderedDict()
        test_results['psnr'] = []
        k = kernels[0, k_index].astype(np.float64)
        util.imshow(k) if show_img else None

        for idx, img in enumerate(L_paths):

            # --------------------------------
            # (1) get img_L
            # --------------------------------

            img_name, ext = os.path.splitext(os.path.basename(img))
            img_H = util.imread_uint(img, n_channels=n_channels)
            img_H = util.modcrop(img_H, 8)  # modcrop

            img_L = ndimage.convolve(img_H, np.expand_dims(k, axis=2),
                                     mode='wrap')  # scipy.ndimage.filters is deprecated; use ndimage.convolve directly
            util.imshow(img_L) if show_img else None
            img_L = util.uint2single(img_L)

            np.random.seed(seed=0)  # for reproducibility
            img_L += np.random.normal(0, noise_level_img,
                                      img_L.shape)  # add AWGN

            # --------------------------------
            # (2) get rhos and sigmas
            # --------------------------------
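            # sigmas decay from modelSigma1 (49) down to modelSigma2 over iter_num steps;
            # rhos are the matching penalty weights used by the FFT data step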

            rhos, sigmas = pnp.get_rho_sigma(sigma=max(0.255 / 255.,
                                                       noise_level_model),
                                             iter_num=iter_num,
                                             modelSigma1=modelSigma1,
                                             modelSigma2=modelSigma2,
                                             w=1.0)
            rhos, sigmas = torch.tensor(rhos).to(device), torch.tensor(
                sigmas).to(device)

            # --------------------------------
            # (3) initialize x, and pre-calculation
            # --------------------------------

            x = util.single2tensor4(img_L).to(device)

            img_L_tensor, k_tensor = util.single2tensor4(
                img_L), util.single2tensor4(np.expand_dims(k, 2))
            [k_tensor, img_L_tensor] = util.todevice([k_tensor, img_L_tensor],
                                                     device)
            FB, FBC, F2B, FBFy = sr.pre_calculate(img_L_tensor, k_tensor, sf)
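            # FB: FFT of the blur kernel, FBC: its conjugate, F2B: |FB|^2, FBFy: FBC * FFT of the observation;
            # these fixed quantities are reused by the closed-form data solution in every iteration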

            # --------------------------------
            # (4) main iterations
            # --------------------------------

            for i in range(iter_num):

                # --------------------------------
                # step 1, FFT
                # --------------------------------

                tau = rhos[i].float().repeat(1, 1, 1, 1)
                x = sr.data_solution(x, FB, FBC, F2B, FBFy, tau, sf)

                if 'ircnn' in model_name:
                    current_idx = int(
                        np.ceil(sigmas[i].cpu().numpy() * 255. / 2.) - 1)  # int() instead of the removed np.int

                    if current_idx != former_idx:
                        model.load_state_dict(model25[str(current_idx)],
                                              strict=True)
                        model.eval()
                        for _, v in model.named_parameters():
                            v.requires_grad = False
                        model = model.to(device)
                    former_idx = current_idx

                # --------------------------------
                # step 2, denoiser
                # --------------------------------
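                # x8: cycle through the 8 flip/rotation augmentations over the iterations
                # (geometric self-ensemble); the transform is inverted again after the denoiser call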

                if x8:
                    x = util.augment_img_tensor4(x, i % 8)

                if 'drunet' in model_name:
                    x = torch.cat((x, sigmas[i].float().repeat(
                        1, 1, x.shape[2], x.shape[3])),
                                  dim=1)
                    x = utils_model.test_mode(model,
                                              x,
                                              mode=2,
                                              refield=32,
                                              min_size=256,
                                              modulo=16)
                elif 'ircnn' in model_name:
                    x = model(x)

                if x8:
                    if i % 8 == 3 or i % 8 == 5:
                        x = util.augment_img_tensor4(x, 8 - i % 8)
                    else:
                        x = util.augment_img_tensor4(x, i % 8)

            # --------------------------------
            # (3) img_E
            # --------------------------------

            img_E = util.tensor2uint(x)
            if n_channels == 1:
                img_H = img_H.squeeze()

            if save_E:
                util.imsave(
                    img_E,
                    os.path.join(
                        E_path, img_name + '_k' + str(k_index) + '_' +
                        model_name + '.png'))

            # --------------------------------
            # (4) img_LEH
            # --------------------------------

            if save_LEH:
                img_L = util.single2uint(img_L)
                k_v = k / np.max(k) * 1.0
                k_v = util.single2uint(np.tile(k_v[..., np.newaxis],
                                               [1, 1, 3]))
                k_v = cv2.resize(k_v, (3 * k_v.shape[1], 3 * k_v.shape[0]),
                                 interpolation=cv2.INTER_NEAREST)
                img_I = cv2.resize(img_L,
                                   (sf * img_L.shape[1], sf * img_L.shape[0]),
                                   interpolation=cv2.INTER_NEAREST)
                img_I[:k_v.shape[0], -k_v.shape[1]:, :] = k_v
                img_I[:img_L.shape[0], :img_L.shape[1], :] = img_L
                util.imshow(np.concatenate([img_I, img_E, img_H], axis=1),
                            title='LR / Recovered / Ground-truth'
                            ) if show_img else None
                util.imsave(
                    np.concatenate([img_I, img_E, img_H], axis=1),
                    os.path.join(E_path,
                                 img_name + '_k' + str(k_index) + '_LEH.png'))

            if save_L:
                util.imsave(
                    util.single2uint(img_L),
                    os.path.join(E_path,
                                 img_name + '_k' + str(k_index) + '_LR.png'))

            psnr = util.calculate_psnr(
                img_E, img_H, border=border)  # change with your own border
            test_results['psnr'].append(psnr)
            logger.info('{:->4d}--> {:>10s} --k:{:>2d} PSNR: {:.2f}dB'.format(
                idx + 1, img_name + ext, k_index, psnr))

        # --------------------------------
        # Average PSNR
        # --------------------------------

        ave_psnr = sum(test_results['psnr']) / len(test_results['psnr'])
        logger.info(
            '------> Average PSNR of ({}), kernel: ({}) sigma: ({:.2f}): {:.2f} dB'
            .format(testset_name, k_index, noise_level_model, ave_psnr))
        test_results_ave['psnr'].append(ave_psnr)
Code example #17
File: main_challenge_sr.py Project: xiaohuoer/KAIR
def main():

    utils_logger.logger_info('efficientsr_challenge',
                             log_path='efficientsr_challenge.log')
    logger = logging.getLogger('efficientsr_challenge')

    #    print(torch.__version__)               # pytorch version
    #    print(torch.version.cuda)              # cuda version
    #    print(torch.backends.cudnn.version())  # cudnn version

    # --------------------------------
    # basic settings
    # --------------------------------
    model_names = ['msrresnet', 'imdn']
    model_id = 1  # set the model name
    model_name = model_names[model_id]
    logger.info('{:>16s} : {:s}'.format('Model Name', model_name))

    testsets = 'testsets'  # set path of testsets
    testset_L = 'DIV2K_valid_LR'  # set current testing dataset; 'DIV2K_test_LR'
    testset_L = 'set12'

    save_results = True
    print_modelsummary = True  # set False when calculating `Max Memory` and `Runtime`

    torch.cuda.set_device(0)  # set GPU ID
    logger.info('{:>16s} : {:<d}'.format('GPU ID',
                                         torch.cuda.current_device()))
    torch.cuda.empty_cache()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # --------------------------------
    # define network and load model
    # --------------------------------
    if model_name == 'msrresnet':
        from models.network_msrresnet import MSRResNet1 as net
        model = net(in_nc=3, out_nc=3, nc=64, nb=16,
                    upscale=4)  # define network
        model_path = os.path.join('model_zoo',
                                  'msrresnet_x4_psnr.pth')  # set model path
    elif model_name == 'imdn':
        from models.network_imdn import IMDN as net
        model = net(in_nc=3,
                    out_nc=3,
                    nc=64,
                    nb=8,
                    upscale=4,
                    act_mode='L',
                    upsample_mode='pixelshuffle')  # define network
        model_path = os.path.join('model_zoo', 'imdn_x4.pth')  # set model path

    model.load_state_dict(torch.load(model_path), strict=True)
    model.eval()
    for k, v in model.named_parameters():
        v.requires_grad = False
    model = model.to(device)

    # --------------------------------
    # print model summary
    # --------------------------------
    if print_modelsummary:
        from utils.utils_modelsummary import get_model_activation, get_model_flops
        input_dim = (3, 256, 256)  # set the input dimension

        activations, num_conv2d = get_model_activation(model, input_dim)
        logger.info('{:>16s} : {:<.4f} [M]'.format('#Activations',
                                                   activations / 10**6))
        logger.info('{:>16s} : {:<d}'.format('#Conv2d', num_conv2d))

        flops = get_model_flops(model, input_dim, False)
        logger.info('{:>16s} : {:<.4f} [G]'.format('FLOPs', flops / 10**9))

        num_parameters = sum(map(lambda x: x.numel(), model.parameters()))
        logger.info('{:>16s} : {:<.4f} [M]'.format('#Params',
                                                   num_parameters / 10**6))

    # --------------------------------
    # read image
    # --------------------------------
    L_path = os.path.join(testsets, testset_L)
    E_path = os.path.join(testsets, testset_L + '_' + model_name)
    util.mkdir(E_path)

    # record runtime
    test_results = OrderedDict()
    test_results['runtime'] = []

    logger.info('{:>16s} : {:s}'.format('Input Path', L_path))
    logger.info('{:>16s} : {:s}'.format('Output Path', E_path))
    idx = 0

    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
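    # CUDA events time the GPU-side forward pass; torch.cuda.synchronize() below ensures the work has finished before reading elapsed_time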

    for img in util.get_image_paths(L_path):

        # --------------------------------
        # (1) img_L
        # --------------------------------
        idx += 1
        img_name, ext = os.path.splitext(os.path.basename(img))
        logger.info('{:->4d}--> {:>10s}'.format(idx, img_name + ext))

        img_L = util.imread_uint(img, n_channels=3)
        img_L = util.uint2tensor4(img_L)
        torch.cuda.empty_cache()
        img_L = img_L.to(device)

        start.record()
        img_E = model(img_L)
        # logger.info('{:>16s} : {:<.3f} [M]'.format('Max Memory', torch.cuda.max_memory_allocated(torch.cuda.current_device())/1024**2))  # max GPU memory
        end.record()
        torch.cuda.synchronize()
        test_results['runtime'].append(start.elapsed_time(end))  # milliseconds

        #        torch.cuda.synchronize()
        #        start = time.time()
        #        img_E = model(img_L)
        #        torch.cuda.synchronize()
        #        end = time.time()
        #        test_results['runtime'].append(end-start)  # seconds

        # --------------------------------
        # (2) img_E
        # --------------------------------
        img_E = util.tensor2uint(img_E)

        if save_results:
            util.imsave(img_E, os.path.join(E_path, img_name + ext))
    ave_runtime = sum(test_results['runtime']) / len(
        test_results['runtime']) / 1000.0
    logger.info('------> Average runtime of ({}) is : {:.6f} seconds'.format(
        L_path, ave_runtime))
Code example #18
File: test.py Project: zjucmx/RFDN
def main():

    utils_logger.logger_info('AIM-track', log_path='AIM-track.log')
    logger = logging.getLogger('AIM-track')

    # --------------------------------
    # basic settings
    # --------------------------------
    testsets = 'DIV2K'
    testset_L = 'DIV2K_valid_LR_bicubic'
    #testset_L = 'DIV2K_test_LR_bicubic'

    torch.cuda.current_device()
    torch.cuda.empty_cache()
    #torch.backends.cudnn.benchmark = True
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # --------------------------------
    # load model
    # --------------------------------
    model_path = os.path.join('trained_model', 'RFDN_AIM.pth')
    model = RFDN()
    model.load_state_dict(torch.load(model_path), strict=True)
    model.eval()
    for k, v in model.named_parameters():
        v.requires_grad = False
    model = model.to(device)

    # number of parameters
    number_parameters = sum(map(lambda x: x.numel(), model.parameters()))
    logger.info('Params number: {}'.format(number_parameters))

    # --------------------------------
    # read image
    # --------------------------------
    L_folder = os.path.join(testsets, testset_L, 'X4')
    E_folder = os.path.join(testsets, testset_L+'_results')
    util.mkdir(E_folder)

    # record PSNR, runtime
    test_results = OrderedDict()
    test_results['runtime'] = []

    logger.info(L_folder)
    logger.info(E_folder)
    idx = 0

    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)

    img_SR = []
    for img in util.get_image_paths(L_folder):

        # --------------------------------
        # (1) img_L
        # --------------------------------
        idx += 1
        img_name, ext = os.path.splitext(os.path.basename(img))
        logger.info('{:->4d}--> {:>10s}'.format(idx, img_name+ext))

        img_L = util.imread_uint(img, n_channels=3)
        img_L = util.uint2tensor4(img_L)
        img_L = img_L.to(device)

        start.record()
        img_E = model(img_L)
        end.record()
        torch.cuda.synchronize()
        test_results['runtime'].append(start.elapsed_time(end))  # milliseconds

        # --------------------------------
        # (2) img_E
        # --------------------------------
        img_E = util.tensor2uint(img_E)
        img_SR.append(img_E)

        # --------------------------------
        # (3) save results
        # --------------------------------
        #util.imsave(img_E, os.path.join(E_folder, img_name+ext))

    ave_runtime = sum(test_results['runtime']) / len(test_results['runtime']) / 1000.0
    logger.info('------> Average runtime of ({}) is : {:.6f} seconds'.format(L_folder, ave_runtime))

    # --------------------------------
    # (4) calculate psnr
    # --------------------------------
    '''
Code example #19
def main():

    # ----------------------------------------
    # Preparation
    # ----------------------------------------

    noise_level_img = 0/255.0            # set AWGN noise level for LR image, default: 0
    noise_level_model = noise_level_img  # set noise level of model, default: 0
    model_name = 'drunet_color'          # set denoiser, | 'drunet_color' | 'ircnn_gray' | 'drunet_gray' | 'ircnn_color'
    testset_name = 'srbsd68'             # set test set,  'set5' | 'srbsd68'
    x8 = True                            # default: False, x8 to boost performance
    test_sf = [2]                        # set scale factor, default: [2, 3, 4], [2], [3], [4]
    iter_num = 24                        # set number of iterations, default: 24 for SISR
    modelSigma1 = 49                     # set sigma_1, default: 49
    classical_degradation = True         # set classical degradation or bicubic degradation

    show_img = False                     # default: False
    save_L = True                        # save LR image
    save_E = True                        # save estimated image
    save_LEH = False                     # save zoomed LR, E and H images

    task_current = 'sr'                  # 'sr' for super-resolution
    n_channels = 1 if 'gray' in model_name else 3  # fixed
    model_zoo = 'model_zoo'              # fixed
    testsets = 'testsets'                # fixed
    results = 'results'                  # fixed
    result_name = testset_name + '_' + task_current + '_' + model_name
    model_path = os.path.join(model_zoo, model_name+'.pth')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    torch.cuda.empty_cache()

    # ----------------------------------------
    # L_path, E_path, H_path
    # ----------------------------------------

    L_path = os.path.join(testsets, testset_name) # L_path, for Low-quality images
    E_path = os.path.join(results, result_name)   # E_path, for Estimated images
    util.mkdir(E_path)

    logger_name = result_name
    utils_logger.logger_info(logger_name, log_path=os.path.join(E_path, logger_name+'.log'))
    logger = logging.getLogger(logger_name)

    # ----------------------------------------
    # load model
    # ----------------------------------------

    if 'drunet' in model_name:
        from models.network_unet import UNetRes as net
        model = net(in_nc=n_channels+1, out_nc=n_channels, nc=[64, 128, 256, 512], nb=4, act_mode='R', downsample_mode="strideconv", upsample_mode="convtranspose")
        model.load_state_dict(torch.load(model_path), strict=True)
        model.eval()
        for _, v in model.named_parameters():
            v.requires_grad = False
        model = model.to(device)
    elif 'ircnn' in model_name:
        from models.network_dncnn import IRCNN as net
        model = net(in_nc=n_channels, out_nc=n_channels, nc=64)
        model25 = torch.load(model_path)
        former_idx = 0

    logger.info('model_name:{}, image sigma:{:.3f}, model sigma:{:.3f}'.format(model_name, noise_level_img, noise_level_model))
    logger.info('Model path: {:s}'.format(model_path))
    logger.info(L_path)
    L_paths = util.get_image_paths(L_path)

    # --------------------------------
    # load kernel
    # --------------------------------

    # kernels = hdf5storage.loadmat(os.path.join('kernels', 'Levin09.mat'))['kernels']
    if classical_degradation:
        kernels = hdf5storage.loadmat(os.path.join('kernels', 'kernels_12.mat'))['kernels']
    else:
        kernels = hdf5storage.loadmat(os.path.join('kernels', 'kernel_bicubicx234.mat'))['kernels']

    test_results_ave = OrderedDict()
    test_results_ave['psnr_sf_k'] = []
    test_results_ave['psnr_y_sf_k'] = []

    for sf in test_sf:
        border = sf
        modelSigma2 = max(sf, noise_level_model*255.)
        k_num = 8 if classical_degradation else 1

        for k_index in range(k_num):
            logger.info('--------- sf:{:>1d} --k:{:>2d} ---------'.format(sf, k_index))
            test_results = OrderedDict()
            test_results['psnr'] = []
            test_results['psnr_y'] = []

            if not classical_degradation:  # for bicubic degradation
                k_index = sf-2
            k = kernels[0, k_index].astype(np.float64)

            util.surf(k) if show_img else None

            for idx, img in enumerate(L_paths):

                # --------------------------------
                # (1) get img_L
                # --------------------------------

                img_name, ext = os.path.splitext(os.path.basename(img))
                img_H = util.imread_uint(img, n_channels=n_channels)
                img_H = util.modcrop(img_H, sf)  # modcrop

                if classical_degradation:
                    img_L = sr.classical_degradation(img_H, k, sf)
                    util.imshow(img_L) if show_img else None
                    img_L = util.uint2single(img_L)
                else:
                    img_L = util.imresize_np(util.uint2single(img_H), 1/sf)

                np.random.seed(seed=0)  # for reproducibility
                img_L += np.random.normal(0, noise_level_img, img_L.shape) # add AWGN

                # --------------------------------
                # (2) get rhos and sigmas
                # --------------------------------

                rhos, sigmas = pnp.get_rho_sigma(sigma=max(0.255/255., noise_level_model), iter_num=iter_num, modelSigma1=modelSigma1, modelSigma2=modelSigma2, w=1)
                rhos, sigmas = torch.tensor(rhos).to(device), torch.tensor(sigmas).to(device)

                # --------------------------------
                # (3) initialize x, and pre-calculation
                # --------------------------------
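                # initialize x by bicubic upsampling of the LR image; shift_pixel then compensates the
                # sub-pixel offset of the classical (blur + direct downsampling) degradation model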

                x = cv2.resize(img_L, (img_L.shape[1]*sf, img_L.shape[0]*sf), interpolation=cv2.INTER_CUBIC)
                if np.ndim(x)==2:
                    x = x[..., None]

                if classical_degradation:
                    x = sr.shift_pixel(x, sf)
                x = util.single2tensor4(x).to(device)

                img_L_tensor, k_tensor = util.single2tensor4(img_L), util.single2tensor4(np.expand_dims(k, 2))
                [k_tensor, img_L_tensor] = util.todevice([k_tensor, img_L_tensor], device)
                FB, FBC, F2B, FBFy = sr.pre_calculate(img_L_tensor, k_tensor, sf)

                # --------------------------------
                # (4) main iterations
                # --------------------------------

                for i in range(iter_num):

                    # --------------------------------
                    # step 1, FFT
                    # --------------------------------

                    tau = rhos[i].float().repeat(1, 1, 1, 1)
                    x = sr.data_solution(x.float(), FB, FBC, F2B, FBFy, tau, sf)

                    if 'ircnn' in model_name:
                        current_idx = int(np.ceil(sigmas[i].cpu().numpy()*255./2.)-1)  # int() instead of the removed np.int
            
                        if current_idx != former_idx:
                            model.load_state_dict(model25[str(current_idx)], strict=True)
                            model.eval()
                            for _, v in model.named_parameters():
                                v.requires_grad = False
                            model = model.to(device)
                        former_idx = current_idx

                    # --------------------------------
                    # step 2, denoiser
                    # --------------------------------

                    if x8:
                        x = util.augment_img_tensor4(x, i % 8)
                        
                    if 'drunet' in model_name:
                        x = torch.cat((x, sigmas[i].float().repeat(1, 1, x.shape[2], x.shape[3])), dim=1)
                        x = utils_model.test_mode(model, x, mode=2, refield=32, min_size=256, modulo=16)
                    elif 'ircnn' in model_name:
                        x = model(x)

                    if x8:
                        if i % 8 == 3 or i % 8 == 5:
                            x = util.augment_img_tensor4(x, 8 - i % 8)
                        else:
                            x = util.augment_img_tensor4(x, i % 8)

                # --------------------------------
                # (3) img_E
                # --------------------------------

                img_E = util.tensor2uint(x)

                if save_E:
                    util.imsave(img_E, os.path.join(E_path, img_name+'_x'+str(sf)+'_k'+str(k_index)+'_'+model_name+'.png'))

                if n_channels == 1:
                    img_H = img_H.squeeze()

                # --------------------------------
                # (4) img_LEH
                # --------------------------------

                img_L = util.single2uint(img_L).squeeze()

                if save_LEH:
                    k_v = k/np.max(k)*1.0
                    if n_channels==1:
                        k_v = util.single2uint(k_v)
                    else:
                        k_v = util.single2uint(np.tile(k_v[..., np.newaxis], [1, 1, n_channels]))
                    k_v = cv2.resize(k_v, (3*k_v.shape[1], 3*k_v.shape[0]), interpolation=cv2.INTER_NEAREST)
                    img_I = cv2.resize(img_L, (sf*img_L.shape[1], sf*img_L.shape[0]), interpolation=cv2.INTER_NEAREST)
                    img_I[:k_v.shape[0], -k_v.shape[1]:, ...] = k_v
                    img_I[:img_L.shape[0], :img_L.shape[1], ...] = img_L
                    util.imshow(np.concatenate([img_I, img_E, img_H], axis=1), title='LR / Recovered / Ground-truth') if show_img else None
                    util.imsave(np.concatenate([img_I, img_E, img_H], axis=1), os.path.join(E_path, img_name+'_x'+str(sf)+'_k'+str(k_index)+'_LEH.png'))

                if save_L:
                    util.imsave(img_L, os.path.join(E_path, img_name+'_x'+str(sf)+'_k'+str(k_index)+'_LR.png'))

                psnr = util.calculate_psnr(img_E, img_H, border=border)
                test_results['psnr'].append(psnr)
                logger.info('{:->4d}--> {:>10s} -- sf:{:>1d} --k:{:>2d} PSNR: {:.2f}dB'.format(idx+1, img_name+ext, sf, k_index, psnr))

                if n_channels == 3:
                    img_E_y = util.rgb2ycbcr(img_E, only_y=True)
                    img_H_y = util.rgb2ycbcr(img_H, only_y=True)
                    psnr_y = util.calculate_psnr(img_E_y, img_H_y, border=border)
                    test_results['psnr_y'].append(psnr_y)

            # --------------------------------
            # Average PSNR for all kernels
            # --------------------------------

            ave_psnr_k = sum(test_results['psnr']) / len(test_results['psnr'])
            logger.info('------> Average PSNR(RGB) of ({}) scale factor: ({}), kernel: ({}) sigma: ({:.2f}): {:.2f} dB'.format(testset_name, sf, k_index, noise_level_model, ave_psnr_k))
            test_results_ave['psnr_sf_k'].append(ave_psnr_k)

            if n_channels == 3:  # RGB image
                ave_psnr_y_k = sum(test_results['psnr_y']) / len(test_results['psnr_y'])
                logger.info('------> Average PSNR(Y) of ({}) scale factor: ({}), kernel: ({}) sigma: ({:.2f}): {:.2f} dB'.format(testset_name, sf, k_index, noise_level_model, ave_psnr_y_k))
                test_results_ave['psnr_y_sf_k'].append(ave_psnr_y_k)

    # ---------------------------------------
    # Average PSNR for all sf and kernels
    # ---------------------------------------

    ave_psnr_sf_k = sum(test_results_ave['psnr_sf_k']) / len(test_results_ave['psnr_sf_k'])
    logger.info('------> Average PSNR of ({}) {:.2f} dB'.format(testset_name, ave_psnr_sf_k))
    if n_channels == 3:
        ave_psnr_y_sf_k = sum(test_results_ave['psnr_y_sf_k']) / len(test_results_ave['psnr_y_sf_k'])
        logger.info('------> Average PSNR of ({}) {:.2f} dB'.format(testset_name, ave_psnr_y_sf_k))
Code example #20
def main():
    """
    # ----------------------------------------------------------------------------------
    # In real applications, set the following properly to get the best performance:
    # - "noise_level_img": from [3, 25]; use 3 for a clean image, try 15 for very noisy LR images
    # - "k" (or "kernel_width"): the blur kernel is very important! choose kernel_width from [0.6, 3.0]
    # ----------------------------------------------------------------------------------
    """
    ##############################################################################

    testset_name = 'Set3C'  # set test set,  'set5' | 'srbsd68'
    noise_level_img = 3  # set noise level of image, from [3, 25], set 3 for clean image
    model_name = 'drunet_color'  # 'ircnn_color'         # set denoiser, | 'drunet_color' | 'ircnn_gray' | 'drunet_gray' | 'ircnn_color'
    sf = 2  # set scale factor, 1, 2, 3, 4
    iter_num = 24  # set number of iterations, default: 24 for SISR

    # --------------------------------
    # set blur kernel
    # --------------------------------
    kernel_width_default_x1234 = [
        0.6, 0.9, 1.7, 2.2
    ]  # Gaussian kernel widths for x1, x2, x3, x4
    noise_level_model = noise_level_img / 255.  # noise level of model
    kernel_width = kernel_width_default_x1234[sf - 1]
    """
    # set your own kernel width !!!!!!!!!!
    """
    # kernel_width = 1.0

    k = utils_deblur.fspecial('gaussian', 25, kernel_width)
    k = sr.shift_pixel(k, sf)  # shift the kernel
    k /= np.sum(k)

    ##############################################################################

    show_img = False
    util.surf(k) if show_img else None
    x8 = True  # default: False, x8 to boost performance
    modelSigma1 = 49  # set sigma_1, default: 49
    modelSigma2 = max(sf, noise_level_model * 255.)
    classical_degradation = True  # set classical degradation or bicubic degradation

    task_current = 'sr'  # 'sr' for super-resolution
    n_channels = 1 if 'gray' in model_name else 3  # fixed
    model_zoo = 'model_zoo'  # fixed
    testsets = 'testsets'  # fixed
    results = 'results'  # fixed
    result_name = testset_name + '_realapplications_' + task_current + '_' + model_name
    model_path = os.path.join(model_zoo, model_name + '.pth')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    torch.cuda.empty_cache()

    # ----------------------------------------
    # L_path, E_path, H_path
    # ----------------------------------------
    L_path = os.path.join(testsets,
                          testset_name)  # L_path, for Low-quality images
    E_path = os.path.join(results, result_name)  # E_path, for Estimated images
    util.mkdir(E_path)

    logger_name = result_name
    utils_logger.logger_info(logger_name,
                             log_path=os.path.join(E_path,
                                                   logger_name + '.log'))
    logger = logging.getLogger(logger_name)

    # ----------------------------------------
    # load model
    # ----------------------------------------
    if 'drunet' in model_name:
        from models.network_unet import UNetRes as net
        model = net(in_nc=n_channels + 1,
                    out_nc=n_channels,
                    nc=[64, 128, 256, 512],
                    nb=4,
                    act_mode='R',
                    downsample_mode="strideconv",
                    upsample_mode="convtranspose")
        model.load_state_dict(torch.load(model_path), strict=True)
        model.eval()
        for _, v in model.named_parameters():
            v.requires_grad = False
        model = model.to(device)
    elif 'ircnn' in model_name:
        from models.network_dncnn import IRCNN as net
        model = net(in_nc=n_channels, out_nc=n_channels, nc=64)
        model25 = torch.load(model_path)
        former_idx = 0

    logger.info('model_name:{}, image sigma:{:.3f}, model sigma:{:.3f}'.format(
        model_name, noise_level_img, noise_level_model))
    logger.info('Model path: {:s}'.format(model_path))
    logger.info(L_path)
    L_paths = util.get_image_paths(L_path)

    for idx, img in enumerate(L_paths):

        # --------------------------------
        # (1) get img_L
        # --------------------------------
        logger.info('Model path: {:s} Image: {:s}'.format(model_path, img))
        img_name, ext = os.path.splitext(os.path.basename(img))
        img_L = util.imread_uint(img, n_channels=n_channels)
        img_L = util.uint2single(img_L)
        img_L = util.modcrop(img_L, 8)  # modcrop

        # --------------------------------
        # (2) get rhos and sigmas
        # --------------------------------
        rhos, sigmas = pnp.get_rho_sigma(sigma=max(0.255 / 255.,
                                                   noise_level_model),
                                         iter_num=iter_num,
                                         modelSigma1=modelSigma1,
                                         modelSigma2=modelSigma2,
                                         w=1)
        rhos, sigmas = torch.tensor(rhos).to(device), torch.tensor(sigmas).to(
            device)

        # --------------------------------
        # (3) initialize x, and pre-calculation
        # --------------------------------
        x = cv2.resize(img_L, (img_L.shape[1] * sf, img_L.shape[0] * sf),
                       interpolation=cv2.INTER_CUBIC)

        if np.ndim(x) == 2:
            x = x[..., None]

        if classical_degradation:
            x = sr.shift_pixel(x, sf)
        x = util.single2tensor4(x).to(device)

        img_L_tensor, k_tensor = util.single2tensor4(
            img_L), util.single2tensor4(np.expand_dims(k, 2))
        [k_tensor, img_L_tensor] = util.todevice([k_tensor, img_L_tensor],
                                                 device)
        FB, FBC, F2B, FBFy = sr.pre_calculate(img_L_tensor, k_tensor, sf)

        # --------------------------------
        # (4) main iterations
        # --------------------------------
        for i in range(iter_num):

            print('Iter: {} / {}'.format(i, iter_num))

            # --------------------------------
            # step 1, FFT
            # --------------------------------
            tau = rhos[i].float().repeat(1, 1, 1, 1)
            x = sr.data_solution(x, FB, FBC, F2B, FBFy, tau, sf)

            if 'ircnn' in model_name:
                current_idx = int(np.ceil(sigmas[i].cpu().numpy() * 255. / 2.) - 1)
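                # IRCNN provides a bank of 25 denoisers covering noise levels
                # up to roughly 50 (in steps of 2); the index above selects the
                # model whose training noise level is closest to the current
                # sigma, so current_idx falls in [0, 24].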

                if current_idx != former_idx:
                    model.load_state_dict(model25[str(current_idx)],
                                          strict=True)
                    model.eval()
                    for _, v in model.named_parameters():
                        v.requires_grad = False
                    model = model.to(device)
                former_idx = current_idx

            # --------------------------------
            # step 2, denoiser
            # --------------------------------
            if x8:
                x = util.augment_img_tensor4(x, i % 8)

            if 'drunet' in model_name:
                x = torch.cat(
                    (x, sigmas[i].repeat(1, 1, x.shape[2], x.shape[3])), dim=1)
                x = utils_model.test_mode(model,
                                          x,
                                          mode=2,
                                          refield=64,
                                          min_size=256,
                                          modulo=16)
            elif 'ircnn' in model_name:
                x = model(x)

            if x8:
                if i % 8 == 3 or i % 8 == 5:
                    x = util.augment_img_tensor4(x, 8 - i % 8)
                else:
                    x = util.augment_img_tensor4(x, i % 8)

        # --------------------------------
        # (5) img_E
        # --------------------------------
        img_E = util.tensor2uint(x)
        util.imsave(
            img_E,
            os.path.join(E_path, img_name + '_x' + str(sf) + '_' + model_name +
                         '.png'))
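
Note: the rhos/sigmas schedule used above comes from pnp.get_rho_sigma. As a hedged, self-contained sketch (not the library implementation), such an HQS schedule can be built by log-spacing the denoiser noise level between modelSigma1 and modelSigma2 and deriving each data-term weight from the image noise level; the helper name sketch_rho_sigma and the constant lam below are illustrative assumptions.

import numpy as np

def sketch_rho_sigma(sigma_img=3 / 255., iter_num=24,
                     model_sigma1=49., model_sigma2=2.55, lam=0.23):
    """Hedged sketch of an HQS noise/penalty schedule (illustrative only)."""
    # log-spaced denoiser noise levels, decreasing from model_sigma1 to
    # model_sigma2, mapped from the [0, 255] range to [0, 1]
    sigmas = np.logspace(np.log10(model_sigma1), np.log10(model_sigma2), iter_num) / 255.
    # data-term weights: grow as the denoiser noise level shrinks
    # (lam is an assumed trade-off constant)
    rhos = lam * (sigma_img ** 2) / (sigmas ** 2)
    return rhos, sigmas

# example: 24-iteration schedule for a nearly clean LR image
rhos, sigmas = sketch_rho_sigma(sigma_img=3 / 255., iter_num=24)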
Code Example #21
File: demo_test_dpsr.py  Project: zzeng13/DPSR
def main():

    # --------------------------------
    # let's start!
    # --------------------------------
    utils_logger.logger_info('test_dpsr', log_path='test_dpsr.log')
    logger = logging.getLogger('test_dpsr')

    # basic setting
    # ================================================

    sf = 4  # scale factor
    noise_level_img = 0 / 255.0  # noise level of low quality image, default 0
    noise_level_model = noise_level_img  # noise level of model, default 0
    show_img = True

    use_srganplus = True  # 'True' for SRGAN+ (x4) and 'False' for SRResNet+ (x2,x3,x4)
    testsets = 'testsets'
    testset_current = 'BSD68'

    if use_srganplus and sf == 4:
        model_prefix = 'DPSRGAN'
        save_suffix = 'dpsrgan'
    else:
        model_prefix = 'DPSR'
        save_suffix = 'dpsr'

    model_path = os.path.join('DPSR_models', model_prefix + 'x%01d.pth' % (sf))

    iter_num = 15  # number of iterations, fixed
    n_channels = 3  # only color images, fixed
    border = sf**2  # shave border to calculate PSNR, fixed

    # k_type = ('d', 'm', 'g')
    k_type = ('m',)  # motion blur kernel only (trailing comma keeps it a tuple)

    # ================================================

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # --------------------------------
    # load model
    # --------------------------------
    model = SRResNet(in_nc=4,
                     out_nc=3,
                     nc=96,
                     nb=16,
                     upscale=sf,
                     act_mode='R',
                     upsample_mode='pixelshuffle')
    model.load_state_dict(torch.load(model_path), strict=True)
    model.eval()
    for k, v in model.named_parameters():
        v.requires_grad = False
    model = model.to(device)
    logger.info('Model path {:s}. Testing...'.format(model_path))

    # --------------------------------
    # read image (img) and kernel (k)
    # --------------------------------
    test_results = OrderedDict()

    for k_type_n in range(len(k_type)):

        # --1--> L_folder, folder of Low-quality images
        testsubset_current = 'x%01d_%01s' % (sf, k_type[k_type_n])
        L_folder = os.path.join(testsets, testset_current, testsubset_current)

        # --2--> E_folder, folder of Estimated images
        E_folder = os.path.join(testsets, testset_current,
                                testsubset_current + '_' + save_suffix)
        util.mkdir(E_folder)

        # --3--> H_folder, folder of High-quality images
        H_folder = os.path.join(testsets, testset_current, 'GT')

        test_results['psnr_' + k_type[k_type_n]] = []

        logger.info(L_folder)
        idx = 0

        for im in os.listdir(os.path.join(L_folder)):
            if im.endswith('.jpg') or im.endswith('.bmp') or im.endswith(
                    '.png'):

                # --------------------------------
                # (1) img_L
                # --------------------------------
                idx += 1
                img_name, ext = os.path.splitext(im)
                img_L = util.imread_uint(os.path.join(L_folder, im),
                                         n_channels=n_channels)
                util.imshow(img_L) if show_img else None

                np.random.seed(seed=0)  # for reproducibility
                img_L = util.unit2single(img_L) + np.random.normal(
                    0, noise_level_img, img_L.shape)

                # --------------------------------
                # (2) kernel
                # --------------------------------
                k = loadmat(os.path.join(L_folder,
                                         img_name + '.mat'))['kernel']
                k = k.astype(np.float32)
                k /= np.sum(k)

                # --------------------------------
                # (3) get upperleft, denominator
                # --------------------------------
                upperleft, denominator = utils_deblur.get_uperleft_denominator(
                    img_L, k)

                # --------------------------------
                # (4) get rhos and sigmas
                # --------------------------------
                rhos, sigmas = utils_deblur.get_rho_sigma(sigma=max(
                    0.255 / 255., noise_level_model),
                                                          iter_num=iter_num)

                # --------------------------------
                # (5) main iteration
                # --------------------------------
                z = img_L
                rhos = np.float32(rhos)
                sigmas = np.float32(sigmas)

                for i in range(iter_num):

                    # --------------------------------
                    # step 1, Eq. (9) // FFT
                    # --------------------------------
                    rho = rhos[i]
                    if i != 0:
                        z = util.imresize_np(z, 1 / sf, True)

                    z = np.real(
                        np.fft.ifft2(
                            (upperleft + rho * np.fft.fft2(z, axes=(0, 1))) /
                            (denominator + rho),
                            axes=(0, 1)))
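                    # Closed-form solution of the data sub-problem in the
                    # Fourier domain: upperleft and denominator were
                    # precomputed from the blur kernel and the blurred input,
                    # and rho weights the proximal term that keeps z close to
                    # its previous estimate.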
                    # imsave('LR_deblurred_%02d.png'%i, np.clip(z, 0, 1))

                    # --------------------------------
                    # step 2, Eq. (12) // super-resolver
                    # --------------------------------
                    sigma = torch.from_numpy(np.array(sigmas[i]))
                    img_L = util.single2tensor4(z)

                    noise_level_map = torch.ones(
                        (1, 1, img_L.size(2), img_L.size(3)),
                        dtype=torch.float).mul_(sigma)
                    img_L = torch.cat((img_L, noise_level_map), dim=1)
                    img_L = img_L.to(device)
                    # with torch.no_grad():
                    z = model(img_L)
                    z = util.tensor2single(z)

                # --------------------------------
                # (6) img_E
                # --------------------------------
                img_E = util.single2uint(z)  # np.uint8((z * 255.0).round())

                # --------------------------------
                # (7) img_H
                # --------------------------------
                img_H = util.imread_uint(os.path.join(H_folder,
                                                      img_name[:7] + '.png'),
                                         n_channels=n_channels)

                util.imshow(
                    np.concatenate([img_E, img_H], axis=1),
                    title='Recovered / Ground-truth') if show_img else None

                psnr = util.calculate_psnr(img_E, img_H, border=border)

                logger.info('{:->4d}--> {:>10s}, {:.2f}dB'.format(
                    idx, im, psnr))
                test_results['psnr_' + k_type[k_type_n]].append(psnr)

                util.imsave(img_E, os.path.join(E_folder, img_name + ext))

        ave_psnr = sum(test_results['psnr_' + k_type[k_type_n]]) / len(
            test_results['psnr_' + k_type[k_type_n]])
        logger.info(
            '------> Average PSNR(RGB) of ({} - {}) is : {:.2f} dB'.format(
                testset_current, testsubset_current, ave_psnr))
Code Example #22
def main(json_path='options/train_msrresnet_psnr.json'):
    '''
    # ----------------------------------------
    # Step--1 (prepare opt)
    # ----------------------------------------
    '''

    parser = argparse.ArgumentParser()
    parser.add_argument('-opt',
                        type=str,
                        default=json_path,
                        help='Path to option JSON file.')

    opt = option.parse(parser.parse_args().opt, is_train=True)
    util.mkdirs(
        (path for key, path in opt['path'].items() if 'pretrained' not in key))

    # ----------------------------------------
    # update opt
    # ----------------------------------------
    # -->-->-->-->-->-->-->-->-->-->-->-->-->-
    init_iter, init_path_G = option.find_last_checkpoint(opt['path']['models'],
                                                         net_type='G')
    opt['path']['pretrained_netG'] = init_path_G
    current_step = init_iter

    border = opt['scale']
    # --<--<--<--<--<--<--<--<--<--<--<--<--<-

    # ----------------------------------------
    # save opt to a '../option.json' file
    # ----------------------------------------
    option.save(opt)

    # ----------------------------------------
    # return None for missing key
    # ----------------------------------------
    opt = option.dict_to_nonedict(opt)

    # ----------------------------------------
    # configure logger
    # ----------------------------------------
    logger_name = 'train'
    utils_logger.logger_info(
        logger_name, os.path.join(opt['path']['log'], logger_name + '.log'))
    logger = logging.getLogger(logger_name)
    logger.info(option.dict2str(opt))

    # ----------------------------------------
    # seed
    # ----------------------------------------
    seed = opt['train']['manual_seed']
    if seed is None:
        seed = random.randint(1, 10000)
    logger.info('Random seed: {}'.format(seed))
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    '''
    # ----------------------------------------
    # Step--2 (create dataloader)
    # ----------------------------------------
    '''

    # ----------------------------------------
    # 1) create_dataset
    # 2) create_dataloader for train and test
    # ----------------------------------------
    for phase, dataset_opt in opt['datasets'].items():
        if phase == 'train':
            train_set = define_Dataset(dataset_opt)
            train_size = int(
                math.ceil(
                    len(train_set) / dataset_opt['dataloader_batch_size']))
            logger.info('Number of train images: {:,d}, iters: {:,d}'.format(
                len(train_set), train_size))
            train_loader = DataLoader(
                train_set,
                batch_size=dataset_opt['dataloader_batch_size'],
                shuffle=dataset_opt['dataloader_shuffle'],
                num_workers=dataset_opt['dataloader_num_workers'],
                drop_last=True,
                pin_memory=True)
        elif phase == 'test':
            test_set = define_Dataset(dataset_opt)
            test_loader = DataLoader(test_set,
                                     batch_size=1,
                                     shuffle=False,
                                     num_workers=1,
                                     drop_last=False,
                                     pin_memory=True)
        else:
            raise NotImplementedError("Phase [%s] is not recognized." % phase)
    '''
    # ----------------------------------------
    # Step--3 (initialize model)
    # ----------------------------------------
    '''

    model = define_Model(opt)
    model.init_train()
    logger.info(model.info_network())
    logger.info(model.info_params())
    '''
    # ----------------------------------------
    # Step--4 (main training)
    # ----------------------------------------
    '''

    for epoch in range(100):  # keep running
        for i, train_data in enumerate(train_loader):

            current_step += 1

            # -------------------------------
            # 1) update learning rate
            # -------------------------------
            model.update_learning_rate(current_step)

            # -------------------------------
            # 2) feed patch pairs
            # -------------------------------
            model.feed_data(train_data)

            # -------------------------------
            # 3) optimize parameters
            # -------------------------------
            model.optimize_parameters(current_step)

            # -------------------------------
            # 4) training information
            # -------------------------------
            if current_step % opt['train']['checkpoint_print'] == 0:
                logs = model.current_log()  # such as loss
                message = '<epoch:{:3d}, iter:{:8,d}, lr:{:.3e}> '.format(
                    epoch, current_step, model.current_learning_rate())
                for k, v in logs.items():  # merge log information into message
                    message += '{:s}: {:.3e} '.format(k, v)
                logger.info(message)

            # -------------------------------
            # 5) save model
            # -------------------------------
            if current_step % opt['train']['checkpoint_save'] == 0:
                logger.info('Saving the model.')
                model.save(current_step)

            # -------------------------------
            # 6) testing
            # -------------------------------
            if current_step % opt['train']['checkpoint_test'] == 0:

                avg_psnr = 0.0
                idx = 0

                for test_data in test_loader:
                    idx += 1
                    image_name_ext = os.path.basename(test_data['L_path'][0])
                    img_name, ext = os.path.splitext(image_name_ext)

                    img_dir = os.path.join(opt['path']['images'], img_name)
                    util.mkdir(img_dir)

                    model.feed_data(test_data)
                    model.test()

                    visuals = model.current_visuals()
                    E_img = util.tensor2uint(visuals['E'])
                    H_img = util.tensor2uint(visuals['H'])

                    # -----------------------
                    # save estimated image E
                    # -----------------------
                    save_img_path = os.path.join(
                        img_dir,
                        '{:s}_{:d}.png'.format(img_name, current_step))
                    util.imsave(E_img, save_img_path)

                    # -----------------------
                    # calculate PSNR
                    # -----------------------
                    current_psnr = util.calculate_psnr(E_img,
                                                       H_img,
                                                       border=border)

                    logger.info('{:->4d}--> {:>10s} | {:<4.2f}dB'.format(
                        idx, image_name_ext, current_psnr))

                    avg_psnr += current_psnr

                avg_psnr = avg_psnr / idx

                # testing log
                logger.info(
                    '<epoch:{:3d}, iter:{:8,d}, Average PSNR : {:<.2f}dB\n'.
                    format(epoch, current_step, avg_psnr))

    logger.info('Saving the final model.')
    model.save('latest')
    logger.info('End of training.')
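
For orientation, the training loop above only reads a handful of keys from the parsed options. The dict below is a hypothetical, trimmed-down illustration of that structure written as Python (values are made up; the real options/train_msrresnet_psnr.json contains many more fields):

# Hypothetical subset of the parsed options used by the loop above (illustrative values only).
opt_sketch = {
    'scale': 4,                    # used as the PSNR border during testing
    'path': {
        'models': 'superresolution/msrresnet/models',  # searched by find_last_checkpoint
        'log': 'superresolution/msrresnet',            # location of train.log
        'images': 'superresolution/msrresnet/images',  # per-image test outputs
    },
    'train': {
        'manual_seed': 0,          # None -> a random seed is drawn
        'checkpoint_print': 200,   # log training info every 200 steps
        'checkpoint_save': 5000,   # save the model every 5000 steps
        'checkpoint_test': 5000,   # run the test loop every 5000 steps
    },
    'datasets': {
        'train': {'dataloader_batch_size': 16,
                  'dataloader_shuffle': True,
                  'dataloader_num_workers': 8},
        'test': {},
    },
}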
Code Example #23
def main():

    # ----------------------------------------
    # Preparation
    # ----------------------------------------
    model_name = 'usrnet'      # 'usrgan' | 'usrnet' | 'usrgan_tiny' | 'usrnet_tiny'
    testset_name = 'set_real'  # test set,  'set_real'
    test_image = 'chip.png'    # 'chip.png', 'comic.png'
    #test_image = 'comic.png'

    sf = 4                     # scale factor, only from {1, 2, 3, 4}
    show_img = False           # default: False
    save_E = True              # save estimated image
    save_LE = True             # save zoomed LR, Estimated images

    # ----------------------------------------
    # set noise level and kernel
    # ----------------------------------------
    if 'chip' in test_image:
        noise_level_img = 15       # noise level for LR image, 15 for chip
        kernel_width_default_x1234 = [0.6, 0.9, 1.7, 2.2] # Gaussian kernel widths for x1, x2, x3, x4
    else:
        noise_level_img = 2       # noise level for LR image, 0.5~3 for clean images
        kernel_width_default_x1234 = [0.4, 0.7, 1.5, 2.0] # default Gaussian kernel widths of clean/sharp images for x1, x2, x3, x4

    noise_level_model = noise_level_img/255.  # noise level of model
    kernel_width = kernel_width_default_x1234[sf-1]

    # set your own kernel width
    # kernel_width = 2.2

    k = utils_deblur.fspecial('gaussian', 25, kernel_width)
    k = sr.shift_pixel(k, sf)  # shift the kernel
    k /= np.sum(k)
    util.surf(k) if show_img else None
    # scio.savemat('kernel_realapplication.mat', {'kernel':k})

    # load approximated bicubic kernels
    # kernels = hdf5storage.loadmat(os.path.join('kernels', 'kernel_bicubicx234.mat'))['kernels']
    # kernels = loadmat(os.path.join('kernels', 'kernel_bicubicx234.mat'))['kernels']
    # kernel = kernels[0, sf-2].astype(np.float64)

    kernel = util.single2tensor4(k[..., np.newaxis])


    n_channels = 1 if 'gray' in model_name else 3  # 3 for color image, 1 for grayscale image
    model_pool = 'model_zoo'  # fixed
    testsets = 'testsets'     # fixed
    results = 'results'       # fixed
    result_name = testset_name + '_' + model_name
    model_path = os.path.join(model_pool, model_name+'.pth')

    # ----------------------------------------
    # L_path, E_path
    # ----------------------------------------
    L_path = os.path.join(testsets, testset_name) # L_path, fixed, for Low-quality images
    E_path = os.path.join(results, result_name)   # E_path, fixed, for Estimated images
    util.mkdir(E_path)

    logger_name = result_name
    utils_logger.logger_info(logger_name, log_path=os.path.join(E_path, logger_name+'.log'))
    logger = logging.getLogger(logger_name)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # ----------------------------------------
    # load model
    # ----------------------------------------
    if 'tiny' in model_name:
        model = net(n_iter=6, h_nc=32, in_nc=4, out_nc=3, nc=[16, 32, 64, 64],
                    nb=2, act_mode="R", downsample_mode='strideconv', upsample_mode="convtranspose")
    else:
        model = net(n_iter=8, h_nc=64, in_nc=4, out_nc=3, nc=[64, 128, 256, 512],
                    nb=2, act_mode="R", downsample_mode='strideconv', upsample_mode="convtranspose")

    model.load_state_dict(torch.load(model_path), strict=True)
    model.eval()
    for key, v in model.named_parameters():
        v.requires_grad = False

    number_parameters = sum(map(lambda x: x.numel(), model.parameters()))
    logger.info('Params number: {}'.format(number_parameters))
    model = model.to(device)
    logger.info('Model path: {:s}'.format(model_path))

    logger.info('model_name:{}, image sigma:{}'.format(model_name, noise_level_img))
    logger.info(L_path)

    img = os.path.join(L_path, test_image)
    # ------------------------------------
    # (1) img_L
    # ------------------------------------
    img_name, ext = os.path.splitext(os.path.basename(img))
    img_L = util.imread_uint(img, n_channels=n_channels)
    img_L = util.uint2single(img_L)

    util.imshow(img_L) if show_img else None
    w, h = img_L.shape[:2]
    logger.info('{:>10s}--> ({:>4d}x{:<4d})'.format(img_name+ext, w, h))

    # boundary handling
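    # The LR image is upscaled with nearest-neighbor interpolation, padded with
    # wrap_boundary_liu so the 25x25 kernel does not introduce border
    # artifacts, downsampled back to the LR grid, and the original LR pixels
    # are then written back into the top-left corner.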
    border = 8     # default setting for kernel size 25x25
    img = cv2.resize(img_L, (sf*h, sf*w), interpolation=cv2.INTER_NEAREST)
    img = utils_deblur.wrap_boundary_liu(img, [int(np.ceil(sf*w/border+2)*border), int(np.ceil(sf*h/border+2)*border)])
    img_wrap = sr.downsample_np(img, sf, center=False)
    img_wrap[:w, :h, :] = img_L
    img_L = img_wrap

    util.imshow(util.single2uint(img_L), title='LR image with noise level {}'.format(noise_level_img)) if show_img else None

    img_L = util.single2tensor4(img_L)
    img_L = img_L.to(device)

    # ------------------------------------
    # (2) img_E
    # ------------------------------------
    sigma = torch.tensor(noise_level_model).float().view([1, 1, 1, 1])
    [img_L, kernel, sigma] = [el.to(device) for el in [img_L, kernel, sigma]]

    img_E = model(img_L, kernel, sf, sigma)

    img_E = util.tensor2uint(img_E)[:sf*w, :sf*h, ...]

    if save_E:
        util.imsave(img_E, os.path.join(E_path, img_name+'_x'+str(sf)+'_'+model_name+'.png'))

    # --------------------------------
    # (3) save img_LE
    # --------------------------------
    if save_LE:
        k_v = k/np.max(k)*1.2
        k_v = util.single2uint(np.tile(k_v[..., np.newaxis], [1, 1, 3]))
        k_factor = 3
        k_v = cv2.resize(k_v, (k_factor*k_v.shape[1], k_factor*k_v.shape[0]), interpolation=cv2.INTER_NEAREST)
        img_L = util.tensor2uint(img_L)[:w, :h, ...]
        img_I = cv2.resize(img_L, (sf*img_L.shape[1], sf*img_L.shape[0]), interpolation=cv2.INTER_NEAREST)
        img_I[:k_v.shape[0], :k_v.shape[1], :] = k_v
        util.imshow(np.concatenate([img_I, img_E], axis=1), title='LR / Recovered') if show_img else None
        util.imsave(np.concatenate([img_I, img_E], axis=1), os.path.join(E_path, img_name+'_x'+str(sf)+'_'+model_name+'_LE.png'))
Code Example #24
def main():

    # ----------------------------------------
    # Preparation
    # ----------------------------------------

    noise_level_img = 0  # default: 0, noise level for LR image
    noise_level_model = noise_level_img  # noise level for model
    model_name = 'dpsr_x4_gan'  # 'dpsr_x2' | 'dpsr_x3' | 'dpsr_x4' | 'dpsr_x4_gan'
    testset_name = 'set5'  # test set,  'set5' | 'srbsd68'
    need_degradation = True  # default: True
    x8 = False  # default: False, x8 to boost performance
    sf = [int(s) for s in re.findall(r'\d+', model_name)][0]  # scale factor
    show_img = False  # default: False

    task_current = 'sr'  # 'dn' for denoising | 'sr' for super-resolution
    n_channels = 3  # fixed
    nc = 96  # fixed, number of channels
    nb = 16  # fixed, number of conv layers
    model_pool = 'model_zoo'  # fixed
    testsets = 'testsets'  # fixed
    results = 'results'  # fixed
    result_name = testset_name + '_' + model_name
    border = sf if task_current == 'sr' else 0  # shave border to calculate PSNR and SSIM
    model_path = os.path.join(model_pool, model_name + '.pth')

    # ----------------------------------------
    # L_path, E_path, H_path
    # ----------------------------------------

    L_path = os.path.join(testsets,
                          testset_name)  # L_path, for Low-quality images
    H_path = L_path  # H_path, for High-quality images
    E_path = os.path.join(results, result_name)  # E_path, for Estimated images
    util.mkdir(E_path)

    if H_path == L_path:
        need_degradation = True
    logger_name = result_name
    utils_logger.logger_info(logger_name,
                             log_path=os.path.join(E_path,
                                                   logger_name + '.log'))
    logger = logging.getLogger(logger_name)

    need_H = True if H_path is not None else False
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # ----------------------------------------
    # load model
    # ----------------------------------------

    from models.network_dpsr import MSRResNet_prior as net
    model = net(in_nc=n_channels + 1,
                out_nc=n_channels,
                nc=nc,
                nb=nb,
                upscale=sf,
                act_mode='R',
                upsample_mode='pixelshuffle')
    model.load_state_dict(torch.load(model_path), strict=False)
    model.eval()
    for k, v in model.named_parameters():
        v.requires_grad = False
    model = model.to(device)
    logger.info('Model path: {:s}'.format(model_path))
    number_parameters = sum(map(lambda x: x.numel(), model.parameters()))
    logger.info('Params number: {}'.format(number_parameters))

    test_results = OrderedDict()
    test_results['psnr'] = []
    test_results['ssim'] = []
    test_results['psnr_y'] = []
    test_results['ssim_y'] = []

    logger.info('model_name:{}, model sigma:{}, image sigma:{}'.format(
        model_name, noise_level_model, noise_level_img))
    logger.info(L_path)
    L_paths = util.get_image_paths(L_path)
    H_paths = util.get_image_paths(H_path) if need_H else None

    for idx, img in enumerate(L_paths):

        # ------------------------------------
        # (1) img_L
        # ------------------------------------

        img_name, ext = os.path.splitext(os.path.basename(img))
        # logger.info('{:->4d}--> {:>10s}'.format(idx+1, img_name+ext))
        img_L = util.imread_uint(img, n_channels=n_channels)
        img_L = util.uint2single(img_L)

        # degradation process, bicubic downsampling + Gaussian noise
        if need_degradation:
            img_L = util.modcrop(img_L, sf)
            img_L = util.imresize_np(img_L, 1 / sf)
            np.random.seed(seed=0)  # for reproducibility
            img_L += np.random.normal(0, noise_level_img / 255., img_L.shape)

        util.imshow(util.single2uint(img_L),
                    title='LR image with noise level {}'.format(
                        noise_level_img)) if show_img else None

        img_L = util.single2tensor4(img_L)
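        # The network expects a 4-channel input (in_nc = n_channels + 1): the
        # RGB image plus a constant noise-level map, concatenated below as an
        # extra channel.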
        noise_level_map = torch.full((1, 1, img_L.size(2), img_L.size(3)),
                                     noise_level_model / 255.).type_as(img_L)
        img_L = torch.cat((img_L, noise_level_map), dim=1)
        img_L = img_L.to(device)

        # ------------------------------------
        # (2) img_E
        # ------------------------------------

        if not x8:
            img_E = model(img_L)
        else:
            img_E = utils_model.test_mode(model, img_L, mode=3, sf=sf)

        img_E = util.tensor2uint(img_E)

        if need_H:

            # --------------------------------
            # (3) img_H
            # --------------------------------

            img_H = util.imread_uint(H_paths[idx], n_channels=n_channels)
            img_H = img_H.squeeze()
            img_H = util.modcrop(img_H, sf)

            # --------------------------------
            # PSNR and SSIM
            # --------------------------------

            psnr = util.calculate_psnr(img_E, img_H, border=border)
            ssim = util.calculate_ssim(img_E, img_H, border=border)
            test_results['psnr'].append(psnr)
            test_results['ssim'].append(ssim)
            logger.info('{:s} - PSNR: {:.2f} dB; SSIM: {:.4f}.'.format(
                img_name + ext, psnr, ssim))
            util.imshow(np.concatenate([img_E, img_H], axis=1),
                        title='Recovered / Ground-truth') if show_img else None

            if np.ndim(img_H) == 3:  # RGB image
                img_E_y = util.rgb2ycbcr(img_E, only_y=True)
                img_H_y = util.rgb2ycbcr(img_H, only_y=True)
                psnr_y = util.calculate_psnr(img_E_y, img_H_y, border=border)
                ssim_y = util.calculate_ssim(img_E_y, img_H_y, border=border)
                test_results['psnr_y'].append(psnr_y)
                test_results['ssim_y'].append(ssim_y)

        # ------------------------------------
        # save results
        # ------------------------------------

        util.imsave(img_E, os.path.join(E_path, img_name + '.png'))

    if need_H:
        ave_psnr = sum(test_results['psnr']) / len(test_results['psnr'])
        ave_ssim = sum(test_results['ssim']) / len(test_results['ssim'])
        logger.info(
            'Average PSNR/SSIM(RGB) - {} - x{} --PSNR: {:.2f} dB; SSIM: {:.4f}'
            .format(result_name, sf, ave_psnr, ave_ssim))
        if np.ndim(img_H) == 3:
            ave_psnr_y = sum(test_results['psnr_y']) / len(
                test_results['psnr_y'])
            ave_ssim_y = sum(test_results['ssim_y']) / len(
                test_results['ssim_y'])
            logger.info(
                'Average PSNR/SSIM( Y ) - {} - x{} - PSNR: {:.2f} dB; SSIM: {:.4f}'
                .format(result_name, sf, ave_psnr_y, ave_ssim_y))
Code Example #25
def main():

    # ----------------------------------------
    # Preparation
    # ----------------------------------------
    model_name = 'usrnet_tiny'  # 'usrgan' | 'usrnet' | 'usrgan_tiny' | 'usrnet_tiny'
    testset_name = 'srcvte'  # test set,  'set5' | 'srbsd68' | 'srcvte'
    test_sf = [4]  # if 'gan' in model_name else [2, 3, 4]  # scale factor, from {1,2,3,4}

    load_kernels = False
    show_img = False  # default: False
    save_L = False  # save LR image
    save_E = True  # save estimated image
    save_LEH = False  # save zoomed LR, E and H images

    # ----------------------------------------
    # load testing kernels
    # ----------------------------------------
    # kernels = hdf5storage.loadmat(os.path.join('kernels', 'kernels.mat'))['kernels']
    kernels = loadmat(os.path.join(
        'kernels', 'kernels_12.mat'))['kernels'] if load_kernels else None

    n_channels = 1 if 'gray' in model_name else 3  # 3 for color image, 1 for grayscale image
    model_pool = '/home/dengzeshuai/pretrained_models/USRnet/'  # fixed
    testsets = '/home/datasets/sr/'  # fixed
    results = 'results'  # fixed
    noise_level_img = 0  # fixed: 0, noise level for LR image
    noise_level_model = noise_level_img  # fixed, noise level of model, default 0
    result_name = testset_name + '_' + model_name + '_blur'
    model_path = os.path.join(model_pool, model_name + '.pth')

    # ----------------------------------------
    # L_path = H_path, E_path, logger
    # ----------------------------------------
    L_path = os.path.join(
        testsets,
        testset_name)  # L_path and H_path, fixed, for Low-quality images
    H_path = L_path  # default: HR images live alongside LR images
    if testset_name == 'srcvte':
        L_path = os.path.join(testsets, testset_name, 'LR_val')
        H_path = os.path.join(testsets, testset_name, 'HR_val')
        video_names = os.listdir(H_path)
    E_path = os.path.join(results,
                          result_name)  # E_path, fixed, for Estimated images
    util.mkdir(E_path)

    logger_name = result_name
    utils_logger.logger_info(logger_name,
                             log_path=os.path.join(E_path,
                                                   logger_name + '.log'))
    logger = logging.getLogger(logger_name)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # ----------------------------------------
    # load model
    # ----------------------------------------
    if 'tiny' in model_name:
        model = net(n_iter=6,
                    h_nc=32,
                    in_nc=4,
                    out_nc=3,
                    nc=[16, 32, 64, 64],
                    nb=2,
                    act_mode="R",
                    downsample_mode='strideconv',
                    upsample_mode="convtranspose")
    else:
        model = net(n_iter=8,
                    h_nc=64,
                    in_nc=4,
                    out_nc=3,
                    nc=[64, 128, 256, 512],
                    nb=2,
                    act_mode="R",
                    downsample_mode='strideconv',
                    upsample_mode="convtranspose")

    model.load_state_dict(torch.load(model_path), strict=True)
    model.eval()
    for key, v in model.named_parameters():
        v.requires_grad = False
    number_parameters = sum(map(lambda x: x.numel(), model.parameters()))
    model = model.to(device)

    logger.info('Model path: {:s}'.format(model_path))
    logger.info('Params number: {}'.format(number_parameters))
    logger.info('Model_name:{}, image sigma:{}'.format(model_name,
                                                       noise_level_img))
    logger.info(L_path)
    L_paths = util.get_image_paths(L_path)
    need_H = True if H_path is not None else False
    H_paths = util.get_image_paths(H_path) if need_H else None

    # --------------------------------
    # read images
    # --------------------------------
    test_results_ave = OrderedDict()
    test_results_ave['psnr_sf_k'] = []
    test_results_ave['ssim_sf_k'] = []
    test_results_ave['psnr_y_sf_k'] = []
    test_results_ave['ssim_y_sf_k'] = []

    for sf in test_sf:
        loop = kernels.shape[1] if load_kernels else 1
        for k_index in range(loop):

            test_results = OrderedDict()
            test_results['psnr'] = []
            test_results['ssim'] = []
            test_results['psnr_y'] = []
            test_results['ssim_y'] = []

            if load_kernels:
                kernel = kernels[0, k_index].astype(np.float64)
            else:
                ## other kernels
                # kernel = utils_deblur.blurkernel_synthesis(h=25)  # motion kernel
                kernel = utils_deblur.fspecial('gaussian', 25,
                                               1.6)  # Gaussian kernel
                kernel = sr.shift_pixel(kernel, sf)  # pixel shift; optional
                kernel /= np.sum(kernel)

            util.surf(kernel) if show_img else None
            # idx = 0

            for idx, img in enumerate(L_paths):

                # --------------------------------
                # (1) classical degradation, img_L
                # --------------------------------

                img_name, ext = os.path.splitext(os.path.basename(img))
                if testset_name == 'srcvte':
                    video_name = os.path.basename(os.path.dirname(img))
                else:
                    video_name = ''  # keep the logging format below working for other test sets
                img_L = util.imread_uint(img, n_channels=n_channels)
                img_L = util.uint2single(img_L)

                # generate degraded LR image
                # img_L = ndimage.filters.convolve(img_H, kernel[..., np.newaxis], mode='wrap')  # blur
                # img_L = sr.downsample_np(img_L, sf, center=False)  # downsample, standard s-fold downsampler
                # img_L = util.uint2single(img_L)  # uint2single

                # np.random.seed(seed=0)  # for reproducibility
                # img_L += np.random.normal(0, noise_level_img, img_L.shape) # add AWGN

                util.imshow(util.single2uint(img_L)) if show_img else None

                x = util.single2tensor4(img_L)
                k = util.single2tensor4(kernel[..., np.newaxis])
                sigma = torch.tensor(noise_level_model).float().view(
                    [1, 1, 1, 1])
                [x, k, sigma] = [el.to(device) for el in [x, k, sigma]]

                # --------------------------------
                # (2) inference
                # --------------------------------
                x = model(x, k, sf, sigma)

                # --------------------------------
                # (3) img_E
                # --------------------------------
                img_E = util.tensor2uint(x)

                if save_E:
                    if testset_name == 'srcvte':
                        save_path = os.path.join(E_path, video_name)
                        util.mkdir(save_path)
                        # util.imsave(img_E, os.path.join(save_path, img_name+'_k'+str(k_index+1)+'.png'))
                        util.imsave(img_E,
                                    os.path.join(save_path, img_name + '.png'))
                    else:
                        util.imsave(
                            img_E,
                            os.path.join(
                                E_path, img_name + '_x' + str(sf) + '_k' +
                                str(k_index + 1) + '_' + model_name + '.png'))

                # --------------------------------
                # (4) img_H
                # --------------------------------
                if need_H:
                    img_H = util.imread_uint(H_paths[idx],
                                             n_channels=n_channels)
                    img_H = img_H.squeeze()
                    img_H = util.modcrop(img_H, sf)

                    psnr = util.calculate_psnr(
                        img_E, img_H, border=sf)  # change with your own border
                    ssim = util.calculate_ssim(img_E, img_H, border=sf)
                    test_results['psnr'].append(psnr)
                    test_results['ssim'].append(ssim)

                    if np.ndim(img_H) == 3:  # RGB image
                        img_E_y = util.rgb2ycbcr(img_E, only_y=True)
                        img_H_y = util.rgb2ycbcr(img_H, only_y=True)
                        psnr_y = util.calculate_psnr(img_E_y,
                                                     img_H_y,
                                                     border=sf)
                        ssim_y = util.calculate_ssim(img_E_y,
                                                     img_H_y,
                                                     border=sf)
                        test_results['psnr_y'].append(psnr_y)
                        test_results['ssim_y'].append(ssim_y)
                        logger.info(
                            '{:->4d} --> {:>4s}--> {:>10s} -- x{:>2d} --k{:>2d} PSNR: {:.2f}dB SSIM: {:.4f}'
                            .format(idx, video_name, img_name + ext, sf,
                                    k_index, psnr_y, ssim_y))
                    else:
                        logger.info(
                            '{:->4d} --> {:>4s}--> {:>10s} -- x{:>2d} --k{:>2d} PSNR: {:.2f}dB SSIM: {:.4f}'
                            .format(idx, video_name, img_name + ext, sf,
                                    k_index, psnr, ssim))

            if need_H:
                ave_psnr = sum(test_results['psnr']) / len(
                    test_results['psnr'])
                ave_ssim = sum(test_results['ssim']) / len(
                    test_results['ssim'])
                logger.info(
                    'Average PSNR/SSIM(RGB) - {} - x{} --PSNR: {:.2f} dB; SSIM: {:.4f}'
                    .format(result_name, sf, ave_psnr, ave_ssim))
                logger.info(
                    '------> Average PSNR(RGB) - {} - x{}, kernel:{} sigma:{} --PSNR: {:.2f} dB; SSIM: {:.4f}'
                    .format(testset_name, sf, k_index + 1, noise_level_model,
                            ave_psnr, ave_ssim))
                if np.ndim(img_H) == 3:
                    ave_psnr_y = sum(test_results['psnr_y']) / len(
                        test_results['psnr_y'])
                    ave_ssim_y = sum(test_results['ssim_y']) / len(
                        test_results['ssim_y'])
                    logger.info(
                        '------> Average PSNR(Y) - {} - x{}, kernel:{} sigma:{} --PSNR: {:.2f} dB; SSIM: {:.4f}'
                        .format(testset_name, sf, k_index + 1,
                                noise_level_model, ave_psnr_y, ave_ssim_y))

                test_results_ave['psnr_sf_k'].append(ave_psnr)
                test_results_ave['ssim_sf_k'].append(ave_ssim)
                if np.ndim(img_H) == 3:
                    test_results_ave['psnr_y_sf_k'].append(ave_psnr_y)
                    test_results_ave['ssim_y_sf_k'].append(ave_ssim_y)

    logger.info(test_results_ave['psnr_sf_k'])
    logger.info(test_results_ave['ssim_sf_k'])
    if np.ndim(img_H) == 3:
        logger.info(test_results_ave['psnr_y_sf_k'])
        logger.info(test_results_ave['ssim_y_sf_k'])