# Imports assumed by this snippet; calculate_psnr / calculate_ssim come from
# the project's own metrics module, which is not shown here.
import os.path as osp

import mmcv
import numpy as np


def main():
    """Calculate PSNR and SSIM for images.

    Configurations:
        folder_gt (str): Path to gt (Ground-Truth).
        folder_restored (str): Path to restored images.
        crop_border (int): Crop border for each side.
        suffix (str): Suffix for restored images.
        test_y_channel (bool): If True, test Y channel (in MATLAB YCbCr
            format). If False, test RGB channels.
    """
    # Configurations
    # -------------------------------------------------------------------------
    folder_gt = 'datasets/val_set14/Set14'
    folder_restored = 'results/exp/visualization/val_set14'
    crop_border = 4
    suffix = '_expname'
    test_y_channel = False
    # -------------------------------------------------------------------------

    psnr_all = []
    ssim_all = []
    img_list = sorted(mmcv.scandir(folder_gt, recursive=True))

    if test_y_channel:
        print('Testing Y channel.')
    else:
        print('Testing RGB channels.')

    for i, img_path in enumerate(img_list):
        basename, ext = osp.splitext(osp.basename(img_path))
        img_gt = mmcv.imread(
            osp.join(folder_gt, img_path), flag='unchanged').astype(
                np.float32) / 255.
        img_restored = mmcv.imread(
            osp.join(folder_restored, basename + suffix + ext),
            flag='unchanged').astype(np.float32) / 255.

        if test_y_channel and img_gt.ndim == 3 and img_gt.shape[2] == 3:
            img_gt = mmcv.bgr2ycbcr(img_gt, y_only=True)
            img_restored = mmcv.bgr2ycbcr(img_restored, y_only=True)

        # calculate PSNR and SSIM
        psnr = calculate_psnr(
            img_gt * 255,
            img_restored * 255,
            crop_border=crop_border,
            input_order='HWC')
        ssim = calculate_ssim(
            img_gt * 255,
            img_restored * 255,
            crop_border=crop_border,
            input_order='HWC')
        print(f'{i+1:3d}: {basename:25}. \tPSNR: {psnr:.6f} dB, '
              f'\tSSIM: {ssim:.6f}')
        psnr_all.append(psnr)
        ssim_all.append(ssim)
    print(f'Average: PSNR: {sum(psnr_all) / len(psnr_all):.6f} dB, '
          f'SSIM: {sum(ssim_all) / len(ssim_all):.6f}')
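

# Hedged sketch (not from the source): standard entry point if the snippet is
# saved as a standalone script.
if __name__ == '__main__':
    main()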
Example #2
def ssim(img1, img2, crop_border=0, input_order='HWC', convert_to=None):
    """Calculate SSIM (structural similarity).

    Ref:
    Image quality assessment: From error visibility to structural similarity

    The results are the same as those of the official MATLAB code released at
    https://ece.uwaterloo.ca/~z70wang/research/ssim/.

    For three-channel images, SSIM is calculated for each channel and then
    averaged.

    Args:
        img1 (ndarray): Images with range [0, 255].
        img2 (ndarray): Images with range [0, 255].
        crop_border (int): Cropped pixels in each edges of an image. These
            pixels are not involved in the SSIM calculation. Default: 0.
        input_order (str): Whether the input order is 'HWC' or 'CHW'.
            Default: 'HWC'.
        convert_to (str): Whether to convert the images to other color models.
            If None, the images are not altered. When computing for 'Y',
            the images are assumed to be in BGR order. Options are 'Y' and
            None. Default: None.

    Returns:
        float: SSIM result.
    """

    assert img1.shape == img2.shape, (
        f'Image shapes are different: {img1.shape}, {img2.shape}.')
    if input_order not in ['HWC', 'CHW']:
        raise ValueError(
            f'Wrong input_order {input_order}. Supported input_orders are '
            '"HWC" and "CHW"')
    img1 = reorder_image(img1, input_order=input_order)
    img2 = reorder_image(img2, input_order=input_order)

    if isinstance(convert_to, str) and convert_to.lower() == 'y':
        img1, img2 = img1.astype(np.float32), img2.astype(np.float32)
        img1 = mmcv.bgr2ycbcr(img1 / 255., y_only=True) * 255.
        img2 = mmcv.bgr2ycbcr(img2 / 255., y_only=True) * 255.
        img1 = np.expand_dims(img1, axis=2)
        img2 = np.expand_dims(img2, axis=2)
    elif convert_to is not None:
        raise ValueError('Wrong color model. Supported values are '
                         '"Y" and None.')

    if crop_border != 0:
        img1 = img1[crop_border:-crop_border, crop_border:-crop_border, ...]
        img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...]

    ssims = []
    for i in range(img1.shape[2]):
        ssims.append(_ssim(img1[..., i], img2[..., i]))
    return np.array(ssims).mean()
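

# Hedged sketch (not from the source): the loop above calls a `_ssim` helper
# that is not shown. A minimal single-channel implementation consistent with
# the original MATLAB code (11x11 Gaussian window, sigma 1.5, K1=0.01,
# K2=0.03) might look like this; cv2 and numpy (np) are assumed imported.
def _ssim(img1, img2):
    """SSIM for one channel; inputs are 2D arrays with values in [0, 255]."""
    c1 = (0.01 * 255)**2
    c2 = (0.03 * 255)**2

    img1 = img1.astype(np.float64)
    img2 = img2.astype(np.float64)
    kernel = cv2.getGaussianKernel(11, 1.5)
    window = np.outer(kernel, kernel.transpose())

    # local means, variances and covariance over the Gaussian window
    mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5]
    mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
    mu1_sq, mu2_sq, mu1_mu2 = mu1**2, mu2**2, mu1 * mu2
    sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq
    sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
    sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2

    ssim_map = ((2 * mu1_mu2 + c1) * (2 * sigma12 + c2)) / (
        (mu1_sq + mu2_sq + c1) * (sigma1_sq + sigma2_sq + c2))
    return ssim_map.mean()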
Example #3
def test_bgr2ycbcr():
    # float32
    in_img = np.random.rand(10, 10, 3).astype(np.float32)
    out_img = mmcv.bgr2ycbcr(in_img)
    computed_ycbcr = np.empty_like(in_img)
    for i in range(in_img.shape[0]):
        for j in range(in_img.shape[1]):
            b, g, r = in_img[i, j]
            y = 16 + r * 65.481 + g * 128.553 + b * 24.966
            cb = 128 - r * 37.797 - g * 74.203 + b * 112.0
            cr = 128 + r * 112.0 - g * 93.786 - b * 18.214
            computed_ycbcr[i, j, :] = [y, cb, cr]
    computed_ycbcr /= 255.
    assert_array_almost_equal(out_img, computed_ycbcr, decimal=2)
    # y_only=True
    in_img = np.random.rand(10, 10, 3).astype(np.float32)
    out_img = mmcv.bgr2ycbcr(in_img, y_only=True)
    computed_y = np.empty_like(out_img, dtype=out_img.dtype)
    for i in range(in_img.shape[0]):
        for j in range(in_img.shape[1]):
            b, g, r = in_img[i, j]
            y = 16 + r * 65.481 + g * 128.553 + b * 24.966
            computed_y[i, j] = y
    computed_y /= 255.
    assert_array_almost_equal(out_img, computed_y, decimal=2)

    # uint8
    in_img = (np.random.rand(10, 10, 3) * 255).astype(np.uint8)
    out_img = mmcv.bgr2ycbcr(in_img)
    computed_ycbcr = np.empty_like(in_img)
    in_img = in_img / 255.
    for i in range(in_img.shape[0]):
        for j in range(in_img.shape[1]):
            b, g, r = in_img[i, j]
            y = 16 + r * 65.481 + g * 128.553 + b * 24.966
            cb = 128 - r * 37.797 - g * 74.203 + b * 112.0
            cr = 128 + r * 112.0 - g * 93.786 - b * 18.214
            y, cb, cr = y.round(), cb.round(), cr.round()
            computed_ycbcr[i, j, :] = [y, cb, cr]
    assert_image_almost_equal(out_img, computed_ycbcr)
    # y_only = True
    in_img = (np.random.rand(10, 10, 3) * 255).astype(np.uint8)
    out_img = mmcv.bgr2ycbcr(in_img, y_only=True)
    computed_y = np.empty_like(out_img, dtype=out_img.dtype)
    in_img = in_img / 255.
    for i in range(in_img.shape[0]):
        for j in range(in_img.shape[1]):
            b, g, r = in_img[i, j]
            y = 16 + r * 65.481 + g * 128.553 + b * 24.966
            y = y.round()
            computed_y[i, j] = y
    assert_image_almost_equal(out_img, computed_y)
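

# Hedged illustration (not from the source): the per-pixel loops above spell
# out the MATLAB-style BT.601 mapping that mmcv.bgr2ycbcr implements. The same
# conversion in vectorized form, for reference (the function name is made up):
def bgr_to_ycbcr_matlab(img_bgr01):
    """img_bgr01: float BGR image in [0, 1]; returns YCbCr in [0, 1]."""
    weights = np.array([[24.966, 112.0, -18.214],     # B contributions
                        [128.553, -74.203, -93.786],  # G contributions
                        [65.481, -37.797, 112.0]])    # R contributions
    offsets = np.array([16.0, 128.0, 128.0])
    return (img_bgr01 @ weights + offsets) / 255.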
Example #4
def psnr(img1, img2, crop_border=0, input_order='HWC', convert_to=None):
    """Calculate PSNR (Peak Signal-to-Noise Ratio).

    Ref: https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio

    Args:
        img1 (ndarray): Images with range [0, 255].
        img2 (ndarray): Images with range [0, 255].
        crop_border (int): Cropped pixels in each edges of an image. These
            pixels are not involved in the PSNR calculation. Default: 0.
        input_order (str): Whether the input order is 'HWC' or 'CHW'.
            Default: 'HWC'.
        convert_to (str): Whether to convert the images to other color models.
            If None, the images are not altered. When computing for 'Y',
            the images are assumed to be in BGR order. Options are 'Y' and
            None. Default: None.

    Returns:
        float: PSNR result.
    """

    assert img1.shape == img2.shape, (
        f'Image shapes are different: {img1.shape}, {img2.shape}.')
    if input_order not in ['HWC', 'CHW']:
        raise ValueError(
            f'Wrong input_order {input_order}. Supported input_orders are '
            '"HWC" and "CHW"')
    img1 = reorder_image(img1, input_order=input_order)
    img2 = reorder_image(img2, input_order=input_order)

    if isinstance(convert_to, str) and convert_to.lower() == 'y':
        img1, img2 = img1.astype(np.float32), img2.astype(np.float32)
        img1 = mmcv.bgr2ycbcr(img1 / 255., y_only=True) * 255.
        img2 = mmcv.bgr2ycbcr(img2 / 255., y_only=True) * 255.
    elif convert_to is not None:
        raise ValueError('Wrong color model. Supported values are '
                         '"Y" and None.')

    if crop_border != 0:
        img1 = img1[crop_border:-crop_border, crop_border:-crop_border, ...]
        img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...]

    mse_value = np.mean((img1 - img2)**2)
    if mse_value == 0:
        return float('inf')
    return 20. * np.log10(255. / np.sqrt(mse_value))
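

# Hedged usage sketch (not from the source): a quick sanity check of the psnr
# function above on synthetic images; numpy is assumed imported as np.
gt = np.full((32, 32, 3), 128, dtype=np.float64)
noisy = gt.copy()
noisy[0, 0, 0] += 10.  # a single-pixel error of magnitude 10
# MSE = 10**2 / (32 * 32 * 3) ~= 0.0326, so 20 * log10(255 / sqrt(MSE)) ~= 63 dB
print(psnr(gt, noisy, crop_border=0))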
Example #5
    def __call__(self, results):
        """Call function.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """

        if self.file_client is None:
            self.file_client = FileClient(self.io_backend, **self.kwargs)
        filepaths = results[f'{self.key}_path']
        if not isinstance(filepaths, list):
            raise TypeError(
                f'filepath should be list, but got {type(filepaths)}')

        filepaths = [str(v) for v in filepaths]

        imgs = []
        shapes = []
        if self.save_original_img:
            ori_imgs = []
        for filepath in filepaths:
            img_bytes = self.file_client.get(filepath)
            img = mmcv.imfrombytes(img_bytes,
                                   flag=self.flag,
                                   channel_order=self.channel_order)  # HWC

            # convert to y-channel, if specified
            if self.convert_to is not None:
                if (self.channel_order == 'bgr'
                        and self.convert_to.lower() == 'y'):
                    img = mmcv.bgr2ycbcr(img, y_only=True)
                elif (self.channel_order == 'rgb'
                        and self.convert_to.lower() == 'y'):
                    img = mmcv.rgb2ycbcr(img, y_only=True)
                else:
                    raise ValueError('Currently support only "bgr2ycbcr" or '
                                     '"rgb2ycbcr".')

            if img.ndim == 2:
                img = np.expand_dims(img, axis=2)

            imgs.append(img)
            shapes.append(img.shape)
            if self.save_original_img:
                ori_imgs.append(img.copy())

        results[self.key] = imgs
        results[f'{self.key}_path'] = filepaths
        results[f'{self.key}_ori_shape'] = shapes
        if self.save_original_img:
            results[f'ori_{self.key}'] = ori_imgs

        return results
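

# Hedged illustration (not from the source): in an mmedit-style config, a
# loader with this __call__ is typically declared as a pipeline entry; the
# argument names below are the ones exercised in the tests further down.
pipeline = [
    dict(type='LoadImageFromFileList',
         io_backend='disk',
         key='lq',
         channel_order='bgr',
         convert_to='y'),
]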
Example #6
def niqe(img, crop_border, input_order='HWC', convert_to='y'):
    """Calculate NIQE (Natural Image Quality Evaluator) metric.

    Ref: Making a "Completely Blind" Image Quality Analyzer.
    This implementation produces almost the same results as the official
    MATLAB code: http://live.ece.utexas.edu/research/quality/niqe_release.zip

    We use the official params estimated from the pristine dataset.
    We use the recommended block size (96, 96) without overlaps.

    Args:
        img (ndarray): Input image whose quality needs to be computed.
            The input image must be in range [0, 255] with float/int type.
            The input_order of image can be 'HW' or 'HWC' or 'CHW'. (BGR order)
            If the input order is 'HWC' or 'CHW', it will be converted to gray
            or Y (of YCbCr) image according to the ``convert_to`` argument.
        crop_border (int): Cropped pixels in each edge of an image. These
            pixels are not involved in the metric calculation.
        input_order (str): Whether the input order is 'HW', 'HWC' or 'CHW'.
            Default: 'HWC'.
        convert_to (str): Whether to convert to 'y' (of MATLAB YCbCr) or
            'gray'. Default: 'y'.

    Returns:
        float: NIQE result.
    """

    # we use the official params estimated from the pristine dataset.
    niqe_pris_params = np.load('mmedit/core/evaluation/niqe_pris_params.npz')
    mu_pris_param = niqe_pris_params['mu_pris_param']
    cov_pris_param = niqe_pris_params['cov_pris_param']
    gaussian_window = niqe_pris_params['gaussian_window']

    img = img.astype(np.float32)
    if input_order != 'HW':
        img = reorder_image(img, input_order=input_order)
        if convert_to == 'y':
            img = mmcv.bgr2ycbcr(img / 255., y_only=True) * 255.
        elif convert_to == 'gray':
            img = mmcv.bgr2gray(img / 255.) * 255.
        img = np.squeeze(img)

    if crop_border != 0:
        img = img[crop_border:-crop_border, crop_border:-crop_border]

    # round to follow official implementation
    img = img.round()

    niqe_result = niqe_core(img, mu_pris_param, cov_pris_param,
                            gaussian_window)

    return niqe_result
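

# Hedged usage sketch (not from the source): scoring one BGR image with the
# niqe function above; the image path is illustrative only.
img = mmcv.imread('tests/data/gt/baboon.png')  # uint8 BGR, HWC
score = niqe(img, crop_border=0, input_order='HWC', convert_to='y')
print(f'NIQE: {score:.4f}')  # lower is better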
Example #7
    def __call__(self, results):
        """Call function.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        filepath = str(results[f'{self.key}_path'])
        if self.file_client is None:
            self.file_client = FileClient(self.io_backend, **self.kwargs)
        if self.use_cache:
            if self.cache is None:
                self.cache = dict()
            if filepath in self.cache:
                img = self.cache[filepath]
            else:
                img_bytes = self.file_client.get(filepath)
                img = mmcv.imfrombytes(img_bytes,
                                       flag=self.flag,
                                       channel_order=self.channel_order,
                                       backend=self.backend)  # HWC
                self.cache[filepath] = img
        else:
            img_bytes = self.file_client.get(filepath)
            img = mmcv.imfrombytes(img_bytes,
                                   flag=self.flag,
                                   channel_order=self.channel_order,
                                   backend=self.backend)  # HWC

        if self.convert_to is not None:
            if self.channel_order == 'bgr' and self.convert_to.lower() == 'y':
                img = mmcv.bgr2ycbcr(img, y_only=True)
            elif (self.channel_order == 'rgb'
                    and self.convert_to.lower() == 'y'):
                img = mmcv.rgb2ycbcr(img, y_only=True)
            else:
                raise ValueError('Currently support only "bgr2ycbcr" or '
                                 '"rgb2ycbcr".')
            if img.ndim == 2:
                img = np.expand_dims(img, axis=2)

        results[self.key] = img
        results[f'{self.key}_path'] = filepath
        results[f'{self.key}_ori_shape'] = img.shape
        if self.save_original_img:
            results[f'ori_{self.key}'] = img.copy()

        return results
Example #8
def to_y_channel(img):
    """Change to Y channel of YCbCr.

    Args:
        img (ndarray): Images with range [0, 255].

    Returns:
        (ndarray): Images with range [0, 255] (float type), without rounding.
    """
    img = img.astype(np.float32) / 255.
    if img.ndim == 3 and img.shape[2] == 3:
        img = mmcv.bgr2ycbcr(img, y_only=True)
        img = img[..., None]
    return img * 255.
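

# Hedged usage sketch (not from the source): to_y_channel leaves single-channel
# inputs untouched and reduces 3-channel BGR inputs to one Y channel.
bgr = (np.random.rand(4, 4, 3) * 255).astype(np.uint8)
print(to_y_channel(bgr).shape)          # (4, 4, 1), float values in [0, 255]
print(to_y_channel(bgr[..., 0]).shape)  # (4, 4), shape unchanged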
Example #9
def test_load_image_from_file():
    path_baboon = Path(
        __file__).parent.parent.parent / 'data' / 'gt' / 'baboon.png'
    img_baboon = mmcv.imread(str(path_baboon), flag='color')
    path_baboon_x4 = Path(
        __file__).parent.parent.parent / 'data' / 'lq' / 'baboon_x4.png'
    img_baboon_x4 = mmcv.imread(str(path_baboon_x4), flag='color')

    # read gt image
    # input path is Path object
    results = dict(gt_path=path_baboon)
    config = dict(io_backend='disk', key='gt')
    image_loader = LoadImageFromFile(**config)
    results = image_loader(results)
    assert results['gt'].shape == (480, 500, 3)
    np.testing.assert_almost_equal(results['gt'], img_baboon)
    assert results['gt_path'] == str(path_baboon)
    # input path is str
    results = dict(gt_path=str(path_baboon))
    results = image_loader(results)
    assert results['gt'].shape == (480, 500, 3)
    np.testing.assert_almost_equal(results['gt'], img_baboon)
    assert results['gt_path'] == str(path_baboon)

    # read lq image
    # input path is Path object
    results = dict(lq_path=path_baboon_x4)
    config = dict(io_backend='disk', key='lq')
    image_loader = LoadImageFromFile(**config)
    results = image_loader(results)
    assert results['lq'].shape == (120, 125, 3)
    np.testing.assert_almost_equal(results['lq'], img_baboon_x4)
    assert results['lq_path'] == str(path_baboon_x4)
    # input path is str
    results = dict(lq_path=str(path_baboon_x4))
    results = image_loader(results)
    assert results['lq'].shape == (120, 125, 3)
    np.testing.assert_almost_equal(results['lq'], img_baboon_x4)
    assert results['lq_path'] == str(path_baboon_x4)
    assert repr(image_loader) == (
        image_loader.__class__.__name__ +
        ('(io_backend=disk, key=lq, '
         'flag=color, save_original_img=False, channel_order=bgr, '
         'use_cache=False)'))

    results = dict(lq_path=path_baboon_x4)
    config = dict(io_backend='disk',
                  key='lq',
                  flag='grayscale',
                  save_original_img=True)
    image_loader = LoadImageFromFile(**config)
    results = image_loader(results)
    assert results['lq'].shape == (120, 125)
    assert results['lq_ori_shape'] == (120, 125)
    np.testing.assert_almost_equal(results['ori_lq'], results['lq'])
    assert id(results['ori_lq']) != id(results['lq'])

    # test: use_cache
    results = dict(gt_path=path_baboon)
    config = dict(io_backend='disk', key='gt', use_cache=True)
    image_loader = LoadImageFromFile(**config)
    assert image_loader.cache is None
    assert repr(image_loader) == (
        image_loader.__class__.__name__ +
        ('(io_backend=disk, key=gt, '
         'flag=color, save_original_img=False, channel_order=bgr, '
         'use_cache=True)'))
    results = image_loader(results)
    assert image_loader.cache is not None
    assert str(path_baboon) in image_loader.cache
    assert results['gt'].shape == (480, 500, 3)
    assert results['gt_path'] == str(path_baboon)
    np.testing.assert_almost_equal(results['gt'], img_baboon)

    # convert to y-channel (bgr2y)
    results = dict(gt_path=path_baboon)
    config = dict(io_backend='disk', key='gt', convert_to='y')
    image_loader = LoadImageFromFile(**config)
    results = image_loader(results)
    assert results['gt'].shape == (480, 500, 1)
    img_baboon_y = mmcv.bgr2ycbcr(img_baboon, y_only=True)
    img_baboon_y = np.expand_dims(img_baboon_y, axis=2)
    np.testing.assert_almost_equal(results['gt'], img_baboon_y)
    assert results['gt_path'] == str(path_baboon)

    # convert to y-channel (rgb2y)
    results = dict(gt_path=path_baboon)
    config = dict(io_backend='disk',
                  key='gt',
                  channel_order='rgb',
                  convert_to='y')
    image_loader = LoadImageFromFile(**config)
    results = image_loader(results)
    assert results['gt'].shape == (480, 500, 1)
    img_baboon_y = mmcv.bgr2ycbcr(img_baboon, y_only=True)
    img_baboon_y = np.expand_dims(img_baboon_y, axis=2)
    np.testing.assert_almost_equal(results['gt'], img_baboon_y)
    assert results['gt_path'] == str(path_baboon)

    # convert to y-channel (ValueError)
    results = dict(gt_path=path_baboon)
    config = dict(io_backend='disk', key='gt', convert_to='abc')
    image_loader = LoadImageFromFile(**config)
    with pytest.raises(ValueError):
        results = image_loader(results)
Example #10
def test_load_image_from_file_list():
    path_baboon = Path(
        __file__).parent.parent.parent / 'data' / 'gt' / 'baboon.png'
    img_baboon = mmcv.imread(str(path_baboon), flag='color')
    path_baboon_x4 = Path(
        __file__).parent.parent.parent / 'data' / 'lq' / 'baboon_x4.png'
    img_baboon_x4 = mmcv.imread(str(path_baboon_x4), flag='color')

    # input path is Path object
    results = dict(lq_path=[path_baboon_x4, path_baboon])
    config = dict(io_backend='disk', key='lq')
    image_loader = LoadImageFromFileList(**config)
    results = image_loader(results)
    np.testing.assert_almost_equal(results['lq'][0], img_baboon_x4)
    np.testing.assert_almost_equal(results['lq'][1], img_baboon)
    assert results['lq_ori_shape'] == [(120, 125, 3), (480, 500, 3)]
    assert results['lq_path'] == [str(path_baboon_x4), str(path_baboon)]
    # input path is str
    results = dict(lq_path=[str(path_baboon_x4), str(path_baboon)])
    config = dict(io_backend='disk', key='lq')
    image_loader = LoadImageFromFileList(**config)
    results = image_loader(results)
    np.testing.assert_almost_equal(results['lq'][0], img_baboon_x4)
    np.testing.assert_almost_equal(results['lq'][1], img_baboon)
    assert results['lq_path'] == [str(path_baboon_x4), str(path_baboon)]

    # save ori_img
    results = dict(lq_path=[path_baboon_x4])
    config = dict(io_backend='disk', key='lq', save_original_img=True)
    image_loader = LoadImageFromFileList(**config)
    results = image_loader(results)
    np.testing.assert_almost_equal(results['lq'][0], img_baboon_x4)
    assert results['lq_ori_shape'] == [(120, 125, 3)]
    assert results['lq_path'] == [str(path_baboon_x4)]
    np.testing.assert_almost_equal(results['ori_lq'][0], img_baboon_x4)

    with pytest.raises(TypeError):
        # filepath should be list
        results = dict(lq_path=path_baboon_x4)
        image_loader(results)

    # convert to y-channel (bgr2y)
    results = dict(lq_path=[str(path_baboon_x4), str(path_baboon)])
    config = dict(io_backend='disk', key='lq', convert_to='y')
    image_loader = LoadImageFromFileList(**config)
    results = image_loader(results)
    img_baboon_x4_y = mmcv.bgr2ycbcr(img_baboon_x4, y_only=True)
    img_baboon_y = mmcv.bgr2ycbcr(img_baboon, y_only=True)
    img_baboon_x4_y = np.expand_dims(img_baboon_x4_y, axis=2)
    img_baboon_y = np.expand_dims(img_baboon_y, axis=2)
    np.testing.assert_almost_equal(results['lq'][0], img_baboon_x4_y)
    np.testing.assert_almost_equal(results['lq'][1], img_baboon_y)
    assert results['lq_path'] == [str(path_baboon_x4), str(path_baboon)]

    # convert to y-channel (rgb2y)
    results = dict(lq_path=[str(path_baboon_x4), str(path_baboon)])
    config = dict(io_backend='disk',
                  key='lq',
                  channel_order='rgb',
                  convert_to='y')
    image_loader = LoadImageFromFileList(**config)
    results = image_loader(results)
    np.testing.assert_almost_equal(results['lq'][0], img_baboon_x4_y)
    np.testing.assert_almost_equal(results['lq'][1], img_baboon_y)
    assert results['lq_path'] == [str(path_baboon_x4), str(path_baboon)]

    # convert to y-channel (ValueError)
    results = dict(lq_path=[str(path_baboon_x4), str(path_baboon)])
    config = dict(io_backend='disk', key='lq', convert_to='abc')
    image_loader = LoadImageFromFileList(**config)
    with pytest.raises(ValueError):
        results = image_loader(results)