Code example #1
def image_quality_evaluation(sr_filename: str, hr_filename: str, device: torch.device = "cpu"):
    """Image quality evaluation function.

    Args:
        sr_filename (str): Image file name after super resolution.
        hr_filename (str): Original high resolution image file name.
        device (torch.device, optional): Device on which the metrics are computed in PyTorch. (Default: ``cpu``).

    Returns:
        The computed `mse, rmse, psnr, ssim, msssim, niqe, sam, vifp, lpips` values
        for the super-resolved image against the high-resolution reference.
    """
    # Reference sources from `https://github.com/richzhang/PerceptualSimilarity`
    lpips_loss = lpips.LPIPS(net="vgg", verbose=False).to(device)
    # Evaluate performance
    sr = cv2.imread(sr_filename)
    hr = cv2.imread(hr_filename)

    # For LPIPS evaluation
    sr_tensor = opencv2tensor(sr, device)
    hr_tensor = opencv2tensor(hr, device)

    # Complete estimate.
    mse_value = mse(sr, hr)
    rmse_value = rmse(sr, hr)
    psnr_value = psnr(sr, hr)
    ssim_value = ssim(sr, hr)
    msssim_value = msssim(sr, hr)
    niqe_value = niqe(sr_filename)
    sam_value = sam(sr, hr)
    vifp_value = vifp(sr, hr)
    lpips_value = lpips_loss(sr_tensor, hr_tensor)
    return mse_value, rmse_value, psnr_value, ssim_value, msssim_value, niqe_value, sam_value, vifp_value, lpips_value
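
The helper opencv2tensor is not shown in this snippet. A minimal sketch of what such a helper would need to do (an assumption, not the original implementation) is to turn an OpenCV BGR uint8 image into the RGB, NCHW float tensor in [-1, 1] that LPIPS expects:

import cv2
import torch

def opencv2tensor(image, device):
    # Hypothetical helper: BGR uint8 (H, W, C) -> RGB float tensor (1, C, H, W) in [-1, 1]
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    tensor = torch.from_numpy(rgb).float().permute(2, 0, 1).unsqueeze(0)
    return (tensor / 127.5 - 1.0).to(device)
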
Code example #2
def obtain_similarity_metrics(GT_img, distorted_img):
    # MEAN SQUARED ERROR
    mse_value = mse(GT_img, distorted_img)
    # STRUCTURAL SIMILARITY
    ssim_value = ssim(GT_img, distorted_img)
    # PEAK SIGNAL TO NOISE RATIO
    psnr_value = psnr(GT_img, distorted_img)
    # ROOT MEAN SQUARED ERROR
    rmse_value = rmse(GT_img, distorted_img)
    # VISUAL INFORMATION FIDELITY
    vif_value = vifp(GT_img, distorted_img)
    # UNIVERSAL IMAGE QUALITY INDEX
    uqi_value = uqi(GT_img, distorted_img)
    # MULTI-SCALE STRUCTURAL SIMILARITY INDEX
    msssim_value = msssim(GT_img, distorted_img)
    # PSNR-HVS-M  &  PSNR-HVS
    p_hvs_m, p_hvs = psnrhmam.color_psnrhma(GT_img, distorted_img)

    return mse_value, ssim_value, psnr_value, rmse_value, vif_value, uqi_value, msssim_value, p_hvs_m, p_hvs
Code example #3
def quality(X_ori, Y_cmp, ws=8, MAX=255):
    '''
    Use MS-SSIM to calculate the compression quality.
    Pixel values are rounded, shifted up by 128 and clipped to
    [0, 255] before the metric is computed. Returns a score
    in [0, 1]; a higher score indicates better quality.

    Parameters:
        X_ori: original image
        Y_cmp: compressed image
        ws: sliding window size (default 8)
        MAX: maximum value of datarange (default 255)

    Return: a float in [0, 1].

    Reference:
    "Multiscale structural similarity for image quality assessment." (2003)
    https://ieeexplore.ieee.org/abstract/document/1292216/
    '''
    X_ori, Y_cmp = [np.clip(np.round(X) + 128, 0, 255) for X in [X_ori, Y_cmp]]
    return msssim(X_ori, Y_cmp, ws=ws, MAX=MAX).real
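
As an illustration only (the random data and the sewar import are assumptions, not part of the original), the quality function above could be exercised on a zero-centred signal like this:

import numpy as np
from sewar.full_ref import msssim  # assumed source of the msssim used above

rng = np.random.default_rng(0)
X_ori = rng.uniform(-128, 127, size=(256, 256))      # zero-centred "original"
Y_cmp = X_ori + rng.normal(0, 5, size=X_ori.shape)   # lightly corrupted copy
print(quality(X_ori, Y_cmp))  # values close to 1.0 mean the copy is close to the original
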
Code example #4
def get_bpg_df(directory,
               write_files=False,
               effective_bytes=True,
               force_calc=False):
    """
    Compute and return (as Pandas DF) the rate distortion curve for BPG. The result is saved
    as a CSV file in the source directory. If the file exists, the DF is loaded and returned.

    The files are saved using the reference codec: https://bellard.org/bpg/
    """

    files, _ = loading.discover_images(directory, n_images=-1, v_images=0)
    batch_x = loading.load_images(files, directory, load='y')
    batch_x = batch_x['y'].astype(np.float32) / (2**8 - 1)

    quality_levels = np.arange(10, 40, 1)
    df_jpeg_path = os.path.join(directory, 'bpg.csv')

    if os.path.isfile(df_jpeg_path) and not force_calc:
        logger.info('Restoring BPG stats from {}'.format(df_jpeg_path))
        df = pd.read_csv(df_jpeg_path, index_col=False)
    else:
        df = pd.DataFrame(columns=[
            'image_id', 'filename', 'codec', 'quality', 'ssim', 'psnr',
            'msssim', 'msssim_db', 'bytes', 'bpp'
        ])

        with tqdm.tqdm(total=len(files) * len(quality_levels),
                       ncols=120,
                       desc='BPG') as pbar:

            for image_id, filename in enumerate(files):

                # Read the original image
                image = batch_x[image_id]

                for qi, q in enumerate(quality_levels):

                    # Compress to BPG
                    # Save as temporary file
                    imageio.imwrite('/tmp/image.png',
                                    (255 * image).astype(np.uint8))
                    bpp_path = bpg_helpers.bpg_compress(
                        '/tmp/image.png', q, '/tmp')
                    image_compressed = imageio.imread(
                        bpg_helpers.decode_bpg_to_png(bpp_path)).astype(
                            float) / (2**8 - 1)

                    if effective_bytes:
                        bpp = bpg_helpers.bpp_of_bpg_image(bpp_path)
                        image_bytes = round(bpp * image.shape[0] *
                                            image.shape[1] / 8)
                    else:
                        image_bytes = os.stat(bpp_path).st_size
                        bpp = 8 * image_bytes / image.shape[0] / image.shape[1]

                    if write_files:
                        image_dir = os.path.join(directory,
                                                 os.path.splitext(filename)[0])
                        if not os.path.isdir(image_dir):
                            os.makedirs(image_dir)

                        image_path = os.path.join(image_dir,
                                                  'bpg_q{:03d}.png'.format(q))
                        imageio.imwrite(image_path,
                                        (255 * image_compressed).astype(
                                            np.uint8))

                    msssim_value = msssim(image, image_compressed, MAX=1).real

                    df = df.append(
                        {
                            'image_id': image_id,
                            'filename': filename,
                            'codec': 'bpg',
                            'quality': q,
                            'ssim': compare_ssim(image,
                                                 image_compressed,
                                                 multichannel=True,
                                                 data_range=1),
                            'psnr': compare_psnr(image,
                                                 image_compressed,
                                                 data_range=1),
                            'msssim': msssim_value,
                            'msssim_db': -10 * np.log10(1 - msssim_value),
                            'bytes': image_bytes,
                            'bpp': bpp
                        },
                        ignore_index=True)

                    pbar.set_postfix(image_id=image_id, quality=q)
                    pbar.update(1)

        df.to_csv(df_jpeg_path, index=False)

    return df
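
A hedged usage sketch (the directory path is hypothetical, and get_bpg_df together with its loading/bpg_helpers dependencies is assumed to be importable from the surrounding project):

# Build or reload the BPG rate-distortion table for a directory of test images
df_bpg = get_bpg_df('data/kodak', write_files=False, effective_bytes=True)

# Average rate-distortion points per quality level
print(df_bpg.groupby('quality')[['bpp', 'psnr', 'msssim_db']].mean())
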
Code example #5
def get_jpeg2k_df(directory,
                  write_files=False,
                  effective_bytes=True,
                  force_calc=False):
    """
    Compute and return (as Pandas DF) the rate distortion curve for JPEG 2000. The result is saved
    as a CSV file in the source directory. If the file exists, the DF is loaded and returned.

    Files are saved as JPEG 2000 using glymur.
    """

    files, _ = loading.discover_images(directory, n_images=-1, v_images=0)
    batch_x = loading.load_images(files, directory, load='y')
    batch_x = batch_x['y'].astype(np.float32) / (2**8 - 1)

    # Get trade-off for JPEG
    quality_levels = np.arange(25, 45, 1)
    df_jpeg_path = os.path.join(directory, 'jpeg2000.csv')

    if os.path.isfile(df_jpeg_path) and not force_calc:
        logger.info('Restoring JPEG 2000 stats from {}'.format(df_jpeg_path))
        df = pd.read_csv(df_jpeg_path, index_col=False)
    else:
        df = pd.DataFrame(columns=[
            'image_id', 'filename', 'codec', 'quality', 'ssim', 'psnr',
            'msssim', 'msssim_db', 'bytes', 'bpp'
        ])

        with tqdm.tqdm(total=len(files) * len(quality_levels),
                       ncols=120,
                       desc='JP2k') as pbar:

            for image_id, filename in enumerate(files):

                # Read the original image
                image = batch_x[image_id]

                for qi, q in enumerate(quality_levels):

                    # Use glymur to save JPEG 2000 images to a temporary file
                    image_np = (255 * image.clip(0, 1)).astype(np.uint8)
                    glymur.Jp2k('/tmp/image.jp2', data=image_np, psnr=[q])
                    if effective_bytes:
                        image_bytes = jpeg_helpers.jp2bytes('/tmp/image.jp2')
                    else:
                        image_bytes = os.path.getsize('/tmp/image.jp2')
                    image_compressed = imageio.imread('/tmp/image.jp2').astype(
                        float) / (2**8 - 1)

                    # Alternative: use Pillow to save JPEG 2000 images to a memory buffer.
                    # This has been disabled - the Pillow implementation seems to be invalid.
                    # with io.BytesIO() as output:
                    #     image_pillow = PIL.Image.fromarray((255*image.clip(0, 1)).astype(np.uint8))
                    #     image_pillow.save(output, format='jpeg2000', quality_layers=[q])
                    #     image_compressed = imageio.imread(output.getvalue()).astype(np.float) / (2**8 - 1)
                    #     image_bytes = len(output.getvalue())

                    if write_files:
                        image_dir = os.path.join(directory,
                                                 os.path.splitext(filename)[0])
                        if not os.path.isdir(image_dir):
                            os.makedirs(image_dir)

                        image_path = os.path.join(
                            image_dir, 'jp2_q{:.1f}dB.png'.format(q))
                        imageio.imwrite(image_path,
                                        (255 * image_compressed).astype(
                                            np.uint8))

                    msssim_value = msssim(image, image_compressed, MAX=1).real

                    df = df.append(
                        {
                            'image_id': image_id,
                            'filename': filename,
                            'codec': 'jpeg2000',
                            'quality': q,
                            'ssim': compare_ssim(image,
                                                 image_compressed,
                                                 multichannel=True,
                                                 data_range=1),
                            'psnr': compare_psnr(image,
                                                 image_compressed,
                                                 data_range=1),
                            'msssim': msssim_value,
                            'msssim_db': -10 * np.log10(1 - msssim_value),
                            'bytes': image_bytes,
                            'bpp': 8 * image_bytes / image.shape[0] / image.shape[1]
                        },
                        ignore_index=True)

                    pbar.set_postfix(image_id=image_id, quality=q)
                    pbar.update(1)

        df.to_csv(df_jpeg_path, index=False)

    return df
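
The JPEG 2000 sweep above drives glymur by target PSNR rather than by compression ratio; a minimal isolated sketch of that call, using a made-up array, might look like:

import numpy as np
import glymur

# Hypothetical 8-bit RGB image; psnr=[35] requests a single quality layer
# targeting roughly 35 dB of reconstruction PSNR
image_np = (np.random.rand(256, 256, 3) * 255).astype(np.uint8)
glymur.Jp2k('/tmp/demo.jp2', data=image_np, psnr=[35])
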
Code example #6
def get_jpeg_df(directory,
                write_files=False,
                effective_bytes=True,
                force_calc=False):
    """
    Compute and return (as Pandas DF) the rate distortion curve for JPEG. The result is saved
    as a CSV file in the source directory. If the file exists, the DF is loaded and returned.

    Files are saved as JPEG using imageio.
    """

    files, _ = loading.discover_images(directory, n_images=-1, v_images=0)
    batch_x = loading.load_images(files, directory, load='y')
    batch_x = batch_x['y'].astype(np.float32) / (2**8 - 1)

    # Get trade-off for JPEG
    quality_levels = np.arange(95, 5, -5)
    df_jpeg_path = os.path.join(directory, 'jpeg.csv')

    if os.path.isfile(df_jpeg_path) and not force_calc:
        logger.info('Restoring JPEG stats from {}'.format(df_jpeg_path))
        df = pd.read_csv(df_jpeg_path, index_col=False)
    else:
        df = pd.DataFrame(columns=[
            'image_id', 'filename', 'codec', 'quality', 'ssim', 'psnr',
            'msssim', 'msssim_db', 'bytes', 'bpp'
        ])

        with tqdm.tqdm(total=len(files) * len(quality_levels),
                       ncols=120,
                       desc='JPEG') as pbar:

            for image_id, filename in enumerate(files):

                # Read the original image
                image = batch_x[image_id]

                for qi, q in enumerate(quality_levels):

                    # Compress images and get effective bytes (only image data - no headers)
                    image_compressed, image_bytes = jpeg_helpers.compress_batch(
                        image, q, effective=effective_bytes)

                    if write_files:
                        image_dir = os.path.join(directory,
                                                 os.path.splitext(filename)[0])
                        if not os.path.isdir(image_dir):
                            os.makedirs(image_dir)

                        image_path = os.path.join(image_dir,
                                                  'jpeg_q{:03d}.png'.format(q))
                        imageio.imwrite(image_path,
                                        (255 * image_compressed).astype(
                                            np.uint8))

                    msssim_value = msssim(image, image_compressed, MAX=1).real

                    df = df.append(
                        {
                            'image_id': image_id,
                            'filename': filename,
                            'codec': 'jpeg',
                            'quality': q,
                            'ssim': compare_ssim(image,
                                                 image_compressed,
                                                 multichannel=True,
                                                 data_range=1),
                            'psnr': compare_psnr(image,
                                                 image_compressed,
                                                 data_range=1),
                            'msssim': msssim_value,
                            'msssim_db': -10 * np.log10(1 - msssim_value),
                            'bytes': image_bytes,
                            'bpp': 8 * image_bytes / image.shape[0] / image.shape[1]
                        },
                        ignore_index=True)

                    pbar.set_postfix(image_id=image_id, quality=q)
                    pbar.update(1)

        df.to_csv(os.path.join(directory, 'jpeg.csv'), index=False)

    return df
Code example #7
def get_diff_jpeg_df(directory,
                     write_files=False,
                     effective_bytes=True,
                     force_calc=False):
    """
    Compute and return (as Pandas DF) the rate distortion curve for diff JPEG. The result is saved
    as a CSV file in the source directory. If the file exists, the DF is loaded and returned.
    Files are saved as JPEG using ImageMagick.
    """

    files, _ = loading.discover_images(directory, n_images=-1, v_images=0)
    batch_x = loading.load_images(files, directory, load='y')
    batch_x = batch_x['y'].astype(np.float32) / (2**8 - 1)

    # Get trade-off for JPEG
    alpha_range = np.arange(1000, 100, -100)
    quality_levels = np.arange(95, 5, -5)
    df_jpeg_path = os.path.join(directory, 'diff_jpeg.csv')

    if os.path.isfile(df_jpeg_path) and not force_calc:
        print('Restoring diff JPEG stats from {}'.format(df_jpeg_path))
        df = pd.read_csv(df_jpeg_path, index_col=False)
    else:
        df = pd.DataFrame(columns=[
            'image_id', 'filename', 'codec', 'quality', 'ssim', 'psnr',
            'msssim', 'msssim_db', 'bytes', 'bpp'
        ])

        data = dataset.Dataset(directory,
                               n_images=1,
                               v_images=0,
                               val_n_patches=1,
                               load='y')
        batch_rgb = data.next_training_batch(0, 1, 64)
        codec_differentiable = jpeg.JPEG(50, codec='soft', trainable=True)

        with tqdm.tqdm(total=len(files) * len(quality_levels),
                       ncols=120,
                       desc='JPEG') as pbar:

            for image_id, filename in enumerate(files):

                for q_i, q in enumerate(quality_levels):

                    codec_differentiable.quality = q

                    # Read the original image
                    image = batch_x[image_id]

                    luma, chroma = codec_differentiable.train_q_table(
                        batch_rgb, alpha=1, beta=100, n_times=20)

                    with open(
                            'data/diff_jpeg/image1/quantization-table.xml', 'r'
                    ) as input_file, open(
                            'data/diff_jpeg/image1/quantization-table-diff.xml',
                            'w') as output_file:
                        # Read every line except the first; the index offsets below refer to this list
                        xml_data = input_file.readlines()[1:]

                        index_1 = 43
                        for i in range(len(luma.numpy())):
                            q_table_this_line_luma = luma.numpy()[i]
                            xml_data[index_1 + i] = "     " + ", ".join(
                                str(int(q_table_this_line_luma[j]))
                                for j in range(8)) + ", \n"

                        index_2 = 68
                        for i in range(len(chroma.numpy())):
                            q_table_this_line_chroma = chroma.numpy()[i]
                            xml_data[index_2 + i] = "     " + ", ".join(
                                str(int(q_table_this_line_chroma[j]))
                                for j in range(8)) + ", \n"

                        output_file.writelines(xml_data)

                    # Compress images and get effective bytes (only image data - no headers)
                    os.system(
                        'magick convert -quality ' + str(50) +
                        ' -define jpeg:q-table=data/diff_jpeg/image1/quantization-table-diff.xml'
                        + ' ' + directory + '/' + filename + ' ' + directory +
                        '/' + filename + '_' + str(50) + '_compressed.jpeg')

                    if effective_bytes:
                        with open(
                                directory + '/' + filename + "_" + str(50) +
                                '_compressed.jpeg', 'rb') as fh:
                            buf = io.BytesIO(fh.read())
                        image_bytes = JPEGMarkerStats(
                            buf.getvalue()).get_effective_bytes()
                    else:
                        image_bytes = os.path.getsize(directory + '/' +
                                                      filename + "_" +
                                                      str(50) +
                                                      '_compressed.jpeg')
                    image = imageio.imread(directory + '/' + filename).astype(
                        float) / (2**8 - 1)
                    image_compressed = imageio.imread(
                        directory + '/' + filename + "_" + str(50) +
                        '_compressed.jpeg').astype(float) / (2**8 - 1)
                    image_compressed_path = directory + '/' + filename + "_" + str(
                        50) + '_compressed.jpeg'
                    image_path = directory + '/' + filename

                    if not write_files:
                        os.remove(directory + '/' + filename + "_" + str(50) +
                                  '_compressed.jpeg')

                    msssim_value = msssim(image, image_compressed, MAX=1).real

                    df = df.append(
                        {
                            'image_id': image_id,
                            'filename': filename,
                            'codec': 'jpeg',
                            'quality': q,
                            'ssim': compare_ssim(image,
                                                 image_compressed,
                                                 multichannel=True,
                                                 data_range=1),
                            'psnr': compare_psnr(image,
                                                 image_compressed,
                                                 data_range=1),
                            'msssim': msssim_value,
                            'msssim_db': -10 * np.log10(1 - msssim_value),
                            # 'perceptual similarity': perceptual_similarity,
                            'bytes': image_bytes,
                            'bpp': 8 * image_bytes / image.shape[0] / image.shape[1]
                        },
                        ignore_index=True)

                    pbar.set_postfix(image_id=image_id, quality=q)
                    pbar.update(1)
        df.to_csv(os.path.join(directory, 'diff_jpeg.csv'), index=False)
    return df
Code example #8
def get_jpeg_imagemagick_df(directory,
                            write_files=False,
                            effective_bytes=True,
                            force_calc=False):
    """
    Compute and return (as Pandas DF) the rate distortion curve for JPEG. The result is saved
    as a CSV file in the source directory. If the file exists, the DF is loaded and returned.

    Files are saved as JPEG using ImageMagick.
    """

    files, _ = loading.discover_images(directory, n_images=-1, v_images=0)
    batch_x = loading.load_images(files, directory, load='y')
    batch_x = batch_x['y'].astype(np.float32) / (2**8 - 1)

    # Get trade-off for JPEG
    quality_levels = np.arange(95, 5, -5)
    df_jpeg_path = os.path.join(directory, 'jpeg.csv')

    if os.path.isfile(df_jpeg_path) and not force_calc:
        logger.info('Restoring JPEG stats from {}'.format(df_jpeg_path))
        df = pd.read_csv(df_jpeg_path, index_col=False)
    else:
        df = pd.DataFrame(columns=[
            'image_id', 'filename', 'codec', 'quality', 'ssim', 'psnr',
            'msssim', 'msssim_db', 'bytes', 'bpp'
        ])

        with tqdm.tqdm(total=len(files) * len(quality_levels),
                       ncols=120,
                       desc='JPEG') as pbar:

            for image_id, filename in enumerate(files):

                # Read the original image
                image = batch_x[image_id]

                for qi, q in enumerate(quality_levels):

                    os.system(
                        'magick convert -quality ' + str(q) +
                        ' -define jpeg:q-table=data/diff_jpeg/image1/quantization-table.xml'
                        + ' ' + directory + '/' + filename + ' ' + directory +
                        '/' + filename + '_' + str(q) + '_compressed.jpeg')

                    if effective_bytes:
                        with open(
                                directory + '/' + filename + "_" + str(q) +
                                '_compressed.jpeg', 'rb') as fh:
                            buf = io.BytesIO(fh.read())
                        image_bytes = JPEGMarkerStats(
                            buf.getvalue()).get_effective_bytes()
                    else:
                        image_bytes = os.path.getsize(directory + '/' +
                                                      filename + "_" + str(q) +
                                                      '_compressed.jpeg')
                    image = imageio.imread(directory + '/' + filename).astype(
                        float) / (2**8 - 1)
                    image_compressed = imageio.imread(
                        directory + '/' + filename + "_" + str(q) +
                        '_compressed.jpeg').astype(float) / (2**8 - 1)
                    image_compressed_path = directory + '/' + filename + "_" + str(
                        q) + '_compressed.jpeg'
                    image_path = directory + '/' + filename

                    if not write_files:
                        os.remove(directory + '/' + filename + "_" + str(q) +
                                  '_compressed.jpeg')

                    msssim_value = msssim(image, image_compressed, MAX=1).real

                    df = df.append(
                        {
                            'image_id': image_id,
                            'filename': filename,
                            'codec': 'jpeg',
                            'quality': q,
                            'ssim': compare_ssim(image,
                                                 image_compressed,
                                                 multichannel=True,
                                                 data_range=1),
                            'psnr': compare_psnr(image,
                                                 image_compressed,
                                                 data_range=1),
                            'msssim': msssim_value,
                            'msssim_db': -10 * np.log10(1 - msssim_value),
                            'bytes': image_bytes,
                            'bpp': 8 * image_bytes / image.shape[0] / image.shape[1]
                        },
                        ignore_index=True)

                    pbar.set_postfix(image_id=image_id, quality=q)
                    pbar.update(1)

        df.to_csv(os.path.join(directory, 'jpeg.csv'), index=False)

    return df
Code example #9
vutils.save_image(lr, "lr.png")
vutils.save_image(sr, "sr.png")
vutils.save_image(hr, "hr.png")

# Evaluate performance
src_img = cv2.imread("sr.png")
dst_img = cv2.imread("hr.png")

# Reference sources from `https://github.com/richzhang/PerceptualSimilarity`
lpips_loss = lpips.LPIPS(net="vgg").to(device)

mse_value = mse(src_img, dst_img)
rmse_value = rmse(src_img, dst_img)
psnr_value = psnr(src_img, dst_img)
ssim_value = ssim(src_img, dst_img)
ms_ssim_value = msssim(src_img, dst_img)  # returns a complex value; use the real part
niqe_value = cal_niqe("sr.png")
sam_value = sam(src_img, dst_img)
vif_value = vifp(src_img, dst_img)
lpips_value = lpips_loss(sr, hr)

print("\n")
print("====================== Performance summary ======================")
print(
    f"MSE: {mse_value:.2f}\n"
    f"RMSE: {rmse_value:.2f}\n"
    f"PSNR: {psnr_value:.2f}\n"
    f"SSIM: {ssim_value[0]:.4f}\n"
    f"MS-SSIM: {ms_ssim_value.real:.4f}\n"
    f"NIQE: {niqe_value:.2f}\n"
    f"SAM: {sam_value:.4f}\n"
    f"VIF: {vif_value:.4f}\n"
    f"LPIPS: {lpips_value.item():.4f}"
)
Code example #10
print("uqi: ", uqi)

psnr = psnr(img1, img2)
print("psnr: ", psnr)

ssim = ssim(img1, img2)
print("ssim: ", ssim)

mse = mse(img1, img2)
print("mse: ", mse)

rmse_sw = rmse_sw(img1, img2)
# print("rmse_sw: ", rmse_sw)

ergas = ergas(img1, img2)
print("ergas: ", ergas)

scc = scc(img1, img2)
print("scc: ", scc)

rase = rase(img1, img2)
print("ergas: ", rase)

sam = sam(img1, img2)
print("sam: ", sam)

msssim = msssim(img1, img2)
print("msssim: ", msssim)

vifp = vifp(img1, img2)
print("vifp: ", vifp)
Code example #11
def MSSSIM(original_stacked, estimated_stacked, image_size):
    estimated1, estimated2 = unflatten(estimated_stacked, image_size)
    source1, source2 = unflatten(original_stacked, image_size)
    msssim1 = msssim(source1, estimated1, MAX=1)
    msssim2 = msssim(source2, estimated2, MAX=1)
    return msssim1, msssim2
####################################################################
#Multiscale structural similarity index
"""calculates multi-scale structural similarity index (ms-ssim).

	:param GT: first (original) input image.
	:param P: second (deformed) input image.
	:param weights: weights for each scale (default = [0.0448, 0.2856, 0.3001, 0.2363, 0.1333]).
	:param ws: sliding window size (default = 11).
	:param K1: First constant for SSIM (default = 0.01).
	:param K2: Second constant for SSIM (default = 0.03).
	:param MAX: Maximum value of datarange (if None, MAX is calculated using image dtype).

	:returns:  float -- ms-ssim value.
	"""
msssim_img = full_ref.msssim(ref_img, img, weights=[0.0448, 0.2856, 0.3001, 0.2363, 0.1333], ws=11, K1=0.01, K2=0.03, MAX=None)

print("MSSSIM: multi-scale structural similarity index = ", msssim_img)


##############################################################################
#PSNR
"""calculates peak signal-to-noise ratio (psnr).

	:param GT: first (original) input image.
	:param P: second (deformed) input image.
	:param MAX: maximum value of datarange (if None, MAX is calculated using image dtype).

	:returns:  float -- psnr value in dB.
	"""
psnr_img = full_ref.psnr(ref_img, img, MAX=None)
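
Several of the rate-distortion scripts above also report MS-SSIM on a dB scale; a small sketch of that conversion, reusing the msssim_img value computed here, is:

import numpy as np

# full_ref.msssim returns a complex number; its real part is the score in [0, 1]
msssim_db = -10 * np.log10(1 - msssim_img.real)
print("MS-SSIM (dB): ", msssim_db)
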
Code example #13
File: test.py Project: updating00/FSRCNN-PyTorch-1
    out_img_cb = cb.resize(out_image_y.size, Image.BICUBIC)
    out_img_cr = cr.resize(out_image_y.size, Image.BICUBIC)
    out_img = Image.merge("YCbCr",
                          [out_image_y, out_img_cb, out_img_cr]).convert("RGB")
    # save the reconstructed RGB result
    out_img.save(f"result/{filename}")

    # Evaluate performance
    src_img = cv2.imread(f"result/{filename}")
    dst_img = cv2.imread(f"{target}/{filename}")

    total_mse_value += mse(src_img, dst_img)
    total_rmse_value += rmse(src_img, dst_img)
    total_psnr_value += psnr(src_img, dst_img)
    total_ssim_value += ssim(src_img, dst_img)[0]
    total_ms_ssim_value += msssim(src_img, dst_img).real
    total_niqe_value += cal_niqe(f"result/{filename}")
    total_sam_value += sam(src_img, dst_img)
    total_vif_value += vifp(src_img, dst_img)

    total_file += 1

print(f"Avg MSE: {total_mse_value / total_file:.2f}\n"
      f"Avg RMSE: {total_rmse_value / total_file:.2f}\n"
      f"Avg PSNR: {total_psnr_value / total_file:.2f}\n"
      f"Avg SSIM: {total_ssim_value / total_file:.4f}\n"
      f"Avg MS-SSIM: {total_ms_ssim_value / total_file:.4f}\n"
      f"Avg NIQE: {total_niqe_value / total_file:.2f}\n"
      f"Avg SAM: {total_sam_value / total_file:.4f}\n"
      f"Avg VIF: {total_vif_value / total_file:.4f}")
Code example #14
def get_dcn_df(directory,
               model_directory,
               write_files=False,
               force_calc=False):
    """
    Compute and return (as Pandas DF) the rate distortion curve for the learned DCN codec.
    The result is saved as a CSV file in the source directory. If the file exists, the DF
    is loaded and returned.
    """

    # Discover test files
    files, _ = loading.discover_images(directory, n_images=-1, v_images=0)
    batch_x = loading.load_images(files, directory, load='y')
    batch_x = batch_x['y'].astype(np.float32) / (2**8 - 1)

    # Create a new table for the DCN
    df = pd.DataFrame(columns=[
        'image_id', 'filename', 'model_dir', 'codec', 'ssim', 'psnr', 'msssim',
        'msssim_db', 'entropy', 'bytes', 'bpp', 'layers', 'quantization',
        'entropy_reg', 'codebook', 'latent', 'latent_shape', 'n_features'
    ])

    # Discover available models
    model_dirs = list(Path(model_directory).glob('**/progress.json'))
    logger.info('Found {} models'.format(len(model_dirs)))

    df_path = os.path.join(
        directory, 'dcn-{}.csv'.format(
            [x for x in fsutil.split(model_directory) if len(x) > 0][-1]))

    if os.path.isfile(df_path) and not force_calc:
        logger.info('Restoring DCN stats from {}'.format(df_path))
        df = pd.read_csv(df_path, index_col=False)
    else:

        for model_dir in model_dirs:
            logger.info('Processing model dir: {}'.format(model_dir))
            dcn = codec.restore(
                os.path.split(str(model_dir))[0], batch_x.shape[1])

            # Dump compressed images
            for image_id, filename in enumerate(files):

                try:
                    batch_y, image_bytes = codec.simulate_compression(
                        batch_x[image_id:image_id + 1], dcn)
                    batch_z = dcn.compress(batch_x[image_id:image_id + 1])
                    entropy = helpers.stats.entropy(batch_z,
                                                    dcn.get_codebook())
                except Exception as e:
                    logger.error(
                        'Error while processing {} with {} : {}'.format(
                            filename, dcn.model_code, e))
                    raise e

                if write_files:
                    image_dir = os.path.join(directory,
                                             os.path.splitext(filename)[0])
                    if not os.path.isdir(image_dir):
                        os.makedirs(image_dir)

                    image_path = os.path.join(
                        image_dir,
                        dcn.model_code.replace('/', '-') + '.png')
                    imageio.imwrite(image_path,
                                    (255 * batch_y[0]).astype(np.uint8))

                msssim_value = msssim(batch_x[image_id], batch_y[0],
                                      MAX=1).real

                df = df.append(
                    {
                        'image_id': image_id,
                        'filename': filename,
                        'model_dir': os.path.relpath(
                            os.path.split(str(model_dir))[0],
                            model_directory).replace(dcn.scoped_name, ''),
                        'codec': dcn.model_code,
                        'ssim': compare_ssim(batch_x[image_id],
                                             batch_y[0],
                                             multichannel=True,
                                             data_range=1),
                        'psnr': compare_psnr(batch_x[image_id],
                                             batch_y[0],
                                             data_range=1),
                        'msssim': msssim_value,
                        'msssim_db': -10 * np.log10(1 - msssim_value),
                        'entropy': entropy,
                        'bytes': image_bytes,
                        'bpp': 8 * image_bytes / batch_x[image_id].shape[0] /
                               batch_x[image_id].shape[1],
                        'layers': dcn.n_layers if 'n_layers' in dcn._h else None,
                        'quantization': '{}-{:.0f}bpf'.format(dcn._h.rounding,
                                                              dcn.latent_bpf),
                        'entropy_reg': dcn.entropy_weight,
                        'codebook': dcn._h.rounding,
                        'latent': dcn.n_latent,
                        'latent_shape': '{}x{}x{}'.format(*dcn.latent_shape[1:]),
                        'n_features': dcn.latent_shape[-1]
                    },
                    ignore_index=True)

        df.to_csv(df_path, index=False)

    return df
Code example #15
    with torch.no_grad():
        sr = model(lr)

    vutils.save_image(lr, f"./benchmark/lr_{iteration}.bmp")
    vutils.save_image(sr, f"./benchmark/sr_{iteration}.bmp")
    vutils.save_image(hr, f"./benchmark/hr_{iteration}.bmp")

    # Evaluate performance
    src_img = cv2.imread(f"./benchmark/sr_{iteration}.bmp")
    dst_img = cv2.imread(f"./benchmark/hr_{iteration}.bmp")

    mse_value = mse(src_img, dst_img)
    rmse_value = rmse(src_img, dst_img)
    psnr_value = psnr(src_img, dst_img)
    ssim_value = ssim(src_img, dst_img)
    ms_ssim_value = msssim(src_img, dst_img)
    niqe_value = cal_niqe(f"./benchmark/sr_{iteration}.bmp")
    sam_value = sam(src_img, dst_img)
    vif_value = vifp(src_img, dst_img)
    lpips_value = lpips_loss(sr, hr)

    total_mse_value += mse_value
    total_rmse_value += rmse_value
    total_psnr_value += psnr_value
    total_ssim_value += ssim_value[0]
    total_ms_ssim_value += ms_ssim_value.real
    total_niqe_value += niqe_value
    total_sam_value += sam_value
    total_vif_value += vif_value
    total_lpips_value += lpips_value.item()
Code example #16
def main_function(file_name1, file_name2, no):
    global file_name_upscaled
    global file_save_name
    # Second window created 
    top = Toplevel()
    top.title("second window")
    top.configure(bg="#333333")

    # read passed images 
    img = cv2.imread(file_name1)
    ref_img = cv2.imread(file_name2)

    """
                                    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
                                                    Interpolation
                                    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    """

    # Linear interpolation
    if no == 0:
        img_int = cv2.resize(img, (384, 384), fx=0, fy=0, interpolation=cv2.INTER_LINEAR)
        file_save_name = 'linear_interpolation.png'
        cv2.imwrite(file_save_name, img_int)

    # Bicubic interpolation
    elif no == 1:
        img_int = cv2.resize(img, (384, 384), fx=0, fy=0, interpolation=cv2.INTER_CUBIC)
        file_save_name = 'bicubic_interpolation.png'
        cv2.imwrite(file_save_name, img_int)

    # Lanczos interpolation
    elif no == 2:
        img_int = cv2.resize(img, (384, 384), fx=0, fy=0, interpolation=cv2.INTER_LANCZOS4)
        file_save_name = 'Lanczos.png'
        cv2.imwrite(file_save_name, img_int)

    # Nearest neighbour interpolation
    elif no == 3:
        img_int = cv2.resize(img, (384, 384), fx=0, fy=0, interpolation=cv2.INTER_NEAREST)
        file_save_name = 'Nearest.png'
        cv2.imwrite(file_save_name, img_int)

    # Pixel area resampling
    elif no == 4:
        img_int = cv2.resize(img, (384, 384), fx=0, fy=0, interpolation=cv2.INTER_AREA)
        file_save_name = 'pixel.png'
        cv2.imwrite(file_save_name, img_int)

    elif no == 5:
        pass

    else:
        pass


    """
                                    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
                                                 Designing TKINTER GUI
                                    XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    """

    # display labels 
    label_HR = Label(top, text="HR Image", bg="#333333", fg="#fff")
    label_HR.grid(row=0, column=2)
    label_LR = Label(top, text="LR Image", bg="#333333", fg="#fff")
    label_LR.grid(row=0, column=0)
    label_Upscaled = Label(top, text="Upscaled Image", bg="#333333", fg="#fff")
    label_Upscaled.grid(row=0, column=1)

    # display the image

    # display HR image
    disp_HR = ImageTk.PhotoImage(Image.open(file_name2))  
    label_HR_img = Label(top, image = disp_HR, bg="#333333", fg="#fff")
    label_HR_img.grid(row=1, column=2)

    # display LR image
    disp_LR = ImageTk.PhotoImage(Image.open(file_name1))  
    label_LR_img = Label(top, image = disp_LR, bg="#333333", fg="#fff")
    label_LR_img.grid(row=1, column=0)

    # display upscaled image
    file_name_upscaled = os.path.abspath(file_save_name)
    disp = ImageTk.PhotoImage(Image.open(file_name_upscaled))
    label_1 = Label(top, image=disp, bg="#333333", fg="#fff")
    label_1.grid(row=1, column=1)

    # calculate the MS-SSIM and DISTS
    msssim_img = full_ref.msssim(ref_img, img_int, weights=[0.0448, 0.2856, 0.3001, 0.2363, 0.1333], ws=11, K1=0.01, K2=0.03, MAX=None)
    dists_img = arguments(file_name2, file_name_upscaled)

    
    # display the calculated error
    label_msssim = Label(top, text = "MS-SSIM: "+str(msssim_img.real), bg="#333333", fg="#fff")
    label_msssim.grid(row=2, column=0, columnspan=2)
    print(msssim_img.real)

    label_dists = Label(top, text = "DISTS: "+str(dists_img), bg="#333333", fg="#fff")
    label_dists.grid(row=2, column=2)
    print(dists_img)

    label_msssim.config(font=("Courier", 18))
    label_dists.config(font=("Courier", 18))

    # end of program
    mainloop()
Code example #17
for i in total_files:
    f_name = str(i).split('.')[0]
    img_clean = cv2.imread(clean_dir + str(f_name) + '.jpg')
    img_pred = cv2.imread(result_dir + str(f_name) + '.png')

    uq = uqi(img_clean, img_pred)
    uqi_list.append(uq)

    ms = mse(img_clean, img_pred)
    mse_list.append(ms)

    vi = vifp(img_clean, img_pred)
    vif_list.append(vi)

    mss = msssim(img_clean, img_pred)
    msssim_list.append(mss)

    cnt += 1

    print('Mean  Our  UQI ' + str(np.mean(uqi_list)))
    print('Mean  Our  MSE ' + str(np.mean(mse_list)))
    print('Mean  Our  VIF ' + str(np.mean(vif_list)))
    print('Mean  Our  MSSSIM ' + str(np.mean(msssim_list)))

    print(str(cnt) + '\n')

# msssim_list=[]
# mse_list=[]
# tv_list=[]