Example #1
def bootstrapping_analysis(res):
    random.seed(1)
    np.random.seed(1)  # sklearn's resample draws from numpy's RNG, so seed it too for reproducibility
    bootstrapping = {
        t: dict(rmse=[], mae=[])
        for t in res['t_period'].unique()
    }
    for i in range(1000):
        for t in res['t_period'].unique():
            samp = resample(res[res['t_period'] == t],
                            replace=True,
                            n_samples=800)
            rmse = mean_squared_error(samp['y'], samp['pred'])**0.5
            mae = mean_absolute_error(samp['y'], samp['pred'])
            bootstrapping[t]['rmse'].append(rmse)
            bootstrapping[t]['mae'].append(mae)

    bootstrapping[99] = dict(rmse=[], mae=[])
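    # key 99 collects a pooled bootstrap over the full result set (all periods combined)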
    for i in range(1000):
        samp = resample(res, replace=True, n_samples=800)
        rmse = mean_squared_error(samp['y'], samp['pred'])**0.5
        mae = mean_absolute_error(samp['y'], samp['pred'])
        bootstrapping[99]['rmse'].append(rmse)
        bootstrapping[99]['mae'].append(mae)

    return bootstrapping
Example #2
def test_inpaint_biharmonic_2d_color_deprecated():
    img = img_as_float(data.astronaut()[:64, :64])

    mask = np.zeros(img.shape[:2], dtype=bool)
    mask[8:16, :16] = 1
    img_defect = img * ~mask[..., np.newaxis]
    mse_defect = mean_squared_error(img, img_defect)

    # providing the deprecated `multichannel` keyword argument warns
    channel_warning = "`multichannel` is a deprecated argument"
    matrix_warning = "the matrix subclass is not the recommended way"
    with expected_warnings([channel_warning + '|' + matrix_warning]):
        img_restored = inpaint.inpaint_biharmonic(img_defect,
                                                  mask,
                                                  multichannel=True)
    mse_restored = mean_squared_error(img, img_restored)

    assert mse_restored < 0.01 * mse_defect

    # providing multichannel argument positionally also warns
    channel_warning = "Providing the `multichannel` argument"
    with expected_warnings([channel_warning + '|' + matrix_warning]):
        img_restored = inpaint.inpaint_biharmonic(img_defect, mask, True)
    mse_restored = mean_squared_error(img, img_restored)

    assert mse_restored < 0.01 * mse_defect
Example #3
def log(index, gtImg, noisy, gfiltered, nlmfiltered, params, gaussian=False, salted=False):
  '''
  This function logs the results in a .csv file.
  The skimage library is used to compute the MSE and PSNR
  '''

  f = open('OUTPUT/LOGS/' +str(index)+'-LOG.csv','a')
  if gaussian:
    f.write('Gaussian Noise\n')
  elif salted:
    f.write('Salt and Pepper Noise\n')

  f.write('Params: ' + str(params) + '\n')
  f.write('NOISY,GAUSSIAN FILTER on NOISE,NLM FILTER on NOISE\n')
  f.write(str(peak_signal_noise_ratio(gtImg, noisy)))
  f.write(',')
  f.write(str(peak_signal_noise_ratio(gtImg, gfiltered)))
  f.write(',')
  f.write(str(peak_signal_noise_ratio(gtImg, nlmfiltered)))
  f.write('\n')
  f.write(str(mean_squared_error(gtImg, noisy)))
  f.write(',')
  f.write(str(mean_squared_error(gtImg, gfiltered)))
  f.write(',')
  f.write(str(mean_squared_error(gtImg, nlmfiltered)))
  f.write('\n\n')
  f.close()
Example #4
def test_NRMSE_no_int_overflow():
    camf = cam.astype(np.float32)
    cam_noisyf = cam_noisy.astype(np.float32)
    assert_almost_equal(mean_squared_error(cam, cam_noisy),
                        mean_squared_error(camf, cam_noisyf))
    assert_almost_equal(normalized_root_mse(cam, cam_noisy),
                        normalized_root_mse(camf, cam_noisyf))
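
The test above checks that the metrics do not suffer from unsigned-integer wraparound on uint8 inputs. A minimal illustration of why the internal cast to float matters (the tiny arrays below are assumptions for demonstration only):

import numpy as np

a = np.array([10], dtype=np.uint8)
b = np.array([250], dtype=np.uint8)
print(a - b)                # [16]    uint8 arithmetic wraps around instead of giving -240
print(a.astype(float) - b)  # [-240.] casting to float preserves the true difference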
Example #5
def nrmse_similarity(image_1, image_2, norm_mode="Min max"):
    """
    Normalized root mean squared error (NRMSE).

    :param image_1: The image 1 for comparison
    :type image_1: numpy.ndarray
    :param image_2: The image 2 for comparison
    :type image_2: numpy.ndarray
    :param norm_mode: The normalization mode: "Average norm" uses max(||image_1||, ||image_2||), \
                 "Min max" uses max(image_1 value range, image_2 value range)
    :type norm_mode: str
    :return: A score measuring the similarity between the two images in the range [0, 1] using NRMSE; \
             0 is the least similar, 1 is the most similar (identical)
    :rtype: float
    """
    image_1 = image_1.astype("float64")
    image_2 = image_2.astype("float64")
    if norm_mode == "Average norm":
        image_1_avg_norm = np.sqrt(np.mean(image_1 * image_1))
        image_2_avg_norm = np.sqrt(np.mean(image_2 * image_2))
        denom = max(image_1_avg_norm, image_2_avg_norm)
    elif norm_mode == "Min max":
        image_1_min_max = image_1.max() - image_1.min()
        image_2_min_max = image_2.max() - image_2.min()
        denom = max(image_1_min_max, image_2_min_max)
    else:
        raise ValueError(f"Unknown norm_mode: {norm_mode}")

    score = 1 - np.sqrt(mean_squared_error(image_1, image_2)) / denom

    return score
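
A minimal usage sketch for nrmse_similarity (the arrays below are illustrative assumptions; it relies on the same skimage.metrics.mean_squared_error import as the function itself):

import numpy as np

a = np.array([[0.0, 0.5], [1.0, 0.25]])
b = a + 0.1

print(nrmse_similarity(a, a))                            # 1.0 for identical images
print(nrmse_similarity(a, b, norm_mode="Min max"))       # 0.9 (RMSE 0.1 over a value range of 1.0)
print(nrmse_similarity(a, b, norm_mode="Average norm"))  # slightly lower, depends on the image norms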
Example #6
def compare_imgs(im1, im2, fil_out):
    with warnings.catch_warnings():
        warnings.simplefilter(
            "ignore")  # ignore skimage's deprecation warnings.

        # structural similarity index
        # [ss, im] = metrics.structural_similarity(im1,im2,multichannel=True)
        [ss, im] = metrics.structural_similarity(im1,
                                                 im2,
                                                 gaussian_weights=True,
                                                 sigma=1.5,
                                                 use_sample_covariance=False,
                                                 multichannel=True,
                                                 full=True)
        if fil_out:
            im8 = skimage.img_as_ubyte(np.clip(im, -1.0, 1.0))
            save_img(fil_out, im8)

        # mean square error
        mse = metrics.mean_squared_error(im1, im2)

        # normalized root mean squared error
        nrmse = metrics.normalized_root_mse(im1, im2)

        # peak signal-to-noise ratio
        psnr = metrics.peak_signal_noise_ratio(im1, im2)

        return [ss, mse, nrmse, psnr]
Example #7
File: meta.py  Project: mozanunal/SparseCT
    def _eval(self, test_loader):
        self.net.eval()
        rmse_list = []
        ssim_list = []
        psnr_list = []
        for projs, gts in tqdm(test_loader):
            projs = projs.type(self.DTYPE)
            gts = gts.type(self.DTYPE)
            x_iter = self.net(
                self.ir(projs)
            )
            
            for i in range(projs.shape[0]):
                gt = torch_to_np(gts[i:i+1])
                x_iter_npy = np.clip(torch_to_np(x_iter[i:i+1]), 0, 1).astype(np.float64)

                rmse_list.append(np.sqrt(mean_squared_error(x_iter_npy, gt)))  # RMSE = sqrt(MSE)
                ssim_list.append(structural_similarity(x_iter_npy, gt, multichannel=False))
                psnr_list.append(peak_signal_noise_ratio(x_iter_npy, gt))
                # print('{}/{}- psnr: {:.3f} - ssim: {:.3f} - rmse: {:.5f}'.format(
                #     self.name, i, psnr_list[-1], ssim_list[-1], rmse_list[-1],
                # ))
        print('EVAL_RESULT {}/{}- psnr: {:.3f} - ssim: {:.3f} - rmse: {:.5f}'.format(
                self.name, self.i_iter, np.mean(psnr_list), np.mean(ssim_list), np.mean(rmse_list),
            ))
Example #8
def ssim_sk(reference_img, test_img):
    reference_img = reference_img.astype(np.float64)
    test_img = test_img.astype(np.float64)
    return (
        ssim(reference_img, test_img, multichannel=True),
        mean_squared_error(reference_img, test_img),
    )
Example #9
def run(params):
    RTimageLocation = params['inputRTImagePath']
    GTimageLocation = params['inputGTImagePath']
    resultLocation = params['resultPath']
    resultLocationAdj = params['resultPathAdj']

    # Checking existence of temporary files (individual channels)
    if not os.path.exists(RTimageLocation):
        print(f'Error: {RTimageLocation} does not exist')
        return
    if not os.path.exists(GTimageLocation):
        print(f'Error: {GTimageLocation} does not exist')
        return

    # Loading input images
    RTData = imread(RTimageLocation)
    GTData = imread(GTimageLocation)
    print(f'Dimensions of Restored image: {RTData.shape}')
    print(f'Dimensions of GT image: {GTData.shape}')

    # Checking dtype is the same for both input channels
    if GTData.dtype != RTData.dtype:
        error_mes = "The bit depth of your input channels is not the same. Convert one of them and retry."
        ctypes.windll.user32.MessageBoxW(0, error_mes, 'Error', 0)
        sys.exit(error_mes)

    # Histogram matching
    matched_GTData = match_histograms(GTData, RTData).astype(RTData.dtype)

    # MSE measurement
    # valMSE = skimage.measure.compare_mse(RTData, GTData) # deprecated in scikit-image 0.18
    valMSE = mean_squared_error(RTData, matched_GTData)
    print(
        f'___ MSE = {valMSE} ___'
    )  # Value appears in the log if Verbosity option is set to 'Everything'

    # SSIM measurement
    outFullSSIM = structural_similarity(RTData, matched_GTData, full=True)

    # Extracting mean value (first item)
    outMeanSSIM = outFullSSIM[0]
    print(f'___ Mean SSIM = {outMeanSSIM} ___')

    # Extracting map (second item)
    outSSIM = outFullSSIM[1]
    print(f'Bit depth of SSIM array: {outSSIM.dtype}')

    # Convert output array whose range is [0-1] to adjusted bit range (8- or 16-bit) if necessary
    if RTData.dtype != np.dtype('float64') and RTData.dtype != np.dtype(
            'float32'):
        outputData = rescale_intensity(outSSIM,
                                       in_range=(0, 1),
                                       out_range=(0,
                                                  np.iinfo(RTData.dtype).max))
        outputData = outputData.astype(RTData.dtype)
    else:
        outputData = outSSIM

    imsave(resultLocation, outputData)
    imsave(resultLocationAdj, matched_GTData)
Example #10
def test_inpaint_biharmonic_2d_color(channel_axis):
    img = img_as_float(data.astronaut()[:64, :64])

    mask = np.zeros(img.shape[:2], dtype=bool)
    mask[8:16, :16] = 1
    img_defect = img * ~mask[..., np.newaxis]
    mse_defect = mean_squared_error(img, img_defect)

    img_defect = np.moveaxis(img_defect, -1, channel_axis)
    img_restored = inpaint.inpaint_biharmonic(img_defect,
                                              mask,
                                              channel_axis=channel_axis)
    img_restored = np.moveaxis(img_restored, channel_axis, -1)
    mse_restored = mean_squared_error(img, img_restored)

    assert mse_restored < 0.01 * mse_defect
Example #11
def compute_metrics(seriesX, seriesY, metric, window):
    # Smooth the data if needed
    if window != 'none':
        seriesX = smooth(x=seriesX, window_len=5, window=window)
        seriesY = smooth(x=seriesY, window_len=5, window=window)

    if seriesX.shape != seriesY.shape:
        raise ValueError('Only accepts signals that have the same shape.')

    # Compute the similarity metrics
    if metric == 'zncc': return getZNCC(seriesX, seriesY)
    if metric == 'ssim':
        return ski_metrics.structural_similarity(seriesX, seriesY)
    if metric == 'psnr': return getPSNR(seriesX, seriesY)
    if metric == 'f-test': return stats.f_oneway(seriesX, seriesY)[0]

    seriesX, seriesY = pd.Series(seriesX), pd.Series(seriesY)

    if metric == 'pearsonr': return seriesX.corr(seriesY, method='pearson')
    if metric == 'spearmanr': return seriesX.corr(seriesY, method='spearman')
    if metric == 'kendalltau': return seriesX.corr(seriesY, method='kendall')

    # Compute dissimilarity metrics
    if metric == 'mse': return ski_metrics.mean_squared_error(seriesX, seriesY)
    if metric == 'nrmse':
        return ski_metrics.normalized_root_mse(seriesX, seriesY)
    if metric == 'me': return getME(seriesX, seriesY)
    if metric == 'mae': return getMAE(seriesX, seriesY)
    if metric == 'msle': return getMSLE(seriesX, seriesY)
    if metric == 'medae': return getMedAE(seriesX, seriesY)
Example #12
File: metrics.py  Project: YaoshiHuang/LSIM
    def forward(self, x):
        input1 = x["reference"].cpu().numpy().astype(np.uint8)
        input2 = x["other"].cpu().numpy().astype(np.uint8)

        sizeIn = input1.shape

        distance = np.empty((sizeIn[0], sizeIn[1]))
        for i in range(sizeIn[0]):
            for j in range(sizeIn[1]):
                in1 = np.transpose(input1[i, j], [1, 2, 0])
                in2 = np.transpose(input2[i, j], [1, 2, 0])
                if self.mode == "L2":
                    distance[i, j] = metrics.mean_squared_error(
                        in1, in2) / (255.0 * 255.0)
                elif self.mode == "SSIM":
                    distance[i, j] = 1 - metrics.structural_similarity(
                        in1, in2,
                        multichannel=True)  #invert as distance measure
                elif self.mode == "PSNR":
                    distance[i, j] = -metrics.peak_signal_noise_ratio(
                        in1, in2)  #invert as distance measure
                elif self.mode == "MI":
                    distance[i, j] = np.mean(
                        metrics.variation_of_information(in1, in2))
        return torch.from_numpy(distance)
Example #13
    def on_epoch_end(self, epoch, logs={}):
        if epoch % 5 == 1:
            data, ans = next(test_generator)
            data = [data[x][5][np.newaxis, :, :, :] for x in range(picnum)]
            ans = ans[5]
            import matplotlib
            cmap = matplotlib.cm.gray
            cmap.set_bad(color='black')

            pred = model.predict(
                data)  #shape:(1,11,11,1) because last layer is conv not dense
            #pred = np.clip(pred,0,1)
            #pred = (pred-np.min(pred))/(np.max(pred)-np.min(pred))
            #pred = softmax(pred)
            pred[0, 0, 0, 0] = 0
            pred[0, -1, -1, 0] = 1
            ans[0, 0] = 0
            ans[-1, -1] = 1
            for i in range(6):
                plt.subplot(231 + i)
                plt.imshow(data[i][0, :, :, 0], cmap=cmap)
            plt.show()
            plt.subplot(121)
            plt.imshow(pred[0, :, :, 0], cmap=cmap)
            plt.colorbar()
            #ans = np.clip((ans-np.percentile(ans,10))/(np.percentile(ans,90)-np.percentile(ans,10)),0,1)
            plt.subplot(122)
            plt.imshow(ans, cmap=cmap)
            plt.colorbar()
            plt.show()
            #      print('psnr',peak_signal_noise_ratio(np.clip(pred[0,:,:,0],0,1),ans))
            #      print('ssim',structural_similarity(np.clip(pred[0,:,:,0],0,1),ans))
            print('mse',
                  mean_squared_error(np.clip(pred[0, :, :, 0], 0, 1), ans))
Example #14
def compute_metrics(df1, df2, metrics, window):
    number_of_columns1 = len(df1.columns)
    number_of_columns2 = len(df2.columns)

    # { var1_names, var2_names, metric1: 2d array, metric2: ... }}
    result = {
        'var1_names': df1.columns[1:].tolist(),
        'var2_names': df2.columns[1:].tolist()
    }
    for m in metrics:
        result[m] = np.zeros((number_of_columns1 - 1, number_of_columns2 - 1))

    # Using nested for loops to create a 2-D matrix
    for i in range(number_of_columns1 - 1):
        for j in range(number_of_columns2 - 1):
            X = df1.columns[i + 1]
            Y = df2.columns[j + 1]

            # Remove missing data if needed
            seriesX = removeMissingData(df1[X])
            seriesY = removeMissingData(df2[Y])

            # Smooth the data if needed
            if window != 'none':
                seriesX = smooth(x=seriesX, window_len=5, window=window)
                seriesY = smooth(x=seriesY, window_len=5, window=window)

            if seriesX.shape != seriesY.shape:
                raise ValueError('Only accepts signals that have the same shape.')

            # Compute the similarity metrics
            if 'zncc' in metrics: result['zncc'][i][j] = getZNCC(seriesX, seriesY)
            if 'ssim' in metrics: result['ssim'][i][j] = ski_metrics.structural_similarity(seriesX, seriesY)
            if 'psnr' in metrics: result['psnr'][i][j] = getPSNR(seriesX, seriesY)

            # Note: added by Phong
            if 'f-test' in metrics: result['f-test'][i][j] = stats.f_oneway(seriesX, seriesY)[0]

            # Use pandas corr to ignore inf and NaN.
            seriesX, seriesY = pd.Series(seriesX), pd.Series(seriesY)

            if 'pearsonr' in metrics: result['pearsonr'][i][j] = seriesX.corr(seriesY, method='pearson')
            if 'spearmanr' in metrics: result['spearmanr'][i][j] = seriesX.corr(seriesY, method='spearman')
            if 'kendalltau' in metrics: result['kendalltau'][i][j] = seriesX.corr(seriesY, method='kendall')

            # Compute dissimilarity metrics
            if 'mse' in metrics: result['mse'][i][j] = ski_metrics.mean_squared_error(seriesX, seriesY)
            if 'nrmse' in metrics: result['nrmse'][i][j] = ski_metrics.normalized_root_mse(seriesX, seriesY)
            if 'me' in metrics: result['me'][i][j] = getME(seriesX, seriesY)
            if 'mae' in metrics: result['mae'][i][j] = getMAE(seriesX, seriesY)
            if 'msle' in metrics: result['msle'][i][j] = getMSLE(seriesX, seriesY)
            if 'medae' in metrics: result['medae'][i][j] = getMedAE(seriesX, seriesY)

    # Note: added by Phong
    # Replace NaN with 0 for serialisation
    for m in metrics:
        result[m] = np.nan_to_num(result[m])

    return result
Example #15
def approximate(image=None, nsteps=20000, px=1200):

    target = np.array(Image.open(image).convert(
        "RGB")) if image is not None else randomImage(px)

    no_imgs = len(os.listdir("imgs")) // 2
    (dx, dy) = (target.shape[1], target.shape[0])  # PIL sizes are (width, height)

    gray = random.randint(0, 255)
    image = Image.new(mode="RGB", size=(dx, dy), color=(gray, gray, gray))
    draw = ImageDraw.Draw(image)

    mse = float("inf")
    steps = nsteps

    vectors = []
    for _ in range(0, 3):
        x = random.randint(-50, 50)
        y = random.randint(-50, 50)
        for s in range(-2, 2, 1):
            vectors.append((s * x, s * y))

    champion = (None, float("inf"))

    while steps > 0:
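        # Greedy step: try a few random line segments through a random point and keep the best MSE improvement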

        p1 = (random.randint(0, dx - 1), random.randint(0, dy - 1))
        cp = (p1[1], p1[0])
        colour = (target[cp][0], target[cp][1], target[cp][2])

        choices = []
        for v in vectors:
            p2 = vadd(p1, v)
            choices.append((p1, p2, colour))

        best_candidate = champion

        for config in choices:
            copy = image.copy()
            cdraw = ImageDraw.Draw(copy)
            cdraw.line([config[0], config[1]],
                       width=random.randint(5, 10),
                       fill=config[2])
            ref = np.array(copy)

            mse = metrics.mean_squared_error(target, ref)
            steps -= 1
            if mse < best_candidate[1]:
                best_candidate = (copy, mse)
                break

        if best_candidate[1] < champion[1]:
            champion = best_candidate
            image = champion[0]

    # Save image
    fn = str(len(os.listdir('imgs'))) + ".png"
    champion[0].save("./imgs/" + fn)
    return fn
Example #16
def bijiao(img_1, img_2):
    psnr = metrics.peak_signal_noise_ratio(img_1, img_2)
    mse = metrics.mean_squared_error(img_1, img_2)
    ssim = metrics.structural_similarity(img_1, img_2, multichannel=True)
    print(float('%.2f' % psnr))
    print(float('%.2f' % mse))
    print(float('%.2f' % ssim))
    print('\n')
Example #17
def check_mse(X, km, PSNR_TH):
    mse_TH = 255**2 / pow(10, PSNR_TH / 10)
    idx = km.predict(X)
    res = km.cluster_centers_[idx]
    mse = mean_squared_error(X, res)
    if mse > mse_TH:
        return mse, False
    return mse, True
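
The threshold above inverts the PSNR definition PSNR = 10 * log10(MAX^2 / MSE) with MAX = 255, giving mse_TH = 255**2 / 10**(PSNR_TH / 10). A small sanity sketch of that relationship (the random arrays are assumptions for demonstration):

import numpy as np
from skimage.metrics import mean_squared_error, peak_signal_noise_ratio

rng = np.random.default_rng(0)
a = rng.integers(0, 256, size=(32, 32)).astype(np.uint8)
b = np.clip(a.astype(int) + rng.integers(-5, 6, size=a.shape), 0, 255).astype(np.uint8)

mse = mean_squared_error(a, b)
psnr = peak_signal_noise_ratio(a, b, data_range=255)
assert np.isclose(mse, 255 ** 2 / 10 ** (psnr / 10))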
Example #18
def compression(image_path, save_folder, sample_percentages):
    # Define vectors to hold metric results.
    ssim_results = np.zeros(len(sample_percentages))
    mse_results = np.zeros(len(sample_percentages))
    psnr_results = np.zeros(len(sample_percentages))

    # Read in image and calculate dimensions.
    original_image, ny, nx, n_channels = read_image(image_path, as_gray=False)

    final_result = np.zeros(original_image.shape, dtype='uint8')
    masks = np.zeros(original_image.shape, dtype='uint8')

    # Iterate through each sample percentage value.
    for i, sample_percentage in enumerate(sample_percentages):
        print(f'Samples = {100 * sample_percentage}%')
        start = time()

        # Get random sample indices so they're the same for all channels
        ri = generate_random_samples(nx * ny, sample_percentage)

        # Iterate through each color channel
        for j in range(n_channels):
            # Randomly sample from the image with the given percentage.
            # Retrieve the samples (b) and the masked image.
            b, masks[:, :, j] = create_mask(original_image[:, :, j], ri)

            # Compute results using OWL-QN
            final_result[:, :, j] = owl_qn_cs(original_image[:, :, j], nx, ny,
                                              ri, b)

        # Compute Structural Similarity Index (SSIM) of
        # reconstructed image versus original image.
        ssim_results[i] = structural_similarity(original_image,
                                                final_result,
                                                data_range=final_result.max() -
                                                final_result.min(),
                                                multichannel=True)
        mse_results[i] = mean_squared_error(original_image, final_result)
        psnr_results[i] = peak_signal_noise_ratio(
            original_image,
            final_result,
            data_range=final_result.max() - final_result.min())

        # Save images.
        imageio.imwrite(
            f'results/{save_folder}mask_{trunc( 100 * sample_percentage )}.png',
            masks)
        imageio.imwrite(
            f'results/{save_folder}recover_{trunc( 100 * sample_percentage )}.png',
            final_result)

        print(f'Elapsed Time: {time() - start:.3f} seconds.\n')

    for i, sample_percentage in enumerate(sample_percentages):
        print(
            f'{trunc( 100 * sample_percentage ): 6.2f}%:\n    SSIM: {ssim_results[ i ]}\n    MSE:  {mse_results[ i ]}\n    PSNR: {psnr_results[ i ]}\n'
        )
Example #19
def images_equal(
    image_path_1: str,
    image_path_2: str,
) -> bool:

    return mean_squared_error(
        cv2.imread(image_path_1),
        cv2.imread(image_path_2),
    ) < 0.0001
Example #20
    def test_mse1(self):
        if has_skimage:
            #%%  Check mean squared error for the coins image with added noise
            res1 = mse(self.id_coins, self.id_coins_noisy)
            res2 = mean_squared_error(self.id_coins.as_array(), self.id_coins_noisy.as_array())
            print('Check MSE for coins image with gaussian noise')
            np.testing.assert_almost_equal(res1, res2, decimal=5)
        else:
            self.skipTest("scikit-image not present ... skipping")
Example #21
    def rate(self, image, orig):
        """rate how similar image is to the original

        :param image: the image to be rated, as a numpy array
        :param orig: the original image to be compared to, as a numpy array
        :returns: a floating point number indicating the similarity or dissimilarity

        """
        return mean_squared_error(image, orig)
Example #22
    def compute(self, image, image_test):

        # mae = mean_absolute_error(self.image, self.image_test)
        mae = np.average(np.abs(image-image_test))
        mse = mean_squared_error(image, image_test)
        psnr = peak_signal_noise_ratio(image, image_test, data_range=255)
        ssim = structural_similarity(image, image_test, data_range=255)

        return mae, mse, psnr, ssim
Example #23
def calculate_similarity(img_1, img_2):
    '''
    Enter image 1 and image 2 to calculate the mean squared error and structural similarity.
    PSNR and SSIM are not used as they are more semantic metrics. Only MSE is used from this function.
    '''
    mse_1 = mean_squared_error(img_1, img_2)
    ssim_1 = ssim(img_1, img_2)  # data_range=img_2.max() - img_2.min())
    psnr = cv2.PSNR(img_1, img_2)
    #print("PSNR:", round(psnr,4))
    print("MSE:", round(mse_1, 5))
Example #24
def rmse_imgs(img1, img2):
    """ Calculate the rmse between two images """
    try:
        rmse = mean_squared_error(img_as_float(img1), img_as_float(img2)) ** 0.5
        return rmse
    except ValueError:
        print(
            f'RMS issue, Img1: {img1.size[0]} {img1.size[1]}, Img2: {img2.size[0]} {img2.size[1]}'
        )
        raise KeyboardInterrupt
Example #25
def Compare2ImagesFromLibIMMSE(first_image: str, second_image: str) -> float:
    image1 = cv2.imread(first_image)
    image2 = cv2.imread(second_image)

    start = time.time()
    error = mean_squared_error(image1, image2)
    end = time.time()
    print('The mean-squared error from skimage.metrics.mean_squared_error is ' +
          str(error) + ' time ' + str(end - start))
    return error
Example #26
    def test_mse2(self):
        if has_skimage:
            #%%  Check mean squared error for random ImageData

            res1 = mse(self.dc1, self.dc2)
            res2 = mean_squared_error(self.dc1.as_array(), self.dc2.as_array())
            print('Check MSE for random ImageData')
            np.testing.assert_almost_equal(res1, res2, decimal=5)

        else:
            self.skipTest("scikit0-image not present ... skipping")
Example #27
def mse(a, b):
    if a.ndim == 4 and a.shape[0] == 1:
        a = a.squeeze()

    if b.ndim == 4 and b.shape[0] == 1:
        b = b.squeeze()

    if a.ndim == 3 and b.ndim == 3:
        return metrics.mean_squared_error(a, b)

    elif a.ndim == 4 and b.ndim == 4:
        out = np.zeros((a.shape[0],))
        for i in range(a.shape[0]):
            out[i] = mse(a[i], b[i])
        return out

    else:
        raise ValueError('Incompatible tensor shapes! {} and {}'.format(a.shape, b.shape))

Example #28
def calculate_metrics_1d(gt_img, recon_img, verbose=True):
    snr = calculate_snr(gt_img, recon_img)
    mse = mean_squared_error(gt_img, recon_img)

    if verbose:
        print('============================')
        print(f'SNR: {snr}')
        print(f'MSE: {mse}')
        print('============================')

    return snr, mse
Example #29
def parallel_comparison(args):
    orig_path, comp_path = args
    orig_img = io.imread(orig_path)
    comp_img = io.imread(comp_path)
    comparisons = []
    comparisons.append(
        metrics.structural_similarity(orig_img, comp_img, multichannel=True))
    comparisons.append(metrics.peak_signal_noise_ratio(orig_img, comp_img))
    comparisons.append(metrics.mean_squared_error(orig_img, comp_img))
    comparisons.append(metrics.normalized_root_mse(orig_img, comp_img))
    return comparisons
Example #30
File: handler.py  Project: wozimer/applypy
    def mse(self, image) -> float:
        """
        Compute the mean-squared error between two images.
        :param image: Image to compare, must have same shape.
        :return: the mean-squared error (MSE) metric.
        """

        img1 = self._obj.mode.to_greyscale()
        img2 = image.mode.to_greyscale()

        return mean_squared_error(img1, img2)