Example #1
def compare(how, img1, img2):
    """
	Perform SSIM and find diff contours on the result
	"""

    # full=True gives us both the similarity score and the diff map
    score, diff = structural_similarity(img1, img2, full=True)
    diff = (diff * 255).astype("uint8")

    #cv2.namedWindow(f'{how} diff', cv2.WINDOW_NORMAL)
    #cv2.imshow(f'{how} diff', diff)
    #cv2.waitKey(0)

    return diff
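
The docstring mentions finding diff contours; as a hedged illustration (not part of the original project, and assuming structural_similarity is imported from skimage.metrics and OpenCV is available), a caller could threshold the returned diff map and collect the differing regions, mirroring the pattern in Example #4 below:

import cv2
import numpy as np
from skimage.metrics import structural_similarity

# Two synthetic grayscale frames that differ only in one patch (illustration only).
img1 = np.full((128, 128), 200, dtype=np.uint8)
img2 = img1.copy()
img2[40:80, 40:80] = 50

diff = compare("demo", img1, img2)  # uint8 SSIM diff map from Example #1

# Threshold the diff map and collect bounding boxes of the differing regions.
thresh = cv2.threshold(diff, 0, 255,
                       cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
boxes = [cv2.boundingRect(c) for c in contours if cv2.contourArea(c) > 40]
print(boxes)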
Example #2
def get_ssim_two_templates(cropped_im1, cropped_im2):
    h1, w1 = cropped_im1.shape[:2]
    h2, w2 = cropped_im2.shape[:2]

    if w1 / w2 > 1.5 or w1 / w2 < (1 / 1.5):
        return 0.0

    if cropped_im2.shape != cropped_im1.shape:
        return 0.0

    ssim_res1 = structural_similarity(img_as_float(cropped_im1),
                                      img_as_float(cropped_im2),
                                      multichannel=False)
    return ssim_res1
Example #3
def compute_psnr_and_ssim(image1, image2, border_size=0):
    """
    Computes PSNR and SSIM index from 2 images.
    We round it and clip to 0 - 255. Then shave 'scale' pixels from each border.
    """
    if border_size > 0:
        image1 = image1[border_size:-border_size, border_size:-border_size, :]
        image2 = image2[border_size:-border_size, border_size:-border_size, :]

    psnr = peak_signal_noise_ratio(image1, image2, data_range=255)
    ssim = structural_similarity(image1, image2, win_size=11, gaussian_weights=True,
                                 multichannel=True, K1=0.01, K2=0.03, sigma=1.5,
                                 data_range=255)
    return psnr, ssim
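
A quick usage sketch with hypothetical inputs, assuming images already scaled to the 0-255 range that the data_range=255 arguments expect (and a scikit-image version that still accepts the multichannel keyword used throughout these examples):

import numpy as np

# Hypothetical pair of noisy 8-bit-range RGB images, shaving a 4-pixel border.
rng = np.random.default_rng(0)
image1 = rng.integers(0, 256, (64, 64, 3)).astype(np.float64)
image2 = np.clip(image1 + rng.normal(0, 5, image1.shape), 0, 255)

psnr, ssim = compute_psnr_and_ssim(image1, image2, border_size=4)
print(f"PSNR: {psnr:.2f} dB, SSIM: {ssim:.4f}")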
Example #4
def compare_baseline_HSV(baseline, img, rect=None):  # switch to HSV?
    if rect is None:
        rect = [0, 0, baseline.shape[1], baseline.shape[0]]
    newBase = baseline.copy()
    newImg = img.copy()
    # Convert the cropped regions from BGR to HSV
    before_HSV = cv2.cvtColor(newBase[rect[1]:rect[3], rect[0]:rect[2]],
                              cv2.COLOR_BGR2HSV)
    after_HSV = cv2.cvtColor(newImg[rect[1]:rect[3], rect[0]:rect[2]],
                             cv2.COLOR_BGR2HSV)

    # keep only the hue channel
    before_H = before_HSV[:, :, 0]

    after_H = after_HSV[:, :, 0]

    # Compute SSIM between two images
    (score, diff) = structural_similarity(before_H, after_H, full=True)

    # TODO: also convert the score to pixels and incorporate it into the grip detection

    # print("Image similarity", score)

    # The diff image contains the actual image differences between the two images
    # and is represented as a floating point data type in the range [0,1]
    # so we must convert the array to 8-bit unsigned integers in the range
    # [0,255] before we can use it with OpenCV
    diff = (diff * 255).astype("uint8")

    # Threshold the difference image, followed by finding contours to
    # obtain the regions of the two input images that differ
    thresh = cv2.threshold(diff, 0, 255,
                           cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
    contours = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
    contours = contours[0] if len(contours) == 2 else contours[1]

    mask = np.zeros(newBase.shape, dtype='uint8')
    filled_after = (newImg[rect[1]:rect[3], rect[0]:rect[2]])

    for c in contours:
        area = cv2.contourArea(c)
        if area > 40:
            x, y, w, h = cv2.boundingRect(c)
            #cv2.rectangle(newBase, (x, y), (x + w, y + h), (36, 255, 12), 2)
            #cv2.rectangle(newImg, (x, y), (x + w, y + h), (36, 255, 12), 2)
            cv2.drawContours(mask, [c], 0, (0, 255, 0), -1)
            cv2.drawContours(filled_after, [c], 0, (0, 255, 0), -1)
    ScorePix = (filled_after.shape[0] * filled_after.shape[1]) * score
    return filled_after, score, ScorePix
Example #5
def compute_ssim(ground_truth, test_image):
    '''
    :param ground_truth: the ground truth images, format: [batch_size, w, h, d, c]
    :param test_image: the image you want to test
    :return: SSIM for a batch
    '''
    ssim = 0.0

    for i, item in enumerate(test_image):
        # structural_similarity lives in skimage.metrics in current scikit-image
        ssim_per_image = structural_similarity(
            ground_truth[i, :, :, :, 0], item[:, :, :, 0])
        ssim += ssim_per_image

    return float(ssim / len(test_image))
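
A minimal usage sketch for the [batch_size, w, h, d, c] layout described in the docstring, with hypothetical random volumes:

import numpy as np

# Hypothetical batch of two 16x16x16 single-channel volumes,
# laid out as [batch_size, w, h, d, c].
rng = np.random.default_rng(0)
gt = rng.random((2, 16, 16, 16, 1))
pred = np.clip(gt + 0.05 * rng.standard_normal(gt.shape), 0.0, 1.0)

print(compute_ssim(gt, pred))  # mean SSIM over the batch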
Example #6
def get_image_quality(format, image_path):
    gray_image = cv2.imread('./embed/' + image_path, cv2.IMREAD_GRAYSCALE)
    print('./embed/' + image_path)
    print('./embed/' + os.path.splitext(image_path)[0] + '.' + format)
    # NOTE: the embedded image path is hardcoded to 'monarch_lsb', not derived
    # from image_path as the print above suggests
    embed_image = cv2.imread('./embed/monarch_lsb.' + format,
                             cv2.IMREAD_GRAYSCALE)

    psnr = peak_signal_noise_ratio(gray_image, embed_image)
    psnr = '{:.4f}'.format(psnr)
    ssim = structural_similarity(gray_image, embed_image)
    ssim = '{:.8f}'.format(ssim)
    print('psnr: ', psnr)
    print('ssim: ', ssim)
    return psnr, ssim
Example #7
    def get_similarity(self, alphabet_letter):
        angle_range = range(-45, 46, 15)
        highest_similarity = None

        for angle in angle_range:
            rotated_letter = CaptchaCharacter(rotate(alphabet_letter, angle))
            similarity = structural_similarity(self.image,
                                               rotated_letter.image,
                                               multichannel=True,
                                               data_range=100.0)
            if highest_similarity is None or highest_similarity < similarity:
                highest_similarity = similarity

        return highest_similarity
Example #8
def validation(args, model, device, validation_loader, epoch):
    model.eval()

    validation_loss = 0
    psnr = 0
    ssim = 0

    with torch.no_grad():
        for samples, t in validation_loader:
            image_t1 = samples['image_1'].to(device)
            image_t2 = samples['image_2'].to(device)
            image_t3 = samples['image_3'].to(device)
            target = t['target'].to(device)
            output, fusion, image_mean = model(image_t1, image_t2, image_t3)
            loss_f = nn.MSELoss()
            real = target.cpu().numpy()
            pred = output.cpu().numpy()
            inp = image_mean.cpu().numpy()
            for i in range(real.shape[0]):
                validation_loss += loss_f(output[i, :, :, :],
                                          target[i, :, :, :])
                psnr += skm.peak_signal_noise_ratio(
                    real[i, :, :, :],
                    pred[i, :, :, :],
                    data_range=real[i, :, :, :].max() - real[i, :, :, :].min())
                ssim_bands = 0
                for j in range(real.shape[1]):
                    ssim_bands += skm.structural_similarity(
                        real[i, j, :, :],
                        pred[i, j, :, :],
                        data_range=real[i, j, :, :].max() -
                        real[i, j, :, :].min())

                ssim = ssim + ssim_bands / real.shape[1]

        tb.add_scalar("Loss_validation",
                      validation_loss / (len(validation_loader.dataset)),
                      epoch)
        tb.add_scalar("SSIM_validation",
                      ssim / (len(validation_loader.dataset)), epoch)
        tb.add_scalar("PSNR_validation",
                      psnr / (len(validation_loader.dataset)), epoch)

    validation_loss /= len(validation_loader.dataset)
    psnr /= len(validation_loader.dataset)
    ssim /= len(validation_loader.dataset)

    print(
        '\nValidation set: Average loss: {:.4f}, PSNR: ({:.2f} dB), SSIM: ({:.2f})\n'
        .format(validation_loss, psnr, ssim))
Example #9
    def execute(
        self, template_object: np.ndarray, target_object: np.ndarray, *_, **__
    ) -> FindItEngineResponse:
        resp = FindItEngineResponse()

        resized_target = cv2.resize(
            target_object, template_object.shape[::-1], interpolation=cv2.INTER_CUBIC
        )
        ssim = structural_similarity(resized_target, template_object)

        resp.append("conf", self.__dict__)
        resp.append("ssim", ssim, important=True)
        resp.append("ok", True, important=True)
        return resp
Example #10
    def _flag_photo_for_copy(self, photo: Path):
        with Image.open(photo) as img:  # type: JpegImageFile
            width, height = img.size
            if width < height:
                return None

            gray = self.grayscale(img)
            for exist in self.existing_images:
                if exist.size != gray.size:
                    continue
                if structural_similarity(gray, exist, multichannel=True) > 0.9:
                    return None

            return photo
Example #11
def get_ssim_score(image_fake, image_real):
    gray_real = cv2.cvtColor(image_real, cv2.COLOR_BGR2GRAY)
    gray_fake = cv2.cvtColor(image_fake, cv2.COLOR_BGR2GRAY)

    (score, diff_image) = structural_similarity(gray_real,
                                                gray_fake,
                                                full=True)

    # diff_image = (diff_image * 255).astype("uint8")
    # image_service.show_image(diff_image)

    print("SSIM: {}".format(score))

    return score
Example #12
def estimate_velocity(first_image, second_image, dt):
    """Find the relative shift between the two tiles."""
    # Use the structural similarity index as a weight for frame translation computation.
    sim, diff = structural_similarity(first_image, second_image, full=True)
    if sim == 1:
        print(sim)
    mask = np.ones_like(first_image).astype(bool)
    delta = feature.masked_register_translation(second_image,
                                                first_image,
                                                mask,
                                                overlap_ratio=3 / 10)
    # Reorient to translate matrix motion to 2D image.
    delta[0] = delta[0] * -1
    return delta / dt, sim
Example #13
def ssim(x, y):

    if isinstance(x, torch.Tensor): x = x.cpu().squeeze(1).numpy()
    if isinstance(y, torch.Tensor): y = y.cpu().squeeze(1).numpy()

    d_range = y.max() - y.min()

    # Batch images
    if x.ndim == 3:
        ssim = sum(
            structural_similarity(x[i], y[i], data_range=d_range)
            for i in range(len(x)))

        return ssim / len(x)

    elif x.ndim == 2:
        return structural_similarity(x, y, data_range=d_range)

    else:
        raise ValueError(
            f"Number of dims ({x.ndim}) is not compatible with the ssim "
            "function; must be 4 (torch.Tensor) or 2/3 (np.ndarray)")
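
A usage sketch covering both accepted input types (hypothetical shapes; tensors are assumed to carry a singleton channel dimension, which the function squeezes away):

import numpy as np
import torch

# Hypothetical batch of 4 single-channel 32x32 predictions/targets as tensors.
pred = torch.rand(4, 1, 32, 32)
target = torch.rand(4, 1, 32, 32)
print(ssim(pred, target))  # averaged slice by slice over the batch

# A single pair of 2D numpy arrays also works.
print(ssim(np.random.rand(32, 32), np.random.rand(32, 32)))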
Example #14
 def compare(self, f_quote: str, multichannel=True):
     images = [self.image_base.copy(), cv2.imread(f_quote)]
     shape_min = (min([i.shape[1]
                       for i in images]), min([i.shape[0] for i in images]))
     logging.debug('Comparing size: %s', shape_min)
     for i in range(len(images)):
         images[i] = Image.fromarray(images[i]).resize(
             shape_min, Image.LANCZOS)  # Image.ANTIALIAS was removed in Pillow 10
         if not multichannel:
             images[i] = images[i].convert('1')
         images[i] = numpy.array(images[i])
     return structural_similarity(images[0],
                                  images[1],
                                  multichannel=multichannel)
Example #15
def run(params):
	RTimageLocation = params['inputRTImagePath']
	GTimageLocation = params['inputGTImagePath']
	resultLocation = params['resultPath']
	resultLocationAdj = params['resultPathAdj']
	
	# Checking existence of temporary files (individual channels)
	if not os.path.exists(RTimageLocation):
		print(f'Error: {RTimageLocation} does not exist')
		return; 
	if not os.path.exists(GTimageLocation):
		print(f'Error: {GTimageLocation} does not exist')
		return; 
		
	# Loading input images
	RTData = imread(RTimageLocation)
	GTData = imread(GTimageLocation)
	print(f'Dimensions of Restored image: {RTData.shape}')
	print(f'Dimensions of GT image: {GTData.shape}')
	
	# Histogram matching
	matched_GTData = match_histograms(GTData, RTData).astype(RTData.dtype)
	
	# MSE measurement
	# valMSE = skimage.measure.compare_mse(RTData, GTData) # deprecated in scikit-image 0.18 
	valMSE = mean_squared_error(RTData, matched_GTData)
	print(f'___ MSE = {valMSE} ___')	# Value appears in the log if Verbosity option is set to 'Everything'
	
	# SSIM measurement
	outFullSSIM = structural_similarity(RTData, matched_GTData, full=True)
	
	# Extracting mean value (first item)
	outMeanSSIM = outFullSSIM[0]
	print(f'___ Mean SSIM = {outMeanSSIM} ___')
	
	# Extracting map (second item)
	outSSIM = outFullSSIM[1]
	print(f'Bit depth of SSIM array: {outSSIM.dtype}')
	
	# Convert output array whose range is [0-1] to adjusted bit range (8- or 16-bit)
	if RTData.dtype is np.dtype('u2'):
		outputData = img_as_uint(outSSIM)
	elif RTData.dtype is np.dtype('f4'):
		outputData = img_as_float32(outSSIM)	# necessary?
	else:
		outputData = img_as_ubyte(outSSIM)
	
	imsave(resultLocation, outputData)	
	imsave(resultLocationAdj, matched_GTData)
Example #16
def metrics_example(dataframe, noise_class_list):
    """Show metrics example 

    Arguments:
        dataframe {Dataframe} -- Dataframe that contains images path
        noise_class_list {List} -- List of noise type

    Returns:
        Dataframe -- Dataframe that contain metrics example
    """
    path = dataframe.iloc[0, 0]
    original_img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    original_img = np.array(original_img, np.float32)

    images = [original_img]

    df_error = pd.DataFrame({
        "Noise": [],
        "MSE": [],
        "NRMSE": [],
        "PSNR": [],
        "SSIM": []
    })

    for noise in noise_class_list:
        noised_img = noise.add(original_img)
        images.append(noised_img)

        noise_name = noise.__class__.__name__
        mse = metrics.mean_squared_error(original_img, noised_img)
        nrmse = metrics.normalized_root_mse(original_img, noised_img)
        psnr = metrics.peak_signal_noise_ratio(original_img,
                                               noised_img,
                                               data_range=255)
        ssim = metrics.structural_similarity(original_img, noised_img)

        df_error = df_error.append(
            {
                "Noise": noise_name,
                "MSE": mse,
                "NRMSE": nrmse,
                "PSNR": psnr,
                "SSIM": ssim
            },
            ignore_index=True)

    plot_im_grid_from_list(images, 5, 2)
    df_error.head(len(noise_class_list))
    return df_error
Example #17
def train(experiment_name, model, train_loader, optimizer, criterion, epoch, save_freq, device):
    epoch_loss = 0.0
    psnr = 0.0
    ssim = 0.0
    total_cnt = 0
    for idx, (inps, targets) in tqdm(enumerate(train_loader), total=len(train_loader),
                                   desc='{} epoch={}'.format('train', epoch), ncols=80, leave=False):

        
        inps = inps.to(device)
        targets = targets.to(device)
        
        outs = model(inps)
        outs = torch.clamp(outs, 0, 1)

        # build an image grid of the outputs (note: not actually written to tensorboard here)
        img_grid = torchvision.utils.make_grid(outs)

        optimizer.zero_grad()

        loss = criterion(targets, outs)
        
        for (out, target) in zip(outs, targets):
            out    = out.detach().cpu().numpy().transpose(1, 2, 0) * 255
            target = target.detach().cpu().numpy().transpose(1, 2, 0) * 255
            
            psnr += peak_signal_noise_ratio(target, out, data_range=255)
            ssim += structural_similarity(target, out, data_range=255, gaussian_weights=True, use_sample_covariance=False, multichannel=True)
            
            saved_dir = 'result/{}/{:04d}'.format(experiment_name, epoch)
            if not os.path.exists(saved_dir):
                os.makedirs(saved_dir, exist_ok=True)

            fname = saved_dir + '/{:04d}.jpg'.format(idx)
            
            temp = np.concatenate((target[:, :, :], out[:, :, :]), axis=1)
            im = Image.fromarray(np.uint8(temp))
            im.save(fname)

            total_cnt += 1
        
        epoch_loss += loss.item()

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    # end epoch 
    
    return epoch_loss / len(train_loader), psnr/total_cnt, ssim/total_cnt
Example #18
def calculate_ssim(img0, img1, data_range=None):
    """Calculate SSIM (Structural SIMilarity).

    Args:
        img0 (ndarray)
        img1 (ndarray)
        data_range (int, optional): Distance between minimum and maximum
            possible values. By default, this is estimated from the image
            data-type.

    Return:
        ssim (float)
    """
    ssim = skm.structural_similarity(img0, img1, data_range=data_range)
    return ssim
Example #19
def compare_images(image1, image2):
    """ This function measures the index of similarity
        of two images.
    """

    # Get two images - resize both to 1024 x 1024
    img_a = resize(cv.imread(image1), (2**10, 2**10))
    img_b = resize(cv.imread(image2), (2**10, 2**10))

    # If score == 1 it means that images are equal
    score, diff = structural_similarity(img_a,
                                        img_b,
                                        full=True,
                                        multichannel=True)
    return score
Example #20
def ssim(img_true, img_pred, y_channel=True):
    """SSIM with color space transform."""
    if y_channel:
        y_true = rgb_to_ycbcr(to_uint8(img_true, 0, 255), 255)[:, :, 0]
        y_pred = rgb_to_ycbcr(to_uint8(img_pred, 0, 255), 255)[:, :, 0]
    else:
        y_true = to_uint8(img_true, 0, 255)
        y_pred = to_uint8(img_pred, 0, 255)

    ssim_val = metrics.structural_similarity(y_true,
                                             y_pred,
                                             data_range=y_pred.max() -
                                             y_pred.min(),
                                             multichannel=not y_channel)
    return ssim_val
Example #21
def test_structural_similarity_dtype(dtype):
    N = 30
    X = np.random.rand(N, N)
    Y = np.random.rand(N, N)
    if np.dtype(dtype).kind in 'iub':
        X = (X * 255).astype(np.uint8)
        Y = (Y * 255).astype(np.uint8)
    else:
        X = X.astype(dtype, copy=False)
        Y = Y.astype(dtype, copy=False)

    S1 = structural_similarity(X, Y)
    assert S1.dtype == np.float64

    assert S1 < 0.1
Example #22
def compute_ssim(image1, image2):
    image1 = img_as_float(image1)
    image2 = img_as_float(image2)
    ssim_value = -10.0

    if image1.shape[2] != image2.shape[2]:
        sys.stderr.write(
            "ERROR: Images need to have the same number of channels to be "
            "compared, i.e. you cannot compare RGB vs grayscale or RGBA vs "
            "RGB. Please convert the images accordingly.\n"
        )
        exit(1)

    if image1.shape[:2] != image2.shape[:2]:
        tmp_ssim1 = structural_similarity(image1,
                                          resize(image2, image1.shape[:2]),
                                          multichannel=True)
        tmp_ssim2 = structural_similarity(resize(image1, image2.shape[:2]),
                                          image2,
                                          multichannel=True)
        ssim_value = (tmp_ssim1 + tmp_ssim2) / 2.0

    else:
        ssim_value = structural_similarity(image1, image2, multichannel=True)

    return ssim_value
Example #23
def similarity_ssim(image_path1, image_path2, dimension):
    gray = False
    image1 = get_image(image_path1,
                       resize_size=(dimension, dimension),
                       ndarray=True,
                       gray=gray)
    image2 = get_image(image_path2,
                       resize_size=(dimension, dimension),
                       ndarray=True,
                       gray=gray)
    ssim = structural_similarity(image1,
                                 image2,
                                 multichannel=not gray,  # '~gray' is a bitwise NOT and is always truthy
                                 data_range=image2.max() - image2.min())
    return ssim, image1, image2
Example #24
def calculate_psnr_and_ssim(psnrs, ssims, gt, predict):
    gt = np.where(gt > 1, 1, gt)
    gt = np.where(gt < 0, 0, gt)
    predict = np.where(predict > 1, 1, predict)
    predict = np.where(predict < 0, 0, predict)

    psnr = peak_signal_noise_ratio(gt, predict)
    ssim = structural_similarity(gt, predict, multichannel=True)
    print("psnr : ", psnr)
    print("ssim : ", ssim)
    psnrs.append(psnr)
    ssims.append(ssim)
    print(np.mean(psnrs))
    print(np.mean(ssims))
    return psnrs, ssims
Example #25
File: utils.py Project: sqyon/MDFN
 def append(self, x):
     ground_truth = x['gt']
     predict = x['output']
     shape = list(ground_truth.shape)
     N, C, Hv, Wv, Hp, Wp = shape
     gt = ground_truth.permute(0, 2, 3, 4, 5, 1)
     gt = gt.reshape(N * Hv * Wv, Hp, Wp, C)
     y_hat = predict.permute(0, 2, 3, 4, 5, 1)
     y_hat = y_hat.reshape(N * Hv * Wv, Hp, Wp, C)
     gt = gt.detach().cpu().numpy()
     y_hat = y_hat.detach().cpu().numpy()
     y_hat = np.clip(y_hat, 16 / 255, 235 / 255)
     for i in range(gt.shape[0]):
         self.psnr_list.append(peak_signal_noise_ratio(gt[i], y_hat[i]))
         self.ssim_list.append(structural_similarity(gt[i], y_hat[i], multichannel=True))
Example #26
def ssim(gt: np.ndarray, pred: np.ndarray, maxval: np.ndarray = None) -> float:
    """Compute Structural Similarity Index Metric (SSIM)"""
    if gt.ndim != 3:
        raise ValueError("Unexpected number of dimensions in ground truth.")
    if gt.ndim != pred.ndim:
        raise ValueError("Ground truth dimensions does not match pred.")

    maxval = np.max(gt) if maxval is None else maxval

    _ssim = sum(
        structural_similarity(
            gt[slice_num], pred[slice_num], data_range=maxval)
        for slice_num in range(gt.shape[0]))

    return _ssim / gt.shape[0]
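
A usage sketch with a hypothetical synthetic volume:

import numpy as np

# Hypothetical stack of 10 slices of 64x64, e.g. reconstruction vs. reference.
rng = np.random.default_rng(0)
gt_vol = rng.random((10, 64, 64)).astype(np.float32)
pred_vol = (gt_vol + 0.05 * rng.standard_normal(gt_vol.shape)).astype(np.float32)

print(ssim(gt_vol, pred_vol))             # maxval defaults to gt_vol.max()
print(ssim(gt_vol, pred_vol, maxval=1.0))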
Example #27
def calculate_structual_similarity_np(img_a: np.ndarray, img_b: np.ndarray) -> Tuple[float, np.ndarray]:
  #img_b = imageio.imread(path_original_plot)
  have_same_height = img_a.shape[0] == img_b.shape[0]
  have_same_width = img_a.shape[1] == img_b.shape[1]
  assert have_same_height and have_same_width
  score, diff_img = structural_similarity(
    im1=img_a,
    im2=img_b,
    full=True,
    multichannel=True
  )
  #imageio.imsave(path_out, diff)
  # to prevent -> "WARNING:imageio:Lossy conversion from float64 to uint8. Range [-0.9469735935228797, 1.0000000000019036]."
  #diff_img = diff_img.astype(np.uint8)
  return score, diff_img
Example #28
def test_denoise_tv_chambolle_weighting():
    # make sure a specified weight gives consistent results regardless of
    # the number of input image dimensions
    rstate = np.random.RandomState(1234)
    img2d = astro_gray.copy()
    img2d += 0.15 * rstate.standard_normal(img2d.shape)
    img2d = np.clip(img2d, 0, 1)

    # generate 4D image by tiling
    img4d = np.tile(img2d[..., None, None], (1, 1, 2, 2))

    w = 0.2
    denoised_2d = restoration.denoise_tv_chambolle(img2d, weight=w)
    denoised_4d = restoration.denoise_tv_chambolle(img4d, weight=w)
    assert_(structural_similarity(denoised_2d, denoised_4d[:, :, 0, 0]) > 0.99)
Example #29
def test_ssim():
    image = _read_image()
    image_batch = []
    image_noise_batch = []
    single_image_ssim = []
    N_repeat = 5
    for sigma in range(0, 101, 20):
        noise = sigma * np.random.rand(*image.shape)
        image_noise = (image + noise).astype(np.float32).clip(0, 255)

        for _ in range(N_repeat):
            ssim_skimage = structural_similarity(
                image,
                image_noise,
                win_size=11,
                multichannel=True,
                sigma=1.5,
                data_range=255,
                use_sample_covariance=False,
                gaussian_weights=True,
            )

        image_torch = (torch.from_numpy(image).unsqueeze(0).permute(
            0, 3, 1, 2))  # 1, C, H, W
        image_noise_torch = (
            torch.from_numpy(image_noise).unsqueeze(0).permute(0, 3, 1, 2))

        image_batch.append(image_torch)
        image_noise_batch.append(image_noise_torch)

        for _ in range(N_repeat):
            ssim_torch = batch_ssim(image_noise_torch,
                                    image_torch,
                                    win_size=11,
                                    data_range=255)

        ssim_torch = ssim_torch.numpy()
        single_image_ssim.append(ssim_torch)
        assert np.allclose(ssim_torch, ssim_skimage, atol=5e-4)

    image_batch = torch.cat(image_batch, dim=0)
    image_noise_batch = torch.cat(image_noise_batch, dim=0)
    ssim_batch = batch_ssim(image_noise_batch,
                            image_batch,
                            win_size=11,
                            data_range=255,
                            reduction="none")
    assert np.allclose(ssim_batch, single_image_ssim, atol=5e-4)
Example #30
    def _similarity(images: np.ndarray,
                    reference_index: int,
                    mode: str,
                    window_size: int,
                    temporal_median: Union[bool, np.ndarray] = False) -> Tuple[int, float]:
        """
        Internal function to compute the MSE as defined by Ruane et al. 2019.
        """

        @typechecked
        def _temporal_median(reference_index: int,
                             images: np.ndarray) -> np.ndarray:
            """
            Internal function to calculate the temporal median for all frames, except the one with
            the ``reference_index``.
            """

            image_m = np.concatenate((images[:reference_index], images[reference_index+1:]))

            return np.median(image_m, axis=0)

        image_x_i = images[reference_index]

        if isinstance(temporal_median, bool):
            image_m = _temporal_median(reference_index, images=images)
        else:
            image_m = temporal_median

        if mode == 'MSE':
            return reference_index, mean_squared_error(image_x_i, image_m)

        if mode == 'PCC':
            # calculate the covariance matrix of the flattened images
            cov_mat = np.cov(image_x_i.flatten(), image_m.flatten(), ddof=1)

            # the variances are stored in the diagonal, therefore take the sqrt to obtain std
            std = np.sqrt(np.diag(cov_mat))

            # does not matter whether [0, 1] or [1, 0] as cov_mat is symmetric
            return reference_index, cov_mat[0, 1] / (std[0] * std[1])

        if mode == 'SSIM':
            # winsize needs to be odd
            if int(window_size) % 2 == 0:
                winsize = int(window_size) + 1
            else:
                winsize = int(window_size)
            return reference_index, structural_similarity(image_x_i, image_m, win_size=winsize)