Example #1
 def compare_images(self, im1, im2, imageA, imageB):
     # compute the mean squared error and structural similarity
     # index for the images
     m = self.mse(imageA, imageB)
     s = ssim(imageA, imageB)
     tres = args['threshold']
     self.totalCompare += 1
     if s >= tres:
         print("Image[{c1}] '{p1}' compared to Image[{c2}] '{p2}' Simility:{sim}".format(c1=im1['comp'], c2=im2['comp'],p1=im1['path'], p2=im2['path'], sim=str(s)))
         twin = np.hstack([imageA, imageB])
         cv2.imshow('', twin)
         cv2.waitKey(0)
         self.searching = False
         self.totalFound += 1
         companyTotal = self.companyResults.get(im1['comp'])  # assuming results are keyed by company id ('img' was undefined here)
     elif self.searching is False:
         print('Searching...')
         self.searching = True
Example #2
 def crop(self, img):
     max_ssim = 0
     bbox = None
     resized = np.asarray(img.resize((192, 108)))
     height = img.height
     width = img.width
     for idx, ref in enumerate(self.references):
         cur_ssim = ssim(ref, resized, multichannel=True)
         if cur_ssim > max_ssim:
             max_ssim = cur_ssim
             bbox = self.reference_bboxes[idx]
     pil_bbox = [
         np.round(bbox[0] * width),
         np.round(bbox[1] * height),
         np.round(bbox[2] * width),
         np.round(bbox[3] * height)
     ]
     # crop with the pixel-space box computed above
     return img.crop(pil_bbox)
Example #3
def compare_images(imageA, imageB, title):
	# compute the mean squared error and structural similarity
	# index for the images
	m = mse(imageA, imageB)
	s = ssim(imageA, imageB)
	# setup the figure
	fig = plt.figure(title)
	plt.suptitle("MSE: %.2f, SSIM: %.2f" % (m, s))
	# show first image
	ax = fig.add_subplot(1, 2, 1)
	plt.imshow(imageA, cmap = plt.cm.gray)
	plt.axis("off")
	# show the second image
	ax = fig.add_subplot(1, 2, 2)
	plt.imshow(imageB, cmap = plt.cm.gray)
	plt.axis("off")
	# show the images
	plt.show()
Example #4
def compare_images(path1, path2, imageA, imageB):
    # compute the mean squared error and structural similarity
    # index for the images
    m = mse(imageA, imageB)
    s = ssim(imageA, imageB)
    tres = args['threshold']

    searchingWritten = False  # local flag; note it resets on every call
    if s >= tres:
        print("Image '{0}' comparet to '{1}' Simility:'{2}".format(
            [path1, path2, str(m), str(tres),
             str(s)]))
        twin = np.hstack([imageA, imageB])
        cv2.imshow('', twin)
        cv2.waitKey(0)
    elif searchingWritten is not True:
        print('Searching...')
        searchingWritten = True
Example #5
def showtime(imageRef, imageMod, imageModRGB):
    (score, diff) = ssim(imageRef, imageMod, full=True)
    diff = (diff * 255).astype("uint8")
    # st.sidebar.text("SSIM: {}".format(score))
    thresh = cv2.threshold(diff, 0, 255,
                           cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    contador = 0
    for c in cnts:
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(imageModRGB, (x, y), (x + w, y + h), (255, 0, 0), 2)
        print(cv2.boundingRect(c))
        contador = contador + 1
    print(contador)

    return contador, score, imageModRGB
Example #6
def get_ssim(image_labels, attacked_image_labels):
    """
    Given ImageLabels objects for an unattacked and an attacked image, calculate the SSIM score between them.

    Args:
        image_labels: the ImageLabels object for the unattacked image
        attacked_image_labels: the ImageLabels object for the attacked image
    Returns:
        ssim_val: the SSIM value calculated between the two images
    """
    img = io.imread(image_labels.img_path)
    attack_img = io.imread(attacked_image_labels.img_path)
    ssim_val = ssim(
            img,
            attack_img,
            data_range = attack_img.max() - attack_img.min(),
            multichannel=True)
    return ssim_val
Example #7
def main():
    argparser = argparse.ArgumentParser()
    argparser.add_argument('--image_dir', type=str, required=True)
    argparser.add_argument('--image_list_file', type=str, required=True)
    argparser.add_argument('--ssim_matrix_file', type=str, required=True)
    argparser.add_argument('--mse_matrix_file', type=str, required=True)
    argparser.add_argument('--image_symlink_dir', type=str, required=True)
    args = argparser.parse_args()

    image_dir = Path(args.image_dir)

    images = []
    for image in image_dir.glob('**/*'):
        if image.is_dir():
            continue
        images.append(image)

    # Save image list so we can identify images later
    images.sort()
    image_list_file = open(args.image_list_file, 'w')
    zfill_len = len(str(len(images)))
    for i in range(len(images)):
        image_list_file.write(f'{i}, {images[i]}\n')
        symlink_dir = Path(args.image_symlink_dir) / Path(
            str(i).zfill(zfill_len))
        symlink_dir.mkdir(exist_ok=True, parents=True)
        symlink_target = Path('../' * len(symlink_dir.parts)) / images[i]
        (symlink_dir / images[i].name).symlink_to(symlink_target)

    image_list_file.close()

    ssim_matrix_file = open(args.ssim_matrix_file, 'w')
    mse_matrix_file = open(args.mse_matrix_file, 'w')
    for i in range(len(images)):
        i_img = np.array(Image.open(images[i]))
        for j in range(i + 1, len(images)):
            j_img = np.array(Image.open(images[j]))
            ssim_matrix_file.write(
                str(ssim(i_img, j_img, multichannel=True)) + ',')
            mse_matrix_file.write(str(mean_squared_error(i_img, j_img)) + ',')
        ssim_matrix_file.write('\n')
        mse_matrix_file.write('\n')
    ssim_matrix_file.close()
    mse_matrix_file.close()
Example #8
def test_single(net, img, target, image_name, model_name, test_image):
    net.eval()
    tens = transforms.ToTensor()
    h, w, c = img.shape
    inp = tens(img).float()

    inp = inp.view((1, c, h, w))
    output = net(inp)

    o = output.view((c, h * 4, w * 4))
    o = o.data.numpy()
    o = np.swapaxes(o, 0, 1)
    o = np.swapaxes(o, 1, 2)

    bicub_res = rescale(img, (4, 4, 1), anti_aliasing=True)
    result = np.clip(o + bicub_res, 0., 1.)
    result = np.clip(correct_color_shift(target, result, samples=100), 0., 1.)
    if result.shape != target.shape:
        w1, h1, c1 = result.shape
        w2, h2, c2 = target.shape
        if w1 < w2:
            target = target[0:w1, :, :]
        elif w1 > w2:
            result = result[0:w2, :, :]
        if h1 < h2:
            target = target[:, 0:h1, :]
        elif h1 > h2:
            result = result[:, 0:h2, :]
    # PSNR
    score = psnr(result, target) * 1.10326
    sim = ssim(result, target, multichannel=True)
    if image_name == test_image:
        fig, ax1 = plt.subplots(1, 1)
        ax1.imshow(result)
        ax1.set_title(model_name)
        plt.show()
        #plt.imsave('results_reconstr/{m}/{n}'.format(m=os.path.splitext(model_name)[0], n=image_name), result)
    """if model_name == 'ENet-E.pth':
        io.imsave('quality_assessment/E/{x}'.format(x=image_name), result)
    elif model_name == 'ENet-PAT.pth':
        io.imsave('quality_assessment/PAT/{x}'.format(x=image_name), result)"""

    #print('Image name: %s  PSNR: %f  SSIM: %f' % (image_name, score, sim))
    return score, sim
Example #9
def test_skgpr_2d(kernel):
    R = np.load(test_data)
    R_ = np.load(test_expected_result)
    X = gprutils.get_sparse_grid(R)
    X_true = gprutils.get_full_grid(R)
    mean, _ = skgpr.skreconstructor(X,
                                    R,
                                    X_true,
                                    kernel=kernel,
                                    lengthscale=[[1., 1.], [4., 4.]],
                                    grid_points_ratio=1.,
                                    learning_rate=0.1,
                                    iterations=20,
                                    calculate_sd=False,
                                    num_batches=1,
                                    use_gpu=False,
                                    verbose=False).run()
    assert_(ssim(mean, R_) > 0.98)
    assert_(np.linalg.norm(mean - R_) < 1)
Example #10
 def compute_similarity(self, im, im_deg, sim_method='mutual_info'):
     im = np.array(im)
     im_deg = np.array(im_deg)
     if sim_method == 'mutual_info':
         return mutual_information(im, im_deg)
     elif sim_method == 'dice':
         return f1_score(
             im.astype(bool).ravel(),
             im_deg.astype(bool).ravel())
     elif sim_method == 'ssim':
         return ssim(im.astype(bool), im_deg.astype(bool))
     elif sim_method == 'var_info':
         under_seg, over_seg = variation_of_information(
             im.astype(bool), im_deg.astype(bool))
         return 1 - (under_seg + over_seg)
     elif sim_method == 'mse':
         return 1 - mean_squared_error(
             im.astype(bool).ravel(),
             im_deg.astype(bool).ravel())
Example #11
def bjorn_score(image1: np.ndarray, image2: np.ndarray) -> float:
    """Calculates the Bjorn score of two input images.
    0    = Two images are perfectly similar
    <0.1 = Two images are structurally similar
    1    = Two images are not similar

    Arguments:
        image1 {np.ndarray} -- Image 1
        image2 {np.ndarray} -- Image 2

    Returns:
        float -- bjorn score of image1 and image2
    """
    if image1.shape != image2.shape:
        # Images not matching
        return 1.0

    score = 1.0 - ssim(image1, image2, multichannel=True)
    return score
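A minimal usage sketch for bjorn_score (not part of the original project): two synthetic uint8 RGB arrays, one a lightly perturbed copy of the other, so the score stays close to 0.

import numpy as np

# Synthetic data: a random RGB image and a slightly noisier copy (hypothetical inputs).
rng = np.random.default_rng(0)
img_a = rng.integers(0, 256, size=(64, 64, 3), dtype=np.uint8)
noise = rng.integers(-10, 11, size=img_a.shape)
img_b = np.clip(img_a.astype(int) + noise, 0, 255).astype(np.uint8)

print(bjorn_score(img_a, img_a))  # ~0 -> identical images
print(bjorn_score(img_a, img_b))  # small value -> structurally similar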
Example #12
def compute_test_ssim(test_dataset, model):
    ssim_val = []
    model.eval()
    device = (torch.device("cuda:0")
              if torch.cuda.is_available() else torch.device("cpu"))
    model.to(device)
    for image, target in test_dataset:
        pred = model(wrap_input(image).to(device))
        pred = unwrap_output(pred[0])
        target = unwrap_output(target)
        ssim_val.append(
            ssim(
                target,
                pred,
                data_range=pred.max() - pred.min(),
                multichannel=True,
            ))

    return np.array(ssim_val).mean()
Example #13
def SSIM(X_test, X_hat, start_time=0):

    num_images, num_timesteps, _, _ = X_test.shape
    ssim_values = []

    for idx in range(num_images):
        for t in range(start_time, num_timesteps):
            img_truth = X_test[idx, t, :, :]
            img_hat = X_hat[idx, t, :, :]
            ssim_img = ssim(img_truth,
                            img_hat,
                            data_range=img_truth.max() - img_truth.min())
            if math.isnan(ssim_img):
                continue
            ssim_values.append(ssim_img)

    ssim_model = np.mean(ssim_values)

    return ssim_model, ssim_values
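A small usage sketch for the SSIM helper above, assuming hypothetical 4-D arrays shaped (num_images, num_timesteps, H, W), as the indexing implies.

import numpy as np

# Hypothetical ground-truth and predicted sequences: (num_images, num_timesteps, H, W).
rng = np.random.default_rng(0)
X_test = rng.random((2, 5, 32, 32))
X_hat = X_test + rng.normal(scale=0.01, size=X_test.shape)

mean_ssim, per_frame_ssim = SSIM(X_test, X_hat, start_time=1)
print(mean_ssim, len(per_frame_ssim))  # average over 2 images * 4 timesteps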
Example #14
def check_bug4():
    folder_bug = 'bug_right/'
    files = os.listdir(folder_bug)
    img_bug = [file for file in files if file.startswith('bug')]
    img = Image.open("current_screen.png")
    left = 305
    top = 42
    right = 320
    bottom = 72
    im1 = img.crop((left, top, right, bottom))
    im1.save('current_test.png')
    imgA = cv2.imread("current_test.png")
    for elem in img_bug:
        imgB = cv2.imread(folder_bug + elem)
        s = ssim(imgA, imgB, multichannel=True)
        if s > 0.9:
            print(s)
            return True
    return False
Example #15
def calculate_metrics(gt_img: np.ndarray,
                      recon_img: np.ndarray,
                      verbose=True) -> tuple:
    """
    Display PSNR, SSIM, SNR and MSE for reconstructed image
    against ground truth.

    Args:
        gt_img: ground truth image
        recon_img: reconstructed image
        verbose: whether to print the metrics or just return them
    Returns:
        tuple of metrics
    """
    assert gt_img.shape == recon_img.shape

    if isinstance(gt_img, Tensor):
        gt_img = gt_img.cpu().detach().numpy()
    if isinstance(recon_img, Tensor):
        recon_img = recon_img.cpu().detach().numpy()

    gt_img = np.array(gt_img, dtype=np.float64)
    recon_img = np.array(recon_img, dtype=np.float64)

    psnr = peak_signal_noise_ratio(gt_img,
                                   recon_img,
                                   data_range=recon_img.max() -
                                   recon_img.min())
    img_ssim = ssim(gt_img,
                    recon_img,
                    data_range=recon_img.max() - recon_img.min())
    snr = calculate_snr(gt_img, recon_img)
    mse = mean_squared_error(gt_img, recon_img)

    if verbose:
        print('============================')
        print(f'PSNR: {psnr}')
        print(f'SSIM: {img_ssim}')
        print(f'SNR: {snr}')
        print(f'MSE: {mse}')
        print('============================')

    return psnr, img_ssim, snr, mse
Example #16
def minl(lam, signal):
    print('lambda = ', lam)
    print('\n')

    # load the data
    timev, noise = np.loadtxt('noise.dat',
                              delimiter='\t',
                              usecols=(0, 1),
                              unpack=True)
    freq, asd = np.loadtxt('ruido_psd.dat',
                           delimiter='\t',
                           usecols=(0, 1),
                           unpack=True)
    psd = asd**2
    s = np.load('datagood.npy')
    ORIGINAL = s[signal]

    # take one second of data from the noise vector
    index = int(len(noise) / 2)
    RUIDO = noise[0 + index:index + int(len(noise) / 2)]

    # scale the signal to the corresponding SNR
    SNR = 8
    ESCALADA = set_snr(ORIGINAL, psd, SNR, freq, 16384, 1)[1]
    SUMA = ESCALADA + RUIDO
    SUMA = norm(SUMA)

    # run the ROF denoising code
    # gs1(f, h, beta, lam, tol)
    DENOISED = gs1(SUMA, 1, .01, lam, .001)
    ORIGINAL_MOD = gs1(ESCALADA, 1, .01, lam, .001)

    lower = 3700
    upper = 4600

    ppplot(norm(SUMA), norm(ORIGINAL_MOD), norm(DENOISED))
    np.save('DENOISED', DENOISED)
    np.save('SUMA', SUMA)
    np.save('ORIGINAL', ORIGINAL_MOD)
    #return(mse(norm(DENOISED)[lower:upper], norm(ORIGINAL_MOD)[lower:upper]))
    return (1 -
            ssim(norm(DENOISED)[lower:upper],
                 norm(ORIGINAL_MOD)[lower:upper]))
Example #17
def quality(truth, recon):
    """
    ALEX NOTE: Modified it so it uses sci-kit image's psnr, and also added data_range parameters
    """
    # for fixed images truth and reconstruction, evaluates average l2 value and ssim score
    amount_images = truth.shape[0]
    # recon = cut_image(recon)
    l2 = np.average(np.sqrt(np.sum(np.square(truth - recon), axis=(1, 2, 3))))
    psn = 0
    for k in range(amount_images):
        # psn = psn + psnr(truth[k,...,0], cut_image(recon[k,...,0]), data_range=1)
        psn = psn + psnr(truth[k, ..., 0], recon[k, ..., 0], data_range=1)
    psn = psn / amount_images
    ssi = 0
    for k in range(amount_images):
        # ssi = ssi + ssim(truth[k,...,0], cut_image(recon[k,...,0]), data_range=1)
        ssi = ssi + ssim(truth[k, ..., 0], recon[k, ..., 0], data_range=1)
    ssi = ssi / amount_images
    return [l2, psn, ssi]
Example #18
def evaluate(args):
    path_in = args.data_dir_in
    path_tar = args.data_dir_tar
    file_in = sorted(os.listdir(path_in))
    file_tar = sorted(os.listdir(path_tar))
    len_list_in = len(file_in)

    # calculate PSNR, SSIM
    psnr_avg = 0
    ssim_avg = 0
    ssim_avg_self = 0
    # SSIM_func = SSIM().cuda()

    for i in range(len_list_in):
        list_in = os.path.join(path_in, file_in[i])
        # list_tar = os.path.join(path_tar, file_tar[i//15])
        list_tar = os.path.join(path_tar, file_tar[i])
        img_in = cv2.imread(list_in)
        img_tar = cv2.imread(list_tar)

        # cast to float to avoid uint8 wrap-around in the difference
        mse = ((img_in.astype(float) - img_tar.astype(float))**2).mean()
        # psnr_tmp = 10 * log10(255 * 255 / (mse + 10 ** (-10)))
        # psnr_avg += psnr_tmp
        psnr_tmp = psnr(img_in, img_tar, data_range=255)
        psnr_avg += psnr_tmp

        ssim_tmp = ssim(img_in, img_tar, data_range=255, multichannel=True)
        ssim_avg += ssim_tmp

        # img_in_torch, img_tar_torch = RGB_np2tensor(img_in, img_tar)
        # c, h, w = img_in_torch.shape
        # img_in_torch = torch.reshape(img_in_torch, (1, c, h, w))
        # img_tar_torch = torch.reshape(img_tar_torch, (1, c, h, w))
        # ssim_tmp_self = SSIM_func(img_in_torch, img_tar_torch)
        # ssim_avg_self += ssim_tmp_self
        print('%s: PSNR = %2.5f, SSIM = %2.5f' %
              (file_in[i], psnr_tmp, ssim_tmp))

    psnr_avg = psnr_avg / len_list_in
    ssim_avg = ssim_avg / len_list_in
    # ssim_avg_self = ssim_avg_self / len_list_in
    print('avg psnr = %2.5f, avg SSIM = %1.5f' % (psnr_avg, ssim_avg))
Example #19
def parse_video(images, video, frames_jump_comparison, verbose=False):
    #iterate through video frames

    #similarities = [{'frame': 0, 'similarity': 0}]
    count = 0

    #get current time
    fps_time = time.perf_counter()  # time.clock() no longer exists in Python 3.8+

    cap = cv2.VideoCapture(video)
    total_frame = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    highlights_frame_number = []

    while (cap.isOpened()):
        ret, frame = cap.read()

        #break at EOF
        if frame is None:
            break

        #increment frame counter

        print(count)
        print(total_frame)

        count += 1

        #resize current video frame
        small_frame = cv2.resize(frame, (10, 10))
        #convert to greyscale
        small_frame_bw = color.rgb2gray(small_frame)

        similarity = []
        #compare current frame to source image
        for i in range(len(images)):
            similarity.append(ssim(images[i], small_frame_bw))

            if (similarity[i] > 0.95):
                highlights_frame_number.append(count)

    cap.release()
    return highlights_frame_number
Example #20
def get_metrics(data_folder_1, data_folder_2, metric):
    metric_vals = []

    for i in tqdm(range(10000)):
        file_name = str(i) + ".png"
        file_path_1 = os.path.join(data_folder_1, file_name)
        file_path_2 = os.path.join(data_folder_2, file_name)

        image1 = np.array(Image.open(file_path_1).convert('RGB')) / 255.

        image2 = (np.array(Image.open(file_path_2).convert('RGB')) /
                  255.)[:, :, :3]
        if metric == "ssim":
            metric_vals.append(ssim(image1, image2, multichannel=True))
        elif metric == "psnr":
            metric_vals.append(psnr(image1, image2))
        elif metric == "mse":
            metric_vals.append(1 - mse(image1, image2))

    return metric_vals
Example #21
    def print_score(ref_img, res_img):
        """
        Compute SSIM score on two images and print a result.

        :param ref_img: reference image
        :param res_img: result image
        """
        # score = jaccard_score(ref_img.flatten(), res_img.flatten(), average='macro')
        ref_img_float = img_as_float(ImageProcess.to_gray(ref_img))
        res_img_float = img_as_float(ImageProcess.to_gray(res_img))
        score = ssim(ref_img_float,
                     res_img_float,
                     data_range=res_img_float.max() - res_img_float.min(),
                     gaussian_weights=True)
        print("-------------------------")
        print(
            "SSIM: ",
            round(score, 2),
        )
        print("-------------------------")
Example #22
def struc_sim(target, img):
	'''
	Calculate structure similarity between 2 images.
	-------------------------------------------------
	param: target: numpy array image in "RGB"
	param: img: numpy array image in "RGB"
	'''
	# target image original size.
	target_dim = (target.shape[1], target.shape[0])
	# if either target dimension exceeds 1000, downscale by 4x
	if target_dim[0] > 1000 or target_dim[1] > 1000:
		target_dim = tuple([int(d*0.25) for d in target_dim])
	# Downsize target image to be smaller
	target = cv2.resize(target, target_dim, interpolation=cv2.INTER_AREA)
	# resize img to match target size
	img = cv2.resize(img, target_dim, interpolation=cv2.INTER_AREA)
	# convert target image to gray scale
	target_gray = cv2.cvtColor(target, cv2.COLOR_RGB2GRAY)
	img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
	return ssim(target_gray, img_gray)
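A minimal usage sketch for struc_sim; the file names are hypothetical, and since OpenCV loads BGR, the arrays are converted to RGB as the docstring expects.

import cv2

# Hypothetical inputs: any two images with comparable content.
target = cv2.cvtColor(cv2.imread('ref.jpg'), cv2.COLOR_BGR2RGB)
img = cv2.cvtColor(cv2.imread('candidate.jpg'), cv2.COLOR_BGR2RGB)

score = struc_sim(target, img)  # SSIM of the grayscale, size-matched images
print(round(score, 3))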
Example #23
def checkSsim(cap: PILImg, tpl: PILImg, loc):
    box = (int(loc[0] * tpl.size[0]), int(loc[1] * tpl.size[1]),
           int(loc[2] * tpl.size[0]), int(loc[3] * tpl.size[1]))

    size = tpl.size
    cap = cap.resize(size).crop(box).convert("L")
    tpl = tpl.resize(size).crop(box).convert("L")

    # for very small window
    size = tpl.size
    while size[0] < 20 or size[1] < 20:
        size = (size[0] * 2, size[1] * 2)

    cap = cap.resize(size)
    tpl = tpl.resize(size)
    # cap.show()
    # tpl.show()
    likeness = ssim(pil_plugin.pil_to_ndarray(cap),
                    pil_plugin.pil_to_ndarray(tpl))
    return likeness
Example #24
def to_ssim_skimage(dehaze, gt):
    dehaze_list = torch.split(dehaze, 1, dim=0)
    gt_list = torch.split(gt, 1, dim=0)

    dehaze_list_np = [
        dehaze_list[ind].permute(0, 2, 3, 1).data.cpu().numpy().squeeze()
        for ind in range(len(dehaze_list))
    ]
    gt_list_np = [
        gt_list[ind].permute(0, 2, 3, 1).data.cpu().numpy().squeeze()
        for ind in range(len(dehaze_list))
    ]
    ssim_list = [
        ssim(dehaze_list_np[ind],
             gt_list_np[ind],
             data_range=1,
             multichannel=True) for ind in range(len(dehaze_list))
    ]

    return ssim_list
Example #25
def SSIM(tensor1, tensor2, zoom=False):
    ssim_list = []
    for slice_idx in [50, 80, 110, 140]:
        if zoom:
            side_a = slice_idx
            side_b = slice_idx + 60
            img1, img2 = tensor1[side_a:side_b, side_a:side_b,
                                 105], tensor2[side_a:side_b, side_a:side_b,
                                               105]
        else:
            img1, img2 = tensor1[slice_idx, :, :], tensor2[slice_idx, :, :]
        img1 = img_as_float(img1)
        img2 = img_as_float(img2)
        ssim_val = ssim(img1, img2)
        if ssim_val != ssim_val:  # NaN check (NaN is never equal to itself)
            print('\n\n Error @ SSIM')
            sys.exit()
        ssim_list.append(ssim_val)
    ssim_avg = sum(ssim_list) / len(ssim_list)
    return ssim_avg
Example #26
def compare_images(img1, img2, SSIM_FACTOR=0.8):
    from skimage.metrics import structural_similarity as ssim
    import cv2  # opencv-python

    # load the two images and convert them to grayscale
    imageA = cv2.imread(img1)
    image_A = cv2.cvtColor(imageA, cv2.COLOR_BGR2GRAY)

    imageB = cv2.imread(img2)
    image_B = cv2.cvtColor(imageB, cv2.COLOR_BGR2GRAY)

    ### Structural Similarity Measure
    s = ssim(image_A, image_B)
    print("SSIM(x,y) = " + str(s))

    return s > SSIM_FACTOR
Example #27
def compute_ssim_rmse():
    """ Compute RMSE and SSIM for images in given dir """

    # results_dir = '../../enlighten_gan/EnlightenGAN/ablation/enlightengan_retrain/test_latest/images'
    true_img_dir = '../datasets/lol/eval15/high'
    # results_dir = '/raid/pkmandke/projects/cv_project/results/ex3tr5_bilinear_rmse/400/images'
    results_dir = './results/clahe/lol/eval15'

    f = open('./results/clahe_final.txt', 'w')
    ssim_sum, rmse_sum = 0., 0.
    for file in os.listdir(results_dir):
        # if file.endswith('.png') and 'fake_B' in file:
        if True:
            true = file
            # true = file.split('.')[0].split('_')[1] + '.png'
            # true = file.split('_')[0] + '.png'
            pred = cv2.cvtColor(cv2.imread(os.path.join(results_dir, file)),
                                cv2.COLOR_BGR2RGB)
            high = cv2.cvtColor(cv2.imread(os.path.join(true_img_dir, true)),
                                cv2.COLOR_BGR2RGB)

            normed = lambda x: (x - x.min()) / (x.max() - x.min())
            pred = normed(pred)
            high = normed(high)

            cur_rmse = mean_squared_error(pred, high)  # note: this is MSE, not RMSE, despite the name
            cur_ssim = ssim(pred,
                            high,
                            data_range=high.max() - high.min(),
                            multichannel=True,
                            win_size=11)
            ssim_sum += cur_ssim
            rmse_sum += cur_rmse
            f.write("Image: {}, SSIM = {}, RMSE = {}\n".format(
                true, cur_ssim, cur_rmse))

    f.write("Mean SSIM = {}, Mean RMSE = {}\n".format(
        ssim_sum / len(os.listdir(true_img_dir)),
        rmse_sum / len(os.listdir(true_img_dir))))

    f.close()
Example #28
def write_gs_summary(slm_field, recon_field, target_amp, k, writer, roi=(880, 1600), prefix='test'):
    """tensorboard summary for GS"""
    slm_phase = slm_field.angle()
    recon_amp, recon_phase = recon_field.abs(), recon_field.angle()
    loss = nn.MSELoss().to(recon_amp.device)

    recon_amp = crop_image(recon_amp, target_shape=roi, stacked_complex=False)
    target_amp = crop_image(target_amp, target_shape=roi, stacked_complex=False)

    recon_amp *= (torch.sum(recon_amp * target_amp, (-2, -1), keepdim=True)
                  / torch.sum(recon_amp * recon_amp, (-2, -1), keepdim=True))

    loss_value = loss(recon_amp, target_amp)
    psnr_value = psnr(target_amp.squeeze().cpu().detach().numpy(), recon_amp.squeeze().cpu().detach().numpy())
    ssim_value = ssim(target_amp.squeeze().cpu().detach().numpy(), recon_amp.squeeze().cpu().detach().numpy())

    if writer is not None:
        writer.add_image(f'{prefix}_Recon/amp', recon_amp.squeeze(0), k)
        writer.add_scalar(f'{prefix}_loss', loss_value, k)
        writer.add_scalar(f'{prefix}_psnr', psnr_value, k)
        writer.add_scalar(f'{prefix}_ssim', ssim_value, k)
Example #29
def FrameSimilarity(frames_jpg_path):
    # calculates the "structured similarity index" between adjacent frames
    # ssim() looks at luminance, contrast and structure, it is a scikit-image function
    # we use ssim() for both (1) Shot Change detection, and (2) Action weight
    files = [f for f in os.listdir(frames_jpg_path) if isfile(join(frames_jpg_path,f))]
    files.sort()
    # initialize array
    ssi_array = []
    # number of adjacent frames
    numadj = len(files)-2
    # loop through all adjacent frames and calculate the ssi
    for i in range (0, numadj):
    # for i in range (0, 4000):
        frame_a = cv2.imread(frames_jpg_path+'frame'+str(i)+'.jpg')
        frame_b = cv2.imread(frames_jpg_path+'frame'+str(i+1)+'.jpg')
        frame_a_bw = cv2.cvtColor(frame_a, cv2.COLOR_BGR2GRAY)
        frame_b_bw = cv2.cvtColor(frame_b, cv2.COLOR_BGR2GRAY)
        ssim_ab = ssim(frame_a_bw, frame_b_bw)
        ssim_ab = round(ssim_ab, 3)
        ssi_array.append(ssim_ab)
    return (ssi_array)
Example #30
    def compute_mse_ssim_scores(self):
        for img_type in self.image_types:
            images = os.listdir(self.model_path)
            mse_arr = []
            ssim_arr = []
            # mse_info = []
            for img_name in images:
                if img_type in img_name:
                    orig_img = img_as_float(rgb2gray(io.imread(os.path.join(self.gt_path, img_name))))
                    mask_img = img_as_float(rgb2gray(io.imread(os.path.join(self.model_path, img_name))))

                    mse_mask = mean_squared_error(orig_img, mask_img)
                    # images are grayscale floats in [0, 1] after img_as_float, so data_range is 1.0 and multichannel does not apply
                    ssim_mask = ssim(orig_img, mask_img, gaussian_weights=True, sigma=1.5, use_sample_covariance=False, data_range=1.0)

                    mse_arr.append(mse_mask)
                    ssim_arr.append(ssim_mask)
                    # mse_info.append({'image_name': img_name, 'image_type':img_type, 'mse': mse_mask, 'ssim': ssim_mask})
            # self.write_list_to_csv(mse_info, mse_info[0].keys(),
            #                        filename='inference_info_' + img_type + '_' + self.model_name + '.csv')
            self.mse_avg[img_type], self.mse_std[img_type] = np.mean(mse_arr), np.std(mse_arr)
            self.ssim_avg[img_type], self.ssim_std[img_type] = np.mean(ssim_arr), np.std(ssim_arr)