def closure_dip():

        global i, out_avg, psnr_noisy_last, last_net, net_input, losses, psnrs, ssims, average_dropout_rate, no_layers,\
               img_mean, sample_count, recons, uncerts, loss_last

        if reg_noise_std > 0:
            net_input = net_input_saved + (noise.normal_() * reg_noise_std)

        out = net(net_input)
        out[:, :1] = out[:, :1].sigmoid()

        _loss = mse(out[:, :1], img_noisy_torch)
        _loss.backward()

        # Smoothing
        if out_avg is None:
            out_avg = out.detach()
        else:
            out_avg = out_avg * exp_weight + out.detach() * (1 - exp_weight)

        losses.append(mse(out_avg[:, :1], img_noisy_torch).item())

        _out = out.detach().cpu().numpy()[0, :1]
        _out_avg = out_avg.detach().cpu().numpy()[0, :1]

        psnr_noisy = compare_psnr(img_noisy_np, _out)
        psnr_gt = compare_psnr(img_np, _out)
        psnr_gt_sm = compare_psnr(img_np, _out_avg)

        ssim_noisy = compare_ssim(img_noisy_np[0], _out[0])
        ssim_gt = compare_ssim(img_np[0], _out[0])
        ssim_gt_sm = compare_ssim(img_np[0], _out_avg[0])

        psnrs.append([psnr_noisy, psnr_gt, psnr_gt_sm])
        ssims.append([ssim_noisy, ssim_gt, ssim_gt_sm])

        if PLOT and i % show_every == 0:
            print(
                f'Iteration: {i} Loss: {_loss.item():.4f} PSNR_noisy: {psnr_noisy:.4f} PSNR_gt: {psnr_gt:.4f} PSNR_gt_sm: {psnr_gt_sm:.4f}'
            )

            out_np = _out

            psnr_noisy = compare_psnr(img_noisy_np, out_np)
            psnr_gt = compare_psnr(img_np, out_np)

            # PSNR of the running average accumulated in img_mean over sample_count samples
            if sample_count != 0:
                psnr_mean = compare_psnr(img_np, img_mean / sample_count)
            else:
                psnr_mean = 0

            print('###################')

            recons.append(out_np)

        i += 1

        return _loss

def closure_sgld():

        global i, out_avg, psnr_noisy_last, last_net, net_input, losses, psnrs, ssims, average_dropout_rate, no_layers, img_mean, sample_count, recons, uncerts, loss_last

        add_noise(net, LR * (0.9996**i))

        if reg_noise_std > 0:
            net_input = net_input_saved + (noise.normal_() * reg_noise_std)

        out = net(net_input)
        out[:, :1] = out[:, :1].sigmoid()

        _loss = mse(out[:, :1], img_noisy_torch)
        _loss.backward()

        # Smoothing
        if out_avg is None:
            out_avg = out.detach()
        else:
            out_avg = out_avg * exp_weight + out.detach() * (1 - exp_weight)

        losses.append(mse(out_avg[:, :1], img_noisy_torch).item())

        _out = out.detach().cpu().numpy()[0, :1]
        _out_avg = out_avg.detach().cpu().numpy()[0, :1]

        psnr_noisy = compare_psnr(img_noisy_np, _out)
        psnr_gt = compare_psnr(img_np, _out)
        psnr_gt_sm = compare_psnr(img_np, _out_avg)

        ssim_noisy = compare_ssim(img_noisy_np[0], _out[0])
        ssim_gt = compare_ssim(img_np[0], _out[0])
        ssim_gt_sm = compare_ssim(img_np[0], _out_avg[0])

        psnrs.append([psnr_noisy, psnr_gt, psnr_gt_sm])
        ssims.append([ssim_noisy, ssim_gt, ssim_gt_sm])

        if PLOT and i % show_every == 0:
            print(
                f'Iteration: {i} Loss: {_loss.item():.4f} PSNR_noisy: {psnr_noisy:.4f} PSNR_gt: {psnr_gt:.4f} PSNR_gt_sm: {psnr_gt_sm:.4f}'
            )

            out_np = _out
            recons.append(out_np)

            out_np_var = np.var(np.array(recons[-mc_iter:]), axis=0)[:1]

            print('mean epi', out_np_var.mean())
            print('###################')

            uncerts.append(out_np_var)

        i += 1

        return _loss
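A minimal sketch of how these closures are typically driven in a Deep Image Prior / SGLD training loop. The optimizer choice and the names net, LR, and num_iter are assumptions about the surrounding script, not part of the snippet above; the closures already call backward(), so the loop only zeroes gradients and steps.

import torch

optimizer = torch.optim.Adam(net.parameters(), lr=LR)
for _ in range(num_iter):
    optimizer.zero_grad()
    loss = closure_dip()   # or closure_sgld() for the SGLD variant
    optimizer.step()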
Example #3
def ssim_mse_psnr(image_true, image_test):
    image_true = Any2One(image_true)
    image_test = Any2One(image_test)
    mse = compare_mse(image_true, image_test)
    ssim = compare_ssim(image_true, image_test)
    psnr = compare_psnr(image_true, image_test, data_range=255)
    return ssim, mse, psnr
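A small usage sketch for the helper above, assuming Any2One rescales its inputs to uint8 in [0, 255] and that compare_mse / compare_ssim / compare_psnr are the skimage metrics:

import numpy as np

rng = np.random.default_rng(0)
clean = rng.integers(0, 256, size=(64, 64), dtype=np.uint8)
noisy = np.clip(clean + rng.normal(0, 10, clean.shape), 0, 255).astype(np.uint8)

ssim_val, mse_val, psnr_val = ssim_mse_psnr(clean, noisy)
print(f"SSIM={ssim_val:.4f}  MSE={mse_val:.2f}  PSNR={psnr_val:.2f} dB")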
Example #4
def calc_landmark_ssim_score(X, X_recon, lms, wnd_size=None):
    if wnd_size is None:
        wnd_size = (X_recon.shape[-1] // 16) - 1
    input_images = vis.to_disp_images(X, denorm=True)
    recon_images = vis.to_disp_images(X_recon, denorm=True)
    data_range = 255.0 if input_images[0].dtype == np.uint8 else 1.0
    nimgs = len(input_images)
    nlms = len(lms[0])
    scores = np.zeros((nimgs, nlms), dtype=np.float32)
    for i in range(nimgs):
        S = compare_ssim(
            input_images[i],
            recon_images[i],
            data_range=data_range,
            multichannel=True,
            full=True,
        )[1]
        S = S.mean(axis=2)
        for lid in range(nlms):
            x = int(lms[i, lid, 0])
            y = int(lms[i, lid, 1])
            t = max(0, y - wnd_size // 2)
            b = min(S.shape[0] - 1, y + wnd_size // 2)
            l = max(0, x - wnd_size // 2)
            r = min(S.shape[1] - 1, x + wnd_size // 2)
            wnd = S[t:b, l:r]
            with warnings.catch_warnings():
                warnings.simplefilter("ignore", category=RuntimeWarning)
                scores[i, lid] = wnd.mean()
    return np.nan_to_num(scores)
Example #5
 def calculateDifference(self, image):            
     image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
     resize_height = 150
     scale_percent = (resize_height / image_gray.shape[1])*100
     resize_width = int(image_gray.shape[0] * scale_percent / 100) 
     image_gray = cv2.resize(image_gray, (resize_height, resize_width))
     minFlag = ""
     minFlagVal = 0
     
     for file, filename in self.flags:
         image2 = file
         image2_gray = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
         #image2_gray = image2
         (H,W) = image_gray.shape
         image2_gray = cv2.resize(image2_gray, (W,H))
         (score, diff) = compare_ssim(image_gray, image2_gray, full=True)
         
         histr_screen = cv2.calcHist([image_gray],[0],None,[256],[0,256])
         histr_screen = cv2.normalize(histr_screen, histr_screen).flatten()
         
         histr_flag = cv2.calcHist([image2],[0],None,[256],[0,256])
         histr_flag = cv2.normalize(histr_flag, histr_flag).flatten()  
             
         d = cv2.compareHist(histr_screen, histr_flag, cv2.HISTCMP_BHATTACHARYYA)
         overall_score = score * (1/d)
         
         if overall_score > minFlagVal:
             minFlagVal = overall_score
             minFlag = filename
             self.FRAME['text'] = minFlag.replace(".png", "")
def processDir_TempDisparity(fnames):
    # need to process one directory at a time, skip first image
    # requires passing the current directory's list of images
    # idea is that each image will have a 4th channel of the differences between it and the previous image
    # therefore the first image in a directory will have nothing in the 4th channel
    # from skimage.measure import compare_ssim
    from skimage.metrics import structural_similarity as compare_ssim
    import imutils
    import cv2
    import numpy
    firstImage = True
    for i in range(len(fnames)):
        if firstImage:
            firstImage = False
            pass
        else:
            # load images in grayscale
            img1 = cv2.imread(fnames[i], 0)
            img2 = cv2.imread(fnames[i-1], 0) # comparing to previous image
            # compute difference via SSIM, make image we can add to 4th channel
            (score, diff) = compare_ssim(img1, img2, full=True)
            diff = (diff * 255).astype("uint8")
            # cv2.imshow("diff", diff)
            # cv2.waitKey(10)
            original_img1 = cv2.imread(fnames[i], -1)
            result_image = numpy.dstack((original_img1, diff))
            image_path = fnames[i]
            saveResult(result_image, image_path)
Example #7
    def ssim(self, image, ref, roi=None):
        '''
        image: str or array
        ref: str or array
        '''

        # Read image if it is a string

        if type(image) == str:
            image = imread(image).astype(float)

        if type(ref) == str:
            ref = imread(ref).astype(float)

        if roi is not None:
            image = image[roi]
            ref = ref[roi]

        max_ref = ref.max()
        min_ref = ref.min()

        ranges = max_ref - min_ref
        ssim = compare_ssim(
            image, ref,
            data_range=ranges)  # skimage.measure.compare_ssim is the deprecated alias of this call

        return ssim
Example #8
def ssim_images(images_paths1, images_paths2):
    assert len(images_paths1) == len(images_paths2)
    x1 = np.array([general_pipeline(filename) for filename in images_paths1])
    x2 = np.array([general_pipeline(filename) for filename in images_paths2])
    similarity = np.array(
        [compare_ssim(*img, multichannel=True) for img in zip(x1, x2)])
    return similarity
Example #9
File: sefa.py  Project: McHz1s/genforce
 def inference(self):
     re_sefa_info = SefaInfo(self.distances, self.values, self.num_sem,
                             self.num_sam)
     for sam_id in tqdm(range(self.num_sam), desc='Sample ', leave=False):
         code = self.codes[sam_id:sam_id + 1]
         for sem_id in tqdm(range(self.num_sem),
                            desc='Semantic ',
                            leave=False):
             boundary = self.boundaries[sem_id:sem_id + 1]
             img_list, ssim_list = [], []
             for col_id, d in enumerate(self.distances, start=1):
                 temp_code = code.copy()
                 if self.gan_type == 'pggan':
                     temp_code += boundary * d
                     image = self.generator(to_tensor(temp_code))['image']
                 elif self.gan_type in ['stylegan', 'stylegan2']:
                     temp_code[:, self.layers, :] += boundary * d
                     image = self.generator.synthesis(
                         to_tensor(temp_code))['image']
                 image = post_process(image, transpose=True)[0]
                 gt_image = self.gt_loader[col_id - 1]
                 ssim = compare_ssim(image, gt_image, multichannel=True)
                 img_list.append(image)
                 ssim_list.append(ssim)
             re_sefa_info.add(sam_id, img_list, ssim_list)
     return re_sefa_info
Example #10
 def _handle_image(self,
                   input_path,
                   output_path,
                   compare_path=None,
                   abs_out_dir=None,
                   filename=None):
     img1 = cv2.imread(input_path)
     img2 = cv2.imread(compare_path)
     evaluation = self.cfg['evaluate']
     if 'f1' in evaluation:
         bin1 = binaryzation(img1, max=1)
         bin2 = binaryzation(img2, max=1)
         f1_score = f1(bin1, bin2)
         self.history_eval['f1'].append(f1_score)
         print('   f1: %f' % f1_score)
     if 'f2' in evaluation:
         bin1 = binaryzation(img1, max=1)
         bin2 = binaryzation(img2, max=1)
         f2_score = f2(bin1, bin2)
         self.history_eval['f2'].append(f2_score)
         print('   f2: %f' % f2_score)
     if 'psnr' in evaluation:
         psnr = compare_psnr(img1, img2)
         self.history_eval['psnr'].append(psnr)
         print('   psnr: %f' % psnr)
     if 'ssim' in evaluation:
         ssim = compare_ssim(img1, img2, multichannel=True)
         self.history_eval['ssim'].append(ssim)
         print('   ssim: %f' % ssim)
Example #11
def find_image_score(img_a, img_b, height, width):
    """
    Calculates the image similarity score and execution time

    Arguments:
        img_a {[image]} -- [image1 from the input file].
        img_b {[image]} -- [image2 from the input file].

    Returns:
        score           -- Score of the similarity between the images compared.
        execution_time  -- Time in secs it takes to run the score for each set of images.
    """
    try:
        start = timeit.default_timer()
        print(f"height: {height} Width: {width}")
        std_dimensions = (height, width)
        img_a_resized = cv2.resize(cv2.imread(img_a), std_dimensions)
        img_b_resized = cv2.resize(cv2.imread(img_b), std_dimensions)

        ssim, _ = compare_ssim(img_a_resized,
                               img_b_resized,
                               full=True,
                               multichannel=True)
        score = round(1 - ssim, 2)

        stop = timeit.default_timer()
        execution_time = round(stop - start, 2)
        return [score, execution_time]

    except Exception as err:
        return f"find_image_score:{err}"
def ssim(frames1, frames2):
    error = 0
    for i in range(len(frames1)):
        error += compare_ssim(frames1[i],
                              frames2[i],
                              multichannel=True,
                              win_size=51)
    return error / len(frames1)
Example #13
File: metrics.py  Project: zzh-tech/RSCD
def ssim_calculate(x, y):
    ssim = compare_ssim(y,
                        x,
                        multichannel=True,
                        gaussian_weights=True,
                        sigma=1.5,
                        use_sample_covariance=False,
                        data_range=255)
    return ssim
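A brief usage sketch, assuming x and y are HxWx3 uint8 images as implied by data_range=255 and multichannel=True:

import numpy as np

pred = np.random.randint(0, 256, (128, 128, 3), dtype=np.uint8)
gt = np.random.randint(0, 256, (128, 128, 3), dtype=np.uint8)
print(ssim_calculate(pred, gt))  # near 0 for unrelated noise, 1.0 for identical images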
Example #14
def batch_SSIM(img, imclean, data_range):
    Img = img.data.cpu().numpy().astype(np.float32)
    Iclean = imclean.data.cpu().numpy().astype(np.float32)
    SSIM = 0
    for i in range(Img.shape[0]):
        SSIM += compare_ssim(Iclean[i, 0, :, :],
                             Img[i, 0, :, :],
                             data_range=data_range)
    return (SSIM / Img.shape[0])
Example #15
def calc_ssim(learned, real, data_range=1.0):
    learned = learned.data.cpu().numpy().astype(np.float32)
    real = real.data.cpu().numpy().astype(np.float32)
    ssim = 0
    for i in range(learned.shape[0]):
        ssim += compare_ssim(real[i, 0, :, :],
                             learned[i, 0, :, :],
                             data_range=data_range)
    return (ssim / learned.shape[0])
Example #16
def batch_SSIM(img, imclean, data_range):
    Img = img.data.cpu().numpy().astype(np.float32)
    Iclean = imclean.data.cpu().numpy().astype(np.float32)
    SSIM = 0
    for i in range(Img.shape[0]):
        SSIM += compare_ssim(Iclean[i, :, :, :].transpose(1, 2, 0),
                             Img[i, :, :, :].transpose(1, 2, 0),
                             data_range=data_range,
                             multichannel=True)
    return (SSIM / Img.shape[0])
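A minimal usage sketch for the batched helper above, assuming N x C x H x W float tensors in [0, 1]; the denoised/clean pair below is a stand-in for real data:

import torch

denoised = torch.rand(4, 3, 64, 64)   # stand-in for a network output
clean = torch.rand(4, 3, 64, 64)      # stand-in for the ground truth
print(batch_SSIM(denoised, clean, data_range=1.0))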
def calc_ssim(X, X_recon):
    ssim = np.zeros(len(X))
    input_images = vis.reconstruct_images(X)
    recon_images = vis.reconstruct_images(X_recon)
    for i in range(len(X)):
        data_range = 255.0 if input_images[0].dtype == np.uint8 else 1.0
        ssim[i] = compare_ssim(input_images[i],
                               recon_images[i],
                               data_range=data_range,
                               multichannel=True)
    return ssim
Example #18
 def structural_sim(self, path_a, path_b):
     """
 Measure the structural similarity between two images
 type path_a: str
 param path_b: the path to an image file
 type path_b: str
 return: a float {-1:1} that measures structural similarity between the input images
 rtype: {float}
 """
     img_a = self.get_img(path_a)
     img_b = self.get_img(path_b)
     sim, diff = compare_ssim(img_a, img_b, full=True)
     return sim
Example #19
def ssim_score(generated_images, reference_images):
    ssim_score_list = []
    for reference_image, generated_image in zip(reference_images,
                                                generated_images):
        ssim = compare_ssim(reference_image,
                            generated_image,
                            gaussian_weights=True,
                            sigma=1.5,
                            use_sample_covariance=False,
                            multichannel=True,
                            data_range=generated_image.max() -
                            generated_image.min())
        ssim_score_list.append(ssim)
    return np.mean(ssim_score_list)
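A short usage sketch, assuming both arguments are sequences of HxWx3 float images; the per-image data_range is derived from the generated image as in the function above:

import numpy as np

generated = [np.random.rand(64, 64, 3).astype(np.float32) for _ in range(4)]
reference = [np.random.rand(64, 64, 3).astype(np.float32) for _ in range(4)]
print(ssim_score(generated, reference))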
def acr_optimizer(x_init, x_ground_truth, y_test, n_iter, lambda_acr, lr=0.80):
    x_cvx = x_init.clone().detach().requires_grad_(True).to(device)
    x_optimizer = torch.optim.SGD([x_cvx], lr=lr)
    x_test_np = x_ground_truth.cpu().detach().numpy()
    data_range = np.max(x_test_np) - np.min(x_test_np)

    for iteration in np.arange(n_iter):
        x_optimizer.zero_grad()
        y_cvx = fwd_op(x_cvx)
        data_loss = sq_loss(y_test, y_cvx)

        ####### compute the regularization term ############
        prior_acr = lambda_acr * acr(x_cvx).mean()
        prior_sfb = lambda_acr * sfb(x_cvx).mean()
        prior_l2 = lambda_acr * l2_net(x_cvx).mean()
        prior = prior_acr + prior_sfb + prior_l2

        variational_loss = data_loss + prior

        variational_loss.backward(retain_graph=True)
        x_optimizer.step()
        #lr_scheduler.step()

        x_np = x_cvx.cpu().detach().numpy().squeeze()
        psnr = compare_psnr(np.squeeze(x_test_np), x_np, data_range=data_range)
        ssim = compare_ssim(np.squeeze(x_test_np), x_np, data_range=data_range)

        if (iteration % 50 == 0):
            recon_log = '[iter: {:d}/{:d}\t PSNR: {:.4f}, SSIM: {:.4f}, var_loss: {:.6f}, regularization: ACR {:.6f}, SFB {:.6f}, l2-term {:.6f}]'\
            .format(iteration, n_iter, psnr, ssim, variational_loss.item(), prior_acr.item(), prior_sfb.item(), prior_l2.item())

            print(recon_log)

    x_np = x_cvx.cpu().detach().numpy().squeeze()
    psnr = compare_psnr(np.squeeze(x_test_np), x_np, data_range=data_range)
    ssim = compare_ssim(np.squeeze(x_test_np), x_np, data_range=data_range)
    return x_np, psnr, ssim
Example #21
def ssim(img, ref, dynamic_range=None, axes=(0, 1)):
    """ Compute the structural similarity index.
    :param img: input image (np.array)
    :param ref: reference image (np.array)
    :param dynamic_range: If dynamic_range is not None, the given dynamic range is used for all slices in the volume.
                          Otherwise, the dynamic range is computed slice by slice.
    :param axes: tuple of axes over which the ssim is computed
    :return: (mean) ssim
    """

    assert len(axes) == 2
    assert img.shape == ref.shape
    if img.ndim == 2 and axes == (0, 1):
        img = img.copy()[np.newaxis]
        ref = ref.copy()[np.newaxis]
    elif img.ndim == 2 and axes != (0, 1):
        raise ValueError("axes of 2d array have to equal (0,1)")
    else:
        axes = list(axes)
        full_axes = list(range(0, img.ndim))
        transpose_axes = [item
                          for item in full_axes if item not in axes] + axes
        unwrap_axes = [transpose_axes.index(item) for item in full_axes]
        img = np.transpose(img.copy(), transpose_axes)
        img = np.reshape(img, (np.prod(img.shape[:-2]), ) + img.shape[-2:])
        ref = np.transpose(ref.copy(), transpose_axes)
        ref = np.reshape(ref, (np.prod(ref.shape[:-2]), ) + ref.shape[-2:])

    # ssim averaged over slices
    ssim_slices = []
    ref_abs = np.abs(ref)
    img_abs = np.abs(img)

    for i in range(ref_abs.shape[0]):
        if dynamic_range is None:
            drange = np.max(ref_abs[i]) - np.min(ref_abs[i])
        else:
            drange = dynamic_range
        _, ssim_i = compare_ssim(img_abs[i],
                                 ref_abs[i],
                                 data_range=drange,
                                 gaussian_weights=True,
                                 use_sample_covariance=False,
                                 full=True)
        ssim_slices.append(np.mean(ssim_i))

    return np.mean(ssim_slices)
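A brief usage sketch for the slice-wise helper above, computing SSIM over the last two axes of a hypothetical 3-D volume:

import numpy as np

vol_ref = np.random.rand(8, 64, 64)                    # slices x H x W
vol_img = vol_ref + 0.05 * np.random.randn(8, 64, 64)  # noisy copy
print(ssim(vol_img, vol_ref, axes=(1, 2)))             # mean SSIM over slices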
def compare_frame(frameA, frameB):
    grayA = cv2.cvtColor(frameA, cv2.COLOR_BGR2GRAY)
    grayB = cv2.cvtColor(frameB, cv2.COLOR_BGR2GRAY)

    score, diff = compare_ssim(grayA, grayB, full=True)
    diff = (diff * 255).astype("uint8")
    #sys.stdout.write('\rSSIM: {}'.format(score))
    #sys.stdout.flush()

    thresh = cv2.threshold(diff, 180, 255, cv2.THRESH_BINARY_INV)[1]
    opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, np.ones((10, 10)))
    opening = cv2.dilate(opening, np.ones((20, 20)))
    cnts = cv2.findContours(opening.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if imutils.is_cv2() else cnts[1]

    return cnts
Example #23
    def closure():
        global i, psnr_history, psnr_history_short, ssim_history_short

        if reg_noise_std > 0:
            net_input = net_input_saved + (noise.normal_() * reg_noise_std)

        x_hat = net(net_input)
        if loss_type == 'dip':
            tv_weight = 0  # 0 disables TV; use 1e-5 for radial/Gaussian filters or 1e-6 with a uniform filter when TV is on
            fourier_k, fourier_conv = torch_fourier_conv(x_hat, filter(filter_type))
            fft_y = torch.rfft(y, 2, onesided=False, normalized=False).cpu()
            total_loss = mse(fourier_conv, fft_y).to(self.device)
        elif loss_type == 'bp':
            tv_weight = 1e-3  # 1e-3, or 0 to disable the TV term
            total_loss = BP_loss(x_hat, y, noise_lvl, filter_type).to(self.device)

        if tv_weight > 0:
            mul_factor = 0
            #print(total_loss)
            #print(tv_weight * tv_loss(x_hat, mul_factor).to(self.device))
            total_loss = total_loss + tv_weight * tv_loss(x_hat, mul_factor).to(self.device)

        total_loss.backward()

        # Log
        orig_img = imgs['HR_np']
        x_hat_np = torch_to_np(x_hat)
        psnr = compare_psnr(orig_img, x_hat_np)
        ssim = compare_ssim(np.moveaxis(orig_img, 0, -1), np.moveaxis(x_hat_np, 0, -1), multichannel=True)

        # History
        psnr_history.append([psnr])
        if i % 100 == 0:
            psnr_history_short.append([psnr])
            ssim_history_short.append([ssim])
            print('Iteration %05d     PSNR %.3f     SSIM %.3f' % (i, psnr, ssim), '\r')

        if PLOT and i % 100 == 0:
            x_hat_np = torch_to_np(x_hat)
            plot_image_grid([imgs['HR_np'], x_hat_np], factor=13, nrow=3)
            print('Iteration %05d     PSNR %.3f' % (i, psnr), '\r')
            print('Iteration %05d     SSIM %.3f' % (i, ssim), '\r')
        i += 1

        return total_loss
Example #24
def ssim(test, ref, mask=None):
    """Structural Similarity (SSIM).

    Calculate the SSIM between a test image and a reference image.

    Parameters
    ----------
    test : numpy.ndarray
        The tested image
    ref : numpy.ndarray
        The reference image
    mask : numpy.ndarray, optional
        The mask for the ROI (default is ``None``)

    Raises
    ------
    ImportError
        If Scikit-Image package not found

    Notes
    -----
    Compute the metric only on the magnitude.

    Returns
    -------
    float
        The SSIM

    """
    if not import_skimage:  # pragma: no cover
        raise ImportError(
            'Required version of Scikit-Image package not found; ' +
            'see documentation for details: https://cea-cosmic.' +
            'github.io/ModOpt/#optional-packages')

    test, ref, mask = _preprocess_input(test, ref, mask)
    test = move_to_cpu(test)
    assim, ssim_value = compare_ssim(test, ref, full=True)

    if mask is None:
        return assim

    return (mask * ssim_value).sum() / mask.sum()
Example #25
 def test(self):
     self.net.eval()
     # torch.set_grad_enabled(False)
     psnrs = list()
     ssims = list()
     for ii, data in enumerate(self.test_loader):
         lr, hr = [x.to(self.device) for x in data]
         sr = self.net(lr)
         sr = torch.clamp(sr, 0, 1)
         sr = sr.cpu().detach().numpy() * 255
         hr = hr.cpu().detach().numpy() * 255
         sr = np.transpose(sr.squeeze(), (1, 2, 0))
         hr = np.transpose(hr.squeeze(), (1, 2, 0))
         sr = sr.astype(np.uint8)
         hr = hr.astype(np.uint8)
         psnr = compare_psnr(hr, sr, data_range=255)
         ssim = compare_ssim(hr, sr, data_range=255, multichannel=True)
         psnrs.append(psnr)
         ssims.append(ssim)
     print('PSNR= {0:.4f}, SSIM= {1:.4f}'.format(np.mean(psnrs),
                                                 np.mean(ssims)))
Example #26
def main(args):
    if args.width:
        width = int(args.width) // 36 * 36
    else:
        width = 324
    if width < 36:
        print("Too small image, can't denoised")
        return 1
    testset = RENOIR_Dataset(
        img_dir=args.testdir,
        transform=transforms.Compose([standardize(w=width),
                                      ToTensor()]),  # 0.36
    )
    dataloader = DataLoader(testset, batch_size=1, shuffle=False)
    cuda = True if torch.cuda.is_available() else False
    dtype = torch.cuda.FloatTensor if cuda else torch.FloatTensor
    glr = DeepGLR(width=36, cuda=cuda, opt=opt)
    device = torch.device("cuda") if cuda else torch.device("cpu")
    glr.load_state_dict(torch.load(args.model, map_location=device))
    print("CUDA: {0}, device: {1}".format(cuda, device))
    psnrs = list()
    _psnrs = list()
    _ssims = list()
    tstart = time.time()
    for imgidx, sample in enumerate(dataloader):
        T1, T1r = sample["nimg"].squeeze(0).float(), sample["rimg"].squeeze(
            0).float()
        print(T1r.shape, T1.shape)
        m = T1.shape[-1]
        dummy = np.zeros(shape=(3, T1.shape[-1], T1.shape[-2]))
        T2 = (torch.from_numpy(T1.detach().numpy().transpose(1, 2, 0)).unfold(
            0, 36, 36).unfold(1, 36, 36)).type(dtype)
        T2r = (torch.from_numpy(T1r.detach().numpy().transpose(
            1, 2, 0)).unfold(0, 36, 36).unfold(1, 36, 36))

        s2 = int(T2.shape[-1])

        for ii, i in enumerate(range(T2.shape[1])):
            P = glr.predict(T2[i, :, :, :, :].float())

            img1 = T2r[i, :, :, :, :].float()
            if cuda:
                P = P.cpu()

            img2 = P
            psnrs.append(cv2.PSNR(img1.detach().numpy(),
                                  img2.detach().numpy()))

            print("\r{0}, {1}/{2}".format(P.shape, ii + 1, P.shape[0]),
                  end=" ")
            for b, j in enumerate(range(0, m, s2)):
                dummy[:, (i * s2):(i * s2 + s2),
                      j:(j + s2)] = P[b].detach().numpy()
        print("\nPrediction time: ", time.time() - tstart)
        print("PSNR: ", np.mean(np.array(psnrs)))
        _psnrs.append(np.mean(np.array(psnrs)))
        ds = np.array(dummy).copy()
        new_d = list()
        for d in ds:
            _d = (d - d.min()) * (1 / (d.max() - d.min()))
            new_d.append(_d)
        d = np.array(new_d).transpose(1, 2, 0)
        if args.output:
            opath = os.path.join(args.output, str(imgidx) + ".png")
            opathr = os.path.join(args.output, str(imgidx) + "_ref.png")
        else:
            opath = "./{0}{1}".format(imgidx, ".png")
            opathr = "./{0}{1}".format(imgidx, "_ref.png")
        plt.imsave(opath, d)
        d = cv2.imread(opath)
        d = cv2.cvtColor(d, cv2.COLOR_BGR2RGB)
        tref = sample["rimg"].squeeze(0)
        tref = tref.detach().numpy().transpose((1, 2, 0))
        plt.imsave(opathr, tref)
        (score, diff) = compare_ssim(tref, d, full=True, multichannel=True)
        _ssims.append(score)
        print("SSIM: ", score)
        print("Saved ", opath)
    print("Mean PSNR: {0:.3f}".format(np.mean(_psnrs)))
    print("Mean SSIM: {0:.3f}".format(np.mean(_ssims)))
    print("Total running time: {0:.3f}".format(time.time() - tstart))
    #convex reg. reconstruction and FBP
    if clip_fbp:
        fbp_image = mayo_utils.cut_image(fbp_image, vmin=0.0, vmax=1.0)
        n_iter, lambda_acr = 400, 0.05
    else:
        n_iter, lambda_acr = 350, 0.04

    x_init = torch.from_numpy(fbp_image).view(fbp.size()).to(device)
    x_np_cvx, psnr_cvx, ssim_cvx = acr_optimizer(x_init,
                                                 phantom,
                                                 sinogram,
                                                 n_iter=n_iter,
                                                 lambda_acr=lambda_acr)

    psnr_fbp = compare_psnr(phantom_image, fbp_image, data_range=data_range)
    ssim_fbp = compare_ssim(phantom_image, fbp_image, data_range=data_range)

    recon_log = 'test-image [{:d}/{:d}]:\t FBP: PSNR {:.4f}, SSIM {:.4f}\t convex-reg: PSNR {:.4f}, SSIM {:.4f}\n'\
        .format(idx, len(eval_dataloader), psnr_fbp, ssim_fbp, psnr_cvx, ssim_cvx)

    print(recon_log)
    log_file.write(recon_log)

    ### compute running sum for average
    psnr_fbp_avg += psnr_fbp
    ssim_fbp_avg += ssim_fbp
    psnr_cvx_avg += psnr_cvx
    ssim_cvx_avg += ssim_cvx
    num_test_images += 1

    ### save as numpy arrays ####
Example #28
def denoise(
    inp,
    gtv,
    argref,
    normalize=False,
    stride=36,
    width=324,
    prefix="_",
    verbose=0,
    opt=None,
    args=None,
    logger=None,
):
    try:
        from skimage.metrics import structural_similarity as compare_ssim
    except Exception:
        from skimage.measure import compare_ssim
    sample = cv2.imread(inp)
    # logger.info(inp)
    if width is None:
        width = sample.shape[0]
    else:
        sample = cv2.resize(sample, (width, width))
    sample = cv2.cvtColor(sample, cv2.COLOR_BGR2RGB)
    sample = sample.transpose((2, 0, 1))
    shape = sample.shape

    if normalize:
        sample = _norm(sample, newmin=0, newmax=1)
    sample = torch.from_numpy(sample)

    cuda = True if torch.cuda.is_available() else False

    dtype = torch.cuda.FloatTensor if cuda else torch.FloatTensor
    if argref:
        ref = cv2.imread(argref)
        if ref.shape[0] != width or ref.shape[1] != width:
            ref = cv2.resize(ref, (width, width))
        ref = cv2.cvtColor(ref, cv2.COLOR_BGR2RGB)
        tref = ref.copy()
        ref = ref.transpose((2, 0, 1))
        ref = torch.from_numpy(ref)
        if normalize:
            ref = _norm(ref, newmin=0, newmax=1)

    tstart = time.time()
    T1 = sample
    if argref:
        T1r = ref
    else:
        logger.info(str(T1.shape))

    m = T1.shape[-1]
    T1 = torch.nn.functional.pad(T1, (0, stride, 0, stride), mode="constant", value=0)
    shapex = T1.shape
    T2 = (
        torch.from_numpy(T1.detach().numpy().transpose(1, 2, 0))
        .unfold(0, opt.width, stride)
        .unfold(1, opt.width, stride)
    ).type(dtype)
    T2 = T2.contiguous()
    if argref:
        T1r = torch.nn.functional.pad(
            T1r, (0, stride, 0, stride), mode="constant", value=0
        )

    MAX_PATCH = args.multi
    oT2s0 = T2.shape[0]
    T2 = T2.view(-1, opt.channels, opt.width, opt.width)
    dummy = torch.zeros(T2.shape).type(dtype)
    with torch.no_grad():
        for ii, i in enumerate(range(0, T2.shape[0], MAX_PATCH)):
            P = gtv.predict(T2[i : (i + MAX_PATCH), :, :, :].float().contiguous())
            dummy[i : (i + MAX_PATCH)] = P
    dummy = dummy.view(oT2s0, -1, opt.channels, opt.width, opt.width)
    dummy = dummy.cpu()

    pred_time = time.time()-tstart
    #logger.info("Prediction time: {0:.2f}".format(time.time() - tstart))
    if argref:
        # logger.info("PSNR: {:.2f}".format(np.mean(np.array(psnrs))))
        pass

    dummy = (
        patch_merge(dummy, stride=stride, shape=shapex, shapeorg=shape).detach().numpy()
    )

    ds = np.array(dummy).copy()
    d = np.minimum(np.maximum(ds, 0), 255)
    d = d.transpose(1, 2, 0) / 255
    #logger.info("RANGE: {0:.4f} - {1:.4f}".format(d.min(), d.max()))
    if 0:
        opath = args.output
    else:
        filename = inp.split("/")[-1]
        opath = RESROOT + "/{0}_{1}".format(prefix, filename)
        opath = opath[:-3] + "png"
    d = np.minimum(np.maximum(d, 0), 1)
    plt.imsave(opath, d)
    if argref:
        mse = ((d - (tref / 255.0)) ** 2).mean() * 255
        #logger.info("MSE: {:.5f}".format(mse))
        d = cv2.imread(opath)
        d = cv2.cvtColor(d, cv2.COLOR_BGR2RGB)
        psnr2 = cv2.PSNR(tref, d)
        #logger.info("PSNR: {:.5f}".format(psnr2))
        (score, diff) = compare_ssim(tref, d, full=True, multichannel=True)
        #logger.info("SSIM: {:.5f}".format(score))
        logger.info(f"Time: {pred_time:.2f} || PSNR: {psnr2:.3f} || SSIM: {score:.3f} || Saved: {opath}")
    else:
        logger.info(f"Time: {pred_time:.2f} || Saved: {opath}")

    if argref:
        return (0, score, 0, psnr2, mse, d)  # psnr, ssim, denoised image
    return d
Example #29
def main_eva(
    seed,
    model_name,
    trainset,
    testset,
    imgw=None,
    verbose=0,
    image_path=None,
    noise_type="gauss",
    opt=None,
    logger=None,
):
    gtv = DeepGTV(width=36, cuda=cuda, opt=opt)  # just initialize to load the trained model, no need to change
    PATH = model_name
    device = torch.device("cuda") if cuda else torch.device("cpu")

    #gtv.load_state_dict(torch.load(PATH, map_location=device))
    gtv = torch.load(PATH, map_location=device)
    width = gtv.opt.width
    opt.width = width
    opt=gtv.opt
    if noise_type == "gauss":
        npref = "_g"
    else:
        npref = "_n"

    logger.info("EVALUATING TRAIN SET")

    # trainset = ["10", "1", "7", "8", "9"]
    traineva = {
        "psnr": list(),
        "ssim": list(),
        "ssim2": list(),
        "psnr2": list(),
        "mse": list(),
    }
    stride = args.stride
    for t in trainset:
        logger.info("++++++++++++++++++++++++++++++++")
        inp = "{0}/noisy/{1}{2}.bmp".format(image_path, t, npref)
        logger.info(f"image #{t}: {inp}")
        argref = "{0}/ref/{1}_r.bmp".format(image_path, t)
        _, _ssim, _, _psnr2, _mse, _ = denoise(
            inp,
            gtv,
            argref,
            stride=stride,
            width=imgw,
            prefix=seed,
            opt=opt,
            args=args,
            logger=logger,
        )
        # traineva["psnr"].append(_psnr)
        traineva["ssim"].append(_ssim)
        # traineva["ssim2"].append(_ssim2)
        traineva["psnr2"].append(_psnr2)
        traineva["mse"].append(_mse)
        try:
            from skimage.metrics import structural_similarity as compare_ssim
        except Exception:
            from skimage.measure import compare_ssim

        img1 = cv2.imread(inp)[:, :, : opt.channels]
        img2 = cv2.imread(argref)[:, :, : opt.channels]
        (score, diff) = compare_ssim(img1, img2, full=True, multichannel=True)
        #logger.info("Original {0:.2f} {1:.2f}".format(cv2.PSNR(img1, img2), score))
    logger.info("==============SUMMARY==============")
    logger.info("MEAN SSIM: {:.2f}".format(np.mean(traineva["ssim"])))
    logger.info(
        "MEAN PSNR: {:.2f}".format(np.mean(traineva["psnr2"]))
    )
    logger.info("MEAN MSE: {:.2f}".format(np.mean(traineva["mse"])))
    logger.info("===================================")

    logger.info("EVALUATING TEST SET")
    # testset = ["2", "3", "4", "5", "6"]
    testeva = {
        "psnr": list(),
        "ssim": list(),
        "ssim2": list(),
        "psnr2": list(),
        "mse": list(),
    }
    for t in testset:
        logger.info("++++++++++++++++++++++++++++++++")
        inp = "{0}/noisy/{1}{2}.bmp".format(image_path, t, npref)
        logger.info(f"image #{t}: {inp}")
        argref = "{0}/ref/{1}_r.bmp".format(image_path, t)
        _psnr, _ssim, _ssim2, _psnr2, _mse, _ = denoise(
            inp,
            gtv,
            argref,
            stride=stride,
            width=imgw,
            prefix=seed,
            opt=opt,
            args=args,
            logger=logger,
        )
        # testeva["psnr"].append(_psnr)
        testeva["ssim"].append(_ssim)
        # testeva["ssim2"].append(_ssim2)
        testeva["psnr2"].append(_psnr2)
        testeva["mse"].append(_mse)
        try:
            from skimage.metrics import structural_similarity as compare_ssim
        except Exception:
            from skimage.measure import compare_ssim

        img1 = cv2.imread(inp)[:, :, : opt.channels]
        img2 = cv2.imread(argref)[:, :, : opt.channels]
        (score, diff) = compare_ssim(img1, img2, full=True, multichannel=True)
        #logger.info("Original {0:.2f} {1:.2f}".format(cv2.PSNR(img1, img2), score))
    logger.info("==============SUMMARY==============")
    logger.info("MEAN SSIM: {:.2f}".format(np.mean(testeva["ssim"])))
    logger.info(
        "MEAN PSNR: {:.2f}".format(np.mean(testeva["psnr2"]))
    )
    logger.info("MEAN MSE: {:.2f}".format(np.mean(testeva["mse"])))
    logger.info("===================================")
    return traineva, testeva
    def visualize_batch(self,
                        batch,
                        X_recon,
                        ssim_maps,
                        nimgs=8,
                        ds=None,
                        wait=0):

        nimgs = min(nimgs, len(batch))
        train_state_D = self.saae.D.training
        train_state_Q = self.saae.Q.training
        train_state_P = self.saae.P.training
        self.saae.D.eval()
        self.saae.Q.eval()
        self.saae.P.eval()

        loc_err_gan = "tr"
        text_size_errors = 0.65

        input_images = vis.reconstruct_images(batch.images[:nimgs])
        show_filenames = batch.filenames[:nimgs]
        target_images = (batch.target_images
                         if batch.target_images is not None else batch.images)
        disp_images = vis.reconstruct_images(target_images[:nimgs])

        # draw GAN score
        if self.args.with_gan:
            with torch.no_grad():
                err_gan_inputs = self.saae.D(batch.images[:nimgs])
            disp_images = vis.add_error_to_images(
                disp_images,
                errors=1 - err_gan_inputs,
                loc=loc_err_gan,
                format_string="{:>5.2f}",
                vmax=1.0,
            )

        # disp_images = vis.add_landmarks_to_images(disp_images, batch.landmarks[:nimgs], color=(0,1,0), radius=1,
        #                                           draw_wireframe=False)
        rows = [vis.make_grid(disp_images, nCols=nimgs, normalize=False)]

        recon_images = vis.reconstruct_images(X_recon[:nimgs])
        disp_X_recon = recon_images.copy()

        print_stats = True
        if print_stats:
            # lm_ssim_errs = None
            # if batch.landmarks is not None:
            #     lm_recon_errs = lmutils.calc_landmark_recon_error(batch.images[:nimgs], X_recon[:nimgs], batch.landmarks[:nimgs], reduction='none')
            #     disp_X_recon = vis.add_error_to_images(disp_X_recon, lm_recon_errs, size=text_size_errors, loc='bm',
            #                                            format_string='({:>3.1f})', vmin=0, vmax=10)
            #     lm_ssim_errs = lmutils.calc_landmark_ssim_error(batch.images[:nimgs], X_recon[:nimgs], batch.landmarks[:nimgs])
            #     disp_X_recon = vis.add_error_to_images(disp_X_recon, lm_ssim_errs.mean(axis=1), size=text_size_errors, loc='bm-1',
            #                                            format_string='({:>3.2f})', vmin=0.2, vmax=0.8)

            X_recon_errs = 255.0 * torch.abs(batch.images - X_recon).reshape(
                len(batch.images), -1).mean(dim=1)
            # disp_X_recon = vis.add_landmarks_to_images(disp_X_recon, batch.landmarks[:nimgs], radius=1, color=None,
            #                                            lm_errs=lm_ssim_errs, draw_wireframe=False)
            disp_X_recon = vis.add_error_to_images(
                disp_X_recon[:nimgs],
                errors=X_recon_errs,
                size=text_size_errors,
                format_string="{:>4.1f}",
            )
            if self.args.with_gan:
                with torch.no_grad():
                    err_gan = self.saae.D(X_recon[:nimgs])
                disp_X_recon = vis.add_error_to_images(
                    disp_X_recon,
                    errors=1 - err_gan,
                    loc=loc_err_gan,
                    format_string="{:>5.2f}",
                    vmax=1.0,
                )

            ssim = np.zeros(nimgs)
            for i in range(nimgs):
                data_range = 255.0 if input_images[0].dtype == np.uint8 else 1.0
                ssim[i] = compare_ssim(
                    input_images[i],
                    recon_images[i],
                    data_range=data_range,
                    multichannel=True,
                )
            disp_X_recon = vis.add_error_to_images(
                disp_X_recon,
                1 - ssim,
                loc="bl-1",
                size=text_size_errors,
                format_string="{:>4.2f}",
                vmin=0.2,
                vmax=0.8,
            )

            if ssim_maps is not None:
                disp_X_recon = vis.add_error_to_images(
                    disp_X_recon,
                    ssim_maps.reshape(len(ssim_maps), -1).mean(axis=1),
                    size=text_size_errors,
                    loc="bl-2",
                    format_string="{:>4.2f}",
                    vmin=0.0,
                    vmax=0.4,
                )

        rows.append(vis.make_grid(disp_X_recon, nCols=nimgs))

        if ssim_maps is not None:
            disp_ssim_maps = to_numpy(
                nn.denormalized(ssim_maps)[:nimgs].transpose(0, 2, 3, 1))
            if disp_ssim_maps.shape[3] == 1:
                disp_ssim_maps = disp_ssim_maps.repeat(3, axis=3)
            for i in range(len(disp_ssim_maps)):
                disp_ssim_maps[i] = vis.color_map(
                    disp_ssim_maps[i].mean(axis=2), vmin=0.0, vmax=2.0)
            grid_ssim_maps = vis.make_grid(disp_ssim_maps, nCols=nimgs)
            cv2.imwrite("ssim errors.jpg",
                        cv2.cvtColor(grid_ssim_maps, cv2.COLOR_RGB2BGR))

        self.saae.D.train(train_state_D)
        self.saae.Q.train(train_state_Q)
        self.saae.P.train(train_state_P)

        f = 1
        disp_rows = vis.make_grid(rows, nCols=1, normalize=False, fx=f, fy=f)
        wnd_title = "recon errors "
        if ds is not None:
            wnd_title += ds.__class__.__name__
        cv2.imwrite(wnd_title + ".jpg",
                    cv2.cvtColor(disp_rows, cv2.COLOR_RGB2BGR))
        cv2.waitKey(wait)