def metric_psnr(im_true, im_test, reduce=True, return_per_frame=False):
    if im_true.dim() == 3:
        im_true, im_test = im_true[None, None], im_test[None, None]
    elif im_true.dim() == 4:
        im_true, im_test = im_true[None], im_test[None]
    # make channel last
    real = ((im_true + 1.) / 2.).permute(0, 1, 3, 4, 2).cpu().numpy()
    fake = ((im_test + 1.) / 2.).permute(0, 1, 3, 4, 2).cpu().numpy()

    psnr_batch = np.asarray([
        compare_psnr(r, f, data_range=1.)
        for r, f in zip(real.reshape(-1, *real.shape[2:]),
                        fake.reshape(-1, *fake.shape[2:]))
    ])

    if return_per_frame:
        psnr_per_frame = {
            i:
            np.asarray([compare_psnr(real[:, i], fake[:, i], data_range=1.)])
            for i in range(real.shape[1])
        }

    if reduce:
        if return_per_frame:
            # collapse each per-frame array to a scalar mean
            psnr_pf_reduced = {
                key: np.mean(psnr_per_frame[key])
                for key in psnr_per_frame
            }
            return np.mean(psnr_batch), psnr_pf_reduced
        else:
            return np.mean(psnr_batch)
    if return_per_frame:
        return psnr_batch, psnr_per_frame
    else:
        return psnr_batch
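
A minimal usage sketch for metric_psnr above, assuming the function is in scope, the inputs are torch tensors scaled to [-1, 1] with shape (T, C, H, W) or (B, T, C, H, W), and compare_psnr refers to skimage's PSNR (peak_signal_noise_ratio on recent releases, compare_psnr in skimage.measure on older ones):

import numpy as np
import torch
from skimage.metrics import peak_signal_noise_ratio as compare_psnr

# batch of 2 clips, 4 frames each, RGB 32x32, values in [-1, 1]
im_true = torch.rand(2, 4, 3, 32, 32) * 2 - 1
im_test = (im_true + 0.05 * torch.randn_like(im_true)).clamp(-1, 1)

print(metric_psnr(im_true, im_test))                             # mean PSNR over all frames
mean_psnr, per_frame = metric_psnr(im_true, im_test, return_per_frame=True)
print(mean_psnr, {k: float(v) for k, v in per_frame.items()})    # one reduced PSNR per frame index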
    def closure_sgld():

        global i, out_avg, psnr_noisy_last, last_net, net_input, losses, psnrs, ssims, average_dropout_rate, no_layers, img_mean, sample_count, recons, uncerts, loss_last

        add_noise(net)

        if reg_noise_std > 0:
            net_input = net_input_saved + (noise.normal_() * reg_noise_std)

        out = net(net_input)
        out[:, :1] = out[:, :1].sigmoid()

        _loss = mse(out[:, :1], img_noisy_torch)
        _loss.backward()

        # Smoothing
        if out_avg is None:
            out_avg = out.detach()
        else:
            out_avg = out_avg * exp_weight + out.detach() * (1 - exp_weight)

        losses.append(mse(out_avg[:, :1], img_noisy_torch).item())

        _out = out.detach().cpu().numpy()[0, :1]
        _out_avg = out_avg.detach().cpu().numpy()[0, :1]

        psnr_noisy = compare_psnr(img_noisy_np, _out)
        psnr_gt = compare_psnr(img_np, _out)
        psnr_gt_sm = compare_psnr(img_np, _out_avg)

        ssim_noisy = compare_ssim(img_noisy_np[0], _out[0])
        ssim_gt = compare_ssim(img_np[0], _out[0])
        ssim_gt_sm = compare_ssim(img_np[0], _out_avg[0])

        psnrs.append([psnr_noisy, psnr_gt, psnr_gt_sm])
        ssims.append([ssim_noisy, ssim_gt, ssim_gt_sm])

        if PLOT and i % show_every == 0:
            print(
                f'Iteration: {i} Loss: {_loss.item():.4f} PSNR_noisy: {psnr_noisy:.4f} PSNR_gt: {psnr_gt:.4f} PSNR_gt_sm: {psnr_gt_sm:.4f}'
            )

            out_np = _out
            recons.append(out_np)

            out_np_var = np.var(np.array(recons[-mc_iter:]), axis=0)[:1]

            print('mean epi', out_np_var.mean())
            print('###################')

            uncerts.append(out_np_var)

        i += 1

        return _loss
Example #3
def ssim_mse_psnr(image_true, image_test):
    image_true = Any2One(image_true)
    image_test = Any2One(image_test)
    mse = compare_mse(image_true, image_test)
    ssim = compare_ssim(image_true, image_test)
    psnr = compare_psnr(image_true, image_test, data_range=255)
    return ssim, mse, psnr
Example #4
    def _handle_image(self,
                      input_path,
                      output_path,
                      compare_path=None,
                      abs_out_dir=None,
                      filename=None):
        img1 = cv2.imread(input_path)
        img2 = cv2.imread(compare_path)
        evaluation = self.cfg['evaluate']
        if 'f1' in evaluation:
            bin1 = binaryzation(img1, max=1)
            bin2 = binaryzation(img2, max=1)
            f1_score = f1(bin1, bin2)
            self.history_eval['f1'].append(f1_score)
            print('   f1: %f' % f1_score)
        if 'f2' in evaluation:
            bin1 = binaryzation(img1, max=1)
            bin2 = binaryzation(img2, max=1)
            f2_score = f2(bin1, bin2)
            self.history_eval['f2'].append(f2_score)
            print('   f2: %f' % f2_score)
        if 'psnr' in evaluation:
            psnr = compare_psnr(img1, img2)
            self.history_eval['psnr'].append(psnr)
            print('   psnr: %f' % psnr)
        if 'ssim' in evaluation:
            ssim = compare_ssim(img1, img2, multichannel=True)
            self.history_eval['ssim'].append(ssim)
            print('   ssim: %f' % ssim)
Example #5
def compute_psnr(pred, tar):
    assert pred.shape == tar.shape
    pred = pred.transpose(0, 2, 3, 1)
    tar = tar.transpose(0, 2, 3, 1)
    psnr = 0
    for i in range(pred.shape[0]):
        psnr += compare_psnr(tar[i], pred[i])
    return psnr / pred.shape[0]
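
A hedged usage sketch for compute_psnr above, assuming pred and tar are float numpy arrays in NCHW layout with values in [0, 1] (the transpose to NHWC only reorders axes; PSNR itself does not depend on channel order):

import numpy as np
from skimage.metrics import peak_signal_noise_ratio as compare_psnr

tar = np.random.rand(4, 3, 64, 64).astype(np.float32)            # N, C, H, W ground truth
pred = np.clip(tar + np.random.normal(0, 0.02, tar.shape), 0, 1).astype(np.float32)
print(compute_psnr(pred, tar))                                    # PSNR averaged over the batch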
Example #6
def calc_psnr(pic1: np.ndarray, pic2: np.ndarray) -> float:
    # PSNR: https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio
    psnr = compare_psnr(pic1, pic2)
    # when err == 0, psnr will be 'inf'
    if math.isinf(psnr):
        psnr = 100.0
    # normalize
    return psnr / 100
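
A quick sketch of how the capping above behaves, assuming compare_psnr and math are already imported as in the snippet: identical inputs give infinite PSNR, which is clamped to 100 before the division, so the return value lands in roughly [0, 1] for 8-bit images:

import numpy as np

a = np.random.randint(0, 256, (64, 64), dtype=np.uint8)
print(calc_psnr(a, a))         # zero error -> inf -> capped at 100 -> returns 1.0
b = np.clip(a + np.random.normal(0, 5, a.shape), 0, 255).astype(np.uint8)
print(calc_psnr(a, b))         # roughly 0.34 for sigma=5 Gaussian noise on uint8 data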
Example #7
def calc_psnr(learned, real, data_range=1.0):
    learned = learned.data.cpu().numpy().astype(np.float32)
    real = real.data.cpu().numpy().astype(np.float32)
    psnr = 0
    for i in range(learned.shape[0]):
        psnr += compare_psnr(real[i, :, :, :],
                             learned[i, :, :, :],
                             data_range=data_range)
    return (psnr / learned.shape[0])
Example #8
def batch_PSNR(img, imclean, data_range):
    Img = img.data.cpu().numpy().astype(np.float32)
    Iclean = imclean.data.cpu().numpy().astype(np.float32)
    PSNR = 0
    for i in range(Img.shape[0]):
        PSNR += compare_psnr(Iclean[i, :, :, :],
                             Img[i, :, :, :],
                             data_range=data_range)
    return (PSNR / Img.shape[0])
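
A minimal usage sketch for batch_PSNR above, assuming img and imclean are NCHW torch tensors with values in [0, 1]:

import torch
import numpy as np
from skimage.metrics import peak_signal_noise_ratio as compare_psnr

clean = torch.rand(8, 3, 40, 40)                        # batch of 8 RGB patches in [0, 1]
noisy = (clean + 0.1 * torch.randn_like(clean)).clamp(0, 1)
print(batch_PSNR(noisy, clean, data_range=1.0))         # mean PSNR over the batch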
Example #9
def batch_PSNR_vector(img, imclean, data_range):
    Img = img.data.cpu().numpy().astype(np.float32)
    Iclean = imclean.data.cpu().numpy().astype(np.float32)
    PSNR = list()
    for i in range(Img.shape[0]):
        PSNR.append(
            compare_psnr(Iclean[i, :, :, :],
                         Img[i, :, :, :],
                         data_range=data_range))
    return np.array(PSNR)
def acr_optimizer(x_init, x_ground_truth, y_test, n_iter, lambda_acr, lr=0.80):
    x_cvx = x_init.clone().detach().requires_grad_(True).to(device)
    x_optimizer = torch.optim.SGD([x_cvx], lr=lr)
    x_test_np = x_ground_truth.cpu().detach().numpy()
    data_range = np.max(x_test_np) - np.min(x_test_np)

    for iteration in np.arange(n_iter):
        x_optimizer.zero_grad()
        y_cvx = fwd_op(x_cvx)
        data_loss = sq_loss(y_test, y_cvx)

        ####### compute the regularization term ############
        prior_acr = lambda_acr * acr(x_cvx).mean()
        prior_sfb = lambda_acr * sfb(x_cvx).mean()
        prior_l2 = lambda_acr * l2_net(x_cvx).mean()
        prior = prior_acr + prior_sfb + prior_l2

        variational_loss = data_loss + prior

        variational_loss.backward(retain_graph=True)
        x_optimizer.step()
        #lr_scheduler.step()

        x_np = x_cvx.cpu().detach().numpy().squeeze()
        psnr = compare_psnr(np.squeeze(x_test_np), x_np, data_range=data_range)
        ssim = compare_ssim(np.squeeze(x_test_np), x_np, data_range=data_range)

        if iteration % 50 == 0:
            recon_log = ('[iter: {:d}/{:d}\t PSNR: {:.4f}, SSIM: {:.4f}, var_loss: {:.6f}, '
                         'regularization: ACR {:.6f}, SFB {:.6f}, l2-term {:.6f}]').format(
                             iteration, n_iter, psnr, ssim, variational_loss.item(),
                             prior_acr.item(), prior_sfb.item(), prior_l2.item())

            print(recon_log)

    x_np = x_cvx.cpu().detach().numpy().squeeze()
    psnr = compare_psnr(np.squeeze(x_test_np), x_np, data_range=data_range)
    ssim = compare_ssim(np.squeeze(x_test_np), x_np, data_range=data_range)
    return x_np, psnr, ssim
Example #11
    def evaluate(self, xs, bgs, rfs):
        list_bg_psnr = []
        with torch.no_grad():
            for i in range(xs.size(0)):
                pred_bg, pred_rf, _sm, _fea = self.generator(
                    xs[i].unsqueeze(0))

                pred_bg = pred_bg / 2. + 0.5
                gt_bg = bgs[i].unsqueeze(0) / 2. + 0.5

                list_bg_psnr.append(
                    compare_psnr(
                        np.uint8(np.clip(pred_bg.cpu().numpy(), 0, 1) * 255),
                        np.uint8(np.clip(gt_bg.cpu().numpy(), 0, 1) * 255)))
        mean_psnr = np.mean(list_bg_psnr)
        return mean_psnr
Example #12
    def closure():
        global i, psnr_history, psnr_history_short, ssim_history_short

        if reg_noise_std > 0:
            net_input = net_input_saved + (noise.normal_() * reg_noise_std)

        x_hat = net(net_input)
        if loss_type == 'dip':
            tv_weight = 0  # 0 if no TV; 1e-5 for radial/gaussian or 1e-6 for uniform filter when TV is on
            fourier_k, fourier_conv = torch_fourier_conv(x_hat, filter(filter_type))
            fft_y = torch.rfft(y, 2, onesided=False, normalized=False).cpu()
            total_loss = mse(fourier_conv, fft_y).to(self.device)
        elif loss_type == 'bp':
            tv_weight = 1e-3 # 1e-3 or 0 if no tv
            total_loss = BP_loss(x_hat, y, noise_lvl, filter_type).to(self.device)

        if tv_weight > 0:
            mul_factor = 0
            #print(total_loss)
            #print(tv_weight * tv_loss(x_hat, mul_factor).to(self.device))
            total_loss = total_loss + tv_weight * tv_loss(x_hat, mul_factor).to(self.device)

        total_loss.backward()

        # Log
        orig_img = imgs['HR_np']
        x_hat_np = torch_to_np(x_hat)
        psnr = compare_psnr(orig_img, x_hat_np)
        ssim = compare_ssim(np.moveaxis(orig_img, 0, -1), np.moveaxis(x_hat_np, 0, -1), multichannel=True)

        # History
        psnr_history.append([psnr])
        if i % 100 == 0:
            psnr_history_short.append([psnr])
            ssim_history_short.append([ssim])
            print('Iteration %05d     PSNR %.3f     SSIM %.3f' % (i, psnr, ssim), '\r')

        if PLOT and i % 100 == 0:
            x_hat_np = torch_to_np(x_hat)
            plot_image_grid([imgs['HR_np'], x_hat_np], factor=13, nrow=3)
            print('Iteration %05d     PSNR %.3f' % (i, psnr), '\r')
            print('Iteration %05d     SSIM %.3f' % (i, ssim), '\r')
        i += 1

        return total_loss
Example #13
def batch_psnr(img, imclean, data_range):
    r"""
	Computes the PSNR along the batch dimension (not pixel-wise)

	Args:
		img: a `torch.Tensor` containing the restored image
		imclean: a `torch.Tensor` containing the reference image
		data_range: The data range of the input image (distance between
			minimum and maximum possible values). By default, this is estimated
			from the image data-type.
	"""
    img_cpu = img.data.cpu().numpy().astype(np.float32)
    imgclean = imclean.data.cpu().numpy().astype(np.float32)
    psnr = 0
    for i in range(img_cpu.shape[0]):
        psnr += compare_psnr(imgclean[i, :, :, :], img_cpu[i, :, :, :], \
              data_range=data_range)
    return psnr / img_cpu.shape[0]
Example #14
def sequence_psnr(seq, seqclean, data_range=1.0):
    r"""
	Computes the mean PSNR of a sequence (not pixel-wise)

	Args:
		seq: array of dims [num_frames, C, H, W], C=1 grayscale or C=3 RGB, H and W are even.
		seqclean: reference array of dims [num_frames, C, H, W],
			C=1 grayscale or C=3 RGB, H and W are even.
		data_range: The data range of the input image (distance between
			minimum and maximum possible values). By default, this is estimated
			from the image data-type.
	"""
    assert len(seq.shape) == 4
    psnr = 0
    for i in range(seq.shape[0]):
        psnr += compare_psnr(seq[i, :, :, :],
                             seqclean[i, :, :, :],
                             data_range=data_range)
    return psnr / seq.shape[0]
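
A hedged usage sketch for sequence_psnr above, assuming the sequences are float numpy arrays of shape [num_frames, C, H, W] with values in [0, 1]:

import numpy as np
from skimage.metrics import peak_signal_noise_ratio as compare_psnr

seqclean = np.random.rand(16, 3, 64, 64).astype(np.float32)      # 16 clean RGB frames
seq = np.clip(seqclean + np.random.normal(0, 0.05, seqclean.shape), 0, 1).astype(np.float32)
print(sequence_psnr(seq, seqclean, data_range=1.0))              # mean PSNR over the frames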
Example #15
    def psnr(self, image, ref, roi=None):
        '''
        image: str or array
        ref: str or array
        '''

        # Read image if it is a string
        if type(image) == str:
            image = imread(image).astype(float)

        if type(ref) == str:
            ref = imread(ref).astype(float)

        if roi is not None:
            image = image[roi]
            ref = ref[roi]

        psnr = compare_psnr(image, ref)

        return psnr
Example #16
    def test(self):
        self.net.eval()
        # torch.set_grad_enabled(False)
        psnrs = list()
        ssims = list()
        for ii, data in enumerate(self.test_loader):
            lr, hr = [x.to(self.device) for x in data]
            sr = self.net(lr)
            sr = torch.clamp(sr, 0, 1)
            sr = sr.cpu().detach().numpy() * 255
            hr = hr.cpu().detach().numpy() * 255
            sr = np.transpose(sr.squeeze(), (1, 2, 0))
            hr = np.transpose(hr.squeeze(), (1, 2, 0))
            sr = sr.astype(np.uint8)
            hr = hr.astype(np.uint8)
            psnr = compare_psnr(hr, sr, data_range=255)
            ssim = compare_ssim(hr, sr, data_range=255, multichannel=True)
            psnrs.append(psnr)
            ssims.append(ssim)
        print('PSNR= {0:.4f}, SSIM= {1:.4f}'.format(np.mean(psnrs),
                                                    np.mean(ssims)))
Example #17
# plt.imshow(img, cmap='gray')
# plt.show()

psnrs = {'CCSRResNet' : [], 'WGAN-VGG' : [], 'N3Net' : [], 'DnCNN' : [], 'BM3D' : []}
ssims = {'CCSRResNet' : [], 'WGAN-VGG' : [], 'N3Net' : [], 'DnCNN' : [], 'BM3D' : []}

with tqdm(total=num_samples, ncols=175,
    bar_format="{n_fmt} / {total_fmt} [{bar}]" + \
    " - {postfix[0]}: {postfix[CCSRResNet]}" + \
    " - {postfix[1]}: {postfix[WGAN-VGG]}" + \
    " - {postfix[2]}: {postfix[N3Net]}" + \
    " - {postfix[3]}: {postfix[DnCNN]}" + \
    " - {postfix[4]}: {postfix[BM3D]}",
    postfix={0:'CCSRResNet', 1:'WGAN-VGG', 2:'N3Net', 3:'DnCNN', 4:'BM3D', \
            'CCSRResNet':'...', 'WGAN-VGG':'...', 'N3Net':'...', 'DnCNN':'...', 'BM3D':'...'}) as t:

    for i in range(num_samples):
        gt = load_image(samples['GT'][i], cb=args.crop_border)
        for key in samples.keys():
            if key == 'GT':
                continue
            sample = load_image(samples[key][i], cb=args.crop_border)

            psnrs[key].append(compare_psnr(gt, sample, data_range=255))
            ssims[key].append(compare_ssim(gt, sample, data_range=255))

            t.postfix[key] = '[{0:0.2f}, {1:0.4f}]'.format(np.mean(psnrs[key]), np.mean(ssims[key]))

        t.update(1)

Example #18
            highres_vid = uNet(interm_vid)  # (1,16,H,W)

            assert highres_vid.shape == vid.shape
            highres_vid = torch.clamp(highres_vid, min=0, max=1)

            ## converting tensors to numpy arrays
            b1_np = b1.squeeze().data.cpu().numpy()  # (H,W)
            if args.two_bucket:
                b0_np = b0.squeeze().data.cpu().numpy()
            vid_np = vid.squeeze().data.cpu().numpy()  # (9,H,W)
            highres_np = highres_vid.squeeze().data.cpu().numpy()  # (9,H,W)
            full_pred.append(highres_np)
            full_gt.append(vid_np)

            ## psnr
            psnr = compare_psnr(highres_np, vid_np)
            psnr_sum += psnr

            ## ssim
            ssim = 0.
            for sf in range(vid_np.shape[0]):
                ssim += compare_ssim(highres_np[sf],
                                     vid_np[sf],
                                     gaussian_weights=True,
                                     sigma=1.5,
                                     use_sample_covariance=False,
                                     data_range=1.0)
            ssim = ssim / vid_np.shape[0]
            ssim_sum += ssim
            if seq % args.log_interval == 0:
                logging.info('Seq %.2d PSNR: %.2f SSIM: %.3f' %
                             (seq, psnr, ssim))
Example #19
im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)  #cv2.COLOR_BGR2RGB
im = cv2.resize(im, dsize=(128, 128), interpolation=cv2.INTER_CUBIC) / 256.0
im = np.clip(im, 0.001, 0.999)
plt.title("origin")
plt.imshow(im, vmin=0.0, vmax=1.0)
plt.show()

ksvd = KSVD(n_components=12, transform_n_nonzero_coefs=None, n_jobs=12)
ksvd.fit(im.reshape(-1, 64).astype(np.float64))

D = ksvd.components_.reshape(-1, 8, 8).astype(np.float64)
for i in range(12):
    plt.subplot(3, 4, i + 1)
    plt.imshow(D[i], vmin=0.0, vmax=1.0)
    plt.axis('off')
plt.show()

X = ksvd.transform(im.reshape(-1, 64).astype(np.float64))
plt.title("X")
plt.imshow(X, vmin=0.0, vmax=1.0)
plt.show()

plt.title("Re")
_y = np.dot(X, ksvd.components_)
_y = _y.reshape(128, 128).astype(np.float64)
plt.imshow(_y, vmin=0.0, vmax=1.0)
plt.show()

print(compare_psnr(im, _y))
print(compare_ssim(im, _y))
PSNR_fileWriter = csv.writer(final_psnr_file)
final_bm3d_file = open(directory + 'final_bm3d_log_%s.csv' % loss_type, 'a')
bm3d_fileWriter = csv.writer(final_bm3d_file)

for img in GT_imgs:
    #for img in ['bridge.png']:

    print(img)
    I = cv.imread(imgs_dir + img)
    I = np.float32(I)
    I = np.moveaxis(I, 2, 0) / 255.

    I_DIP, network, z, psnr_history, ssim_history, psnr_bm3d, ssim_bm3d, blurred_img = deblurring.dip_deblur(
        img, noise_lvl, filter_type, loss_type, directory)

    final_psnr = compare_psnr(I[[2, 1, 0], :, :], I_DIP)
    print('psnr = %.4f' % (final_psnr))

    row_str = ['%f' % final_psnr]
    PSNR_fileWriter.writerow(row_str)

    row_str = ['%f %f' % (psnr_bm3d, ssim_bm3d)]
    bm3d_fileWriter.writerow(row_str)

    ### for deciding number of iterations based on average psnr
    with open(directory + 'psnr_history_%s.txt' % loss_type, 'a') as f:
        for item in psnr_history:
            f.write("%s\n" % item)

    with open(directory + 'ssim_history_%s.txt' % loss_type, 'a') as f:
        for item in ssim_history:
            f.write("%s\n" % item)
Example #21
def psnr(gt, pred):
    """ Compute Peak Signal to Noise Ratio metric (PSNR) """
    return compare_psnr(gt, pred, data_range=gt.max() - gt.min())
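
A short usage note on the psnr helper above: tying data_range to the ground truth's own dynamic range makes the score invariant to a global rescaling of both images, which is convenient for data that is not normalized to [0, 1] (e.g. raw MRI or CT intensities). A minimal sketch, assuming float numpy inputs:

import numpy as np
from skimage.metrics import peak_signal_noise_ratio as compare_psnr

gt = np.random.rand(128, 128).astype(np.float32) * 1000.0        # arbitrary intensity scale
pred = (gt + np.random.normal(0, 10.0, gt.shape)).astype(np.float32)
print(psnr(gt, pred))          # unchanged if both gt and pred are multiplied by the same constant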
    #convex reg. reconstruction and FBP
    if clip_fbp:
        fbp_image = mayo_utils.cut_image(fbp_image, vmin=0.0, vmax=1.0)
        n_iter, lambda_acr = 400, 0.05
    else:
        n_iter, lambda_acr = 350, 0.04

    x_init = torch.from_numpy(fbp_image).view(fbp.size()).to(device)
    x_np_cvx, psnr_cvx, ssim_cvx = acr_optimizer(x_init,
                                                 phantom,
                                                 sinogram,
                                                 n_iter=n_iter,
                                                 lambda_acr=lambda_acr)

    psnr_fbp = compare_psnr(phantom_image, fbp_image, data_range=data_range)
    ssim_fbp = compare_ssim(phantom_image, fbp_image, data_range=data_range)

    recon_log = 'test-image [{:d}/{:d}]:\t FBP: PSNR {:.4f}, SSIM {:.4f}\t convex-reg: PSNR {:.4f}, SSIM {:.4f}\n'\
        .format(idx, len(eval_dataloader), psnr_fbp, ssim_fbp, psnr_cvx, ssim_cvx)

    print(recon_log)
    log_file.write(recon_log)

    ### compute running sum for average
    psnr_fbp_avg += psnr_fbp
    ssim_fbp_avg += ssim_fbp
    psnr_cvx_avg += psnr_cvx
    ssim_cvx_avg += ssim_cvx
    num_test_images += 1
    def closure():
        global i, net_input, psnr_history, psnr_history_short_HR, psnr_history_short_LR, orig_img_HR, per_sim_history

        if reg_noise_std > 0:
            net_input = net_input_saved + (noise.normal_() * reg_noise_std)

        out_HR = net(net_input)
        out_LR = custom_downsample(out_HR, factor, use_5_per)

        if loss_type == 'dip':
            total_loss = mse_fft(out_LR, img_LR_var)
            #total_loss = mse(out_LR, img_LR_var)
        elif loss_type == 'bp':
            total_loss = mse_bp(out_LR, img_LR_var.unsqueeze(0), factor)

        #print(total_loss)
        #print(tv_loss_try_abs(out_HR))
        if tv_weight > 0:
            mul_factor = 1
            total_loss = total_loss + tv_weight * tv_loss(
                out_HR, mul_factor).to(self.device)

        total_loss.backward()

        #high_res_ = np.squeeze(high_res, axis=0)
        #orig_img_HR = high_res_
        orig_img_HR = np.squeeze(high_res_, axis=0)
        orig_img_LR = low_res
        out_HR = out_HR.squeeze()
        out_LR = out_LR.squeeze()

        # History
        if i % 100 == 0:
            psnr_LR = compare_psnr(orig_img_LR,
                                   torch_to_np(out_LR),
                                   data_range=1)
            #psnr_HR = compare_psnr(orig_img_HR[:,pix_ignore:-pix_ignore, pix_ignore:-pix_ignore], torch_to_np(out_HR.unsqueeze(0))[:,pix_ignore:-pix_ignore, pix_ignore:-pix_ignore], data_range=1)
            psnr_HR = compare_psnr(orig_img_HR[pix_ignore:-pix_ignore,
                                               pix_ignore:-pix_ignore],
                                   torch_to_np(out_HR)[pix_ignore:-pix_ignore,
                                                       pix_ignore:-pix_ignore],
                                   data_range=1)
            if use_5_per == False:
                perc_sim_HR = compute_dists.compute(
                    orig_img_HR[pix_ignore:-pix_ignore,
                                pix_ignore:-pix_ignore],
                    torch_to_np(out_HR)[pix_ignore:-pix_ignore,
                                        pix_ignore:-pix_ignore])
            else:
                perc_sim_HR = np.zeros(5)
                for j in range(5):
                    perc_sim_HR[j] = compute_dists.compute(
                        orig_img_HR[j, pix_ignore:-pix_ignore,
                                    pix_ignore:-pix_ignore],
                        torch_to_np(out_HR)[j, pix_ignore:-pix_ignore,
                                            pix_ignore:-pix_ignore])
            psnr_history_short_HR.append([psnr_HR])
            psnr_history_short_LR.append([psnr_LR])
            per_sim_history.append([perc_sim_HR.mean()])

        if PLOT and i % 100 == 0:
            out_HR_np = torch_to_np(out_HR)
            if use_5_per == False:
                # plot_image_grid([np.expand_dims(high_res_, axis=0), img_nearest, img_bicubic, np.expand_dims(np.clip(out_HR_np, 0, 1), axis=0)],
                #                 directory, 'compare_img_'+img_name_for_plot, factor=13, nrow=4)
                plot_image_grid([
                    high_res_, img_nearest, img_bicubic,
                    np.expand_dims(np.clip(out_HR_np, 0, 1), axis=0)
                ],
                                directory,
                                'compare_img_' + img_name_for_plot,
                                factor=13,
                                nrow=4)
                plt.imsave(directory + 'orig_' + img_name_for_plot + '.png',
                           np.squeeze(high_res_),
                           cmap='gray')
                plt.imsave(directory + 'nn_' + img_name_for_plot + '.png',
                           np.squeeze(img_nearest),
                           cmap='gray')
                plt.imsave(directory + 'bicubic_' + img_name_for_plot + '.png',
                           np.squeeze(img_bicubic),
                           cmap='gray')
                plt.imsave(directory + 'dip_' + img_name_for_plot + '.png',
                           np.clip(out_HR_np, 0, 1),
                           cmap='gray')
            else:
                for j in range(5):
                    plot_image_grid([
                        np.expand_dims(high_res_[j, :, :], axis=0),
                        np.expand_dims(img_bicubic[j, :, :], axis=0),
                        np.expand_dims(np.clip(out_HR_np[j, :, :], 0, 1),
                                       axis=0)
                    ],
                                    directory,
                                    'compare_img_' + img_name_for_plot +
                                    'frame' + str(j),
                                    factor=13,
                                    nrow=3)
        if PLOT_PSNR and i % 100 == 0:
            print(
                'Iteration %04d    PSNR %.3f   perc_sim %.3f' %
                (i, psnr_HR, perc_sim_HR.mean()), '\r')
        if i == num_iter - 1:
            out_HR_np = torch_to_np(out_HR)
            if use_5_per == False:
                # imwrite_multi_tiff([high_res_, np.squeeze(img_bicubic), np.clip(out_HR_np, 0, 1)],
                #                    directory + 'final_comparison_' + img_name_for_plot + '.tiff')
                imwrite_multi_tiff([
                    np.squeeze(high_res_),
                    np.clip(out_HR_np, 0, 1),
                    np.squeeze(img_bicubic)
                ], directory + 'final_comparison_' + img_name_for_plot +
                                   '.tiff')
            else:
                for j in range(5):
                    imwrite_multi_tiff([
                        high_res_[j, :, :],
                        np.clip(out_HR_np[j, :, :], 0, 1),
                        np.squeeze(img_bicubic[j, :, :])
                    ], directory + 'final_comparison_' + img_name_for_plot +
                                       'frame' + str(j) + '.tiff')
            #imwrite_multi_tiff([out_LR.detach().cpu().numpy(), img_LR_var.detach().cpu().numpy().squeeze()], directory + '1.tiff')
            #imwrite_multi_tiff([out_LR.detach().cpu().numpy(), img_LR_var.detach().cpu().numpy().squeeze()], directory + '1.tiff')

            # ### heat map ###
            # img_diff = (high_res_ - np.clip(out_HR_np, 0, 1)) ** 2
            # heat_map = ndimage.filters.gaussian_filter(img_diff, sigma=16)
            # max_val = np.max(heat_map)
            # min_val = np.min(heat_map)
            # norm_heat_map = (heat_map - min_val) / (max_val - min_val)
            # plt.imshow(high_res_)
            # plt.imshow(255 * norm_heat_map, alpha=0.5, cmap='viridis')
            # plt.axis('on')
            # plt.savefig(directory + 'heat_map_' + img_name_for_plot + '.jpg', bbox_inches='tight', pad_inches=0)

        i += 1

        return total_loss
def psnr(frames1, frames2):
    error = 0
    for i in range(len(frames1)):
        error += compare_psnr(frames1[i], frames2[i])
    return error / len(frames1)
Example #25
def calc_psnr(im1, im2):
    im1_y = cv2.cvtColor(im1, cv2.COLOR_BGR2YCR_CB)[:, :, 0]
    im2_y = cv2.cvtColor(im2, cv2.COLOR_BGR2YCR_CB)[:, :, 0]
    return compare_psnr(im1_y, im2_y)
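
A hedged usage sketch for the Y-channel PSNR above, assuming im1 and im2 are 8-bit BGR arrays of the kind cv2.imread returns (random stand-ins are used here so the snippet is self-contained):

import cv2
import numpy as np
from skimage.metrics import peak_signal_noise_ratio as compare_psnr

im1 = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)
im2 = np.clip(im1 + np.random.normal(0, 3, im1.shape), 0, 255).astype(np.uint8)
print(calc_psnr(im1, im2))     # PSNR computed on the luma (Y) channel only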
def dip_sr(img_name, loss_type, directory, pix_ignore, factor, imgs_dir,
           use_5_per):

    img_name_for_plot = img_name[0:-5]
    print(img_name_for_plot)
    learning_rate = 1e-3
    OPTIMIZER = 'adam'
    if factor == 3:
        num_iter = 2000
        reg_noise_std = 0.03
    if factor == 5:
        num_iter = 600
        reg_noise_std = 0.03

    tv_weight = 0
    PLOT = True
    PLOT_PSNR = True

    path_to_image = imgs_dir + img_name
    low_res, high_res = load_images_publication.tiff_imgs(
        path_to_image, factor, use_5_per)

    # sanity check
    #low_res = create_lr_image.create(high_res)
    if use_5_per == False:
        high_res_pil = Image.fromarray(np.uint8(high_res * 255), 'L')
        low_res_pil = Image.fromarray(np.uint8(low_res * 255), 'L')
        img_bicubic, img_sharp, img_nearest = get_baselines(
            low_res_pil, high_res_pil)
        high_res = np.expand_dims(high_res, axis=0)
        low_res_for_up = torch.from_numpy(low_res)
        low_res_for_up = torch.unsqueeze(low_res_for_up, 0).unsqueeze(0)
        img_upsampled_torch, img_bicubic1 = upsample_using_h(
            low_res_for_up, factor, self.device)
        img_upsampled = img_upsampled_torch.cpu().numpy()
        img_upsampled = np.squeeze(img_upsampled, axis=0)
        img_upsampled = (img_upsampled - img_upsampled.min()) / (
            img_upsampled.max() - img_upsampled.min())
    else:
        img_bicubic = np.zeros((5, high_res.shape[1], high_res.shape[2]))
        img_nearest = np.zeros((5, high_res.shape[1], high_res.shape[2]))
        for j in range(5):
            img_bicubic[j, :, :] = cv2.resize(low_res[j, :, :],
                                              dsize=high_res.shape[1:],
                                              interpolation=cv2.INTER_CUBIC)
            img_nearest[j, :, :] = cv2.resize(low_res[j, :, :],
                                              dsize=high_res.shape[1:],
                                              interpolation=cv2.INTER_NEAREST)

    high_res_ = high_res

    psnr_bicubic = compare_psnr(high_res, img_bicubic, data_range=1)
    psnr_nn = compare_psnr(high_res, img_nearest, data_range=1)
    psnr_custom = compare_psnr(high_res, img_upsampled, data_range=1)
    if use_5_per == False:
        perc_sim_bicubic = compute_dists.compute(high_res[0, :, :],
                                                 img_bicubic[0, :, :])
        perc_sim_bicubic = perc_sim_bicubic.cpu().detach().numpy()[0][0][0][0]
        perc_sim_nn = compute_dists.compute(high_res[0, :, :],
                                            img_nearest[0, :, :])
        perc_sim_nn = perc_sim_nn.cpu().detach().numpy()[0][0][0][0]
        perc_sim_custom = compute_dists.compute(high_res[0, :, :],
                                                img_upsampled[0, :, :])
        perc_sim_custom = perc_sim_custom.cpu().detach().numpy()[0][0][0][0]

    else:
        perc_sim_bicubic = np.zeros(5)
        perc_sim_nn = np.zeros(5)
        for j in range(5):
            perc_sim_bicubic[j] = compute_dists.compute(
                high_res[j, :, :], img_bicubic[j, :, :])
            #perc_sim_bicubic[j] = perc_sim_bicubic.cpu().detach().numpy()[0][0][0][0]
            perc_sim_nn[j] = compute_dists.compute(high_res[j, :, :],
                                                   img_nearest[j, :, :])
            #perc_sim_nn[j] = perc_sim_nn.cpu().detach().numpy()[0][0][0][0]

    #psnr_basic = compare_psnr(high_res_, low_for_psnr)
    if PLOT:
        if use_5_per == False:
            plot_image_grid([high_res, img_bicubic, img_nearest], directory,
                            'basic_compare_img_' + img_name_for_plot, 3, 12)
        else:
            for j in range(5):
                plot_image_grid([
                    np.expand_dims(high_res[j, :, :], axis=0),
                    np.expand_dims(img_bicubic[j, :, :], axis=0),
                    np.expand_dims(img_nearest[j, :, :], axis=0)
                ], directory, 'basic_compare_img_' + img_name_for_plot +
                                'frame_' + str(j), 3, 12)
        print('PSNR bicubic: %.4f   PSNR nearest: %.4f' %
              (psnr_bicubic, psnr_nn))
        print('per_sim bicubic: %.4f   per_sim nearest: %.4f' %
              (perc_sim_bicubic.mean(), perc_sim_nn.mean()))

    input_depth = 32
    INPUT = 'noise'
    pad = 'reflection'
    OPT_OVER = 'net'
    #KERNEL_TYPE = 'lanczos2'

    net_input = get_noise(
        input_depth, INPUT,
        (high_res.shape[-1], high_res.shape[-2])).type(dtype).detach()

    NET_TYPE = 'skip'  # UNet, ResNet
    net = get_net(input_depth,
                  'skip',
                  pad,
                  n_channels=high_res.shape[0],
                  skip_n33d=128,
                  skip_n33u=128,
                  skip_n11=4,
                  num_scales=5,
                  upsample_mode='bilinear').type(dtype)

    # Losses
    img_LR_var = np_to_torch(low_res).type(dtype)

    def closure():
        global i, net_input, psnr_history, psnr_history_short_HR, psnr_history_short_LR, orig_img_HR, per_sim_history

        if reg_noise_std > 0:
            net_input = net_input_saved + (noise.normal_() * reg_noise_std)

        out_HR = net(net_input)
        out_LR = custom_downsample(out_HR, factor, use_5_per)

        if loss_type == 'dip':
            total_loss = mse_fft(out_LR, img_LR_var)
            #total_loss = mse(out_LR, img_LR_var)
        elif loss_type == 'bp':
            total_loss = mse_bp(out_LR, img_LR_var.unsqueeze(0), factor)

        #print(total_loss)
        #print(tv_loss_try_abs(out_HR))
        if tv_weight > 0:
            mul_factor = 1
            total_loss = total_loss + tv_weight * tv_loss(
                out_HR, mul_factor).to(self.device)

        total_loss.backward()

        #high_res_ = np.squeeze(high_res, axis=0)
        #orig_img_HR = high_res_
        orig_img_HR = np.squeeze(high_res_, axis=0)
        orig_img_LR = low_res
        out_HR = out_HR.squeeze()
        out_LR = out_LR.squeeze()

        # History
        if i % 100 == 0:
            psnr_LR = compare_psnr(orig_img_LR,
                                   torch_to_np(out_LR),
                                   data_range=1)
            #psnr_HR = compare_psnr(orig_img_HR[:,pix_ignore:-pix_ignore, pix_ignore:-pix_ignore], torch_to_np(out_HR.unsqueeze(0))[:,pix_ignore:-pix_ignore, pix_ignore:-pix_ignore], data_range=1)
            psnr_HR = compare_psnr(orig_img_HR[pix_ignore:-pix_ignore,
                                               pix_ignore:-pix_ignore],
                                   torch_to_np(out_HR)[pix_ignore:-pix_ignore,
                                                       pix_ignore:-pix_ignore],
                                   data_range=1)
            if use_5_per == False:
                perc_sim_HR = compute_dists.compute(
                    orig_img_HR[pix_ignore:-pix_ignore,
                                pix_ignore:-pix_ignore],
                    torch_to_np(out_HR)[pix_ignore:-pix_ignore,
                                        pix_ignore:-pix_ignore])
            else:
                perc_sim_HR = np.zeros(5)
                for j in range(5):
                    perc_sim_HR[j] = compute_dists.compute(
                        orig_img_HR[j, pix_ignore:-pix_ignore,
                                    pix_ignore:-pix_ignore],
                        torch_to_np(out_HR)[j, pix_ignore:-pix_ignore,
                                            pix_ignore:-pix_ignore])
            psnr_history_short_HR.append([psnr_HR])
            psnr_history_short_LR.append([psnr_LR])
            per_sim_history.append([perc_sim_HR.mean()])

        if PLOT and i % 100 == 0:
            out_HR_np = torch_to_np(out_HR)
            if use_5_per == False:
                # plot_image_grid([np.expand_dims(high_res_, axis=0), img_nearest, img_bicubic, np.expand_dims(np.clip(out_HR_np, 0, 1), axis=0)],
                #                 directory, 'compare_img_'+img_name_for_plot, factor=13, nrow=4)
                plot_image_grid([
                    high_res_, img_nearest, img_bicubic,
                    np.expand_dims(np.clip(out_HR_np, 0, 1), axis=0)
                ],
                                directory,
                                'compare_img_' + img_name_for_plot,
                                factor=13,
                                nrow=4)
                plt.imsave(directory + 'orig_' + img_name_for_plot + '.png',
                           np.squeeze(high_res_),
                           cmap='gray')
                plt.imsave(directory + 'nn_' + img_name_for_plot + '.png',
                           np.squeeze(img_nearest),
                           cmap='gray')
                plt.imsave(directory + 'bicubic_' + img_name_for_plot + '.png',
                           np.squeeze(img_bicubic),
                           cmap='gray')
                plt.imsave(directory + 'dip_' + img_name_for_plot + '.png',
                           np.clip(out_HR_np, 0, 1),
                           cmap='gray')
            else:
                for j in range(5):
                    plot_image_grid([
                        np.expand_dims(high_res_[j, :, :], axis=0),
                        np.expand_dims(img_bicubic[j, :, :], axis=0),
                        np.expand_dims(np.clip(out_HR_np[j, :, :], 0, 1),
                                       axis=0)
                    ],
                                    directory,
                                    'compare_img_' + img_name_for_plot +
                                    'frame' + str(j),
                                    factor=13,
                                    nrow=3)
        if PLOT_PSNR and i % 100 == 0:
            print(
                'Iteration %04d    PSNR %.3f   perc_sim %.3f' %
                (i, psnr_HR, perc_sim_HR.mean()), '\r')
        if i == num_iter - 1:
            out_HR_np = torch_to_np(out_HR)
            if use_5_per == False:
                # imwrite_multi_tiff([high_res_, np.squeeze(img_bicubic), np.clip(out_HR_np, 0, 1)],
                #                    directory + 'final_comparison_' + img_name_for_plot + '.tiff')
                imwrite_multi_tiff([
                    np.squeeze(high_res_),
                    np.clip(out_HR_np, 0, 1),
                    np.squeeze(img_bicubic)
                ], directory + 'final_comparison_' + img_name_for_plot +
                                   '.tiff')
            else:
                for j in range(5):
                    imwrite_multi_tiff([
                        high_res_[j, :, :],
                        np.clip(out_HR_np[j, :, :], 0, 1),
                        np.squeeze(img_bicubic[j, :, :])
                    ], directory + 'final_comparison_' + img_name_for_plot +
                                       'frame' + str(j) + '.tiff')
            #imwrite_multi_tiff([out_LR.detach().cpu().numpy(), img_LR_var.detach().cpu().numpy().squeeze()], directory + '1.tiff')
            #imwrite_multi_tiff([out_LR.detach().cpu().numpy(), img_LR_var.detach().cpu().numpy().squeeze()], directory + '1.tiff')

            # ### heat map ###
            # img_diff = (high_res_ - np.clip(out_HR_np, 0, 1)) ** 2
            # heat_map = ndimage.filters.gaussian_filter(img_diff, sigma=16)
            # max_val = np.max(heat_map)
            # min_val = np.min(heat_map)
            # norm_heat_map = (heat_map - min_val) / (max_val - min_val)
            # plt.imshow(high_res_)
            # plt.imshow(255 * norm_heat_map, alpha=0.5, cmap='viridis')
            # plt.axis('on')
            # plt.savefig(directory + 'heat_map_' + img_name_for_plot + '.jpg', bbox_inches='tight', pad_inches=0)

        i += 1

        return total_loss

    global psnr_history, psnr_history_short_HR, psnr_history_short_LR, orig_img_HR, per_sim_history
    psnr_history = []
    psnr_history_short_HR = []
    psnr_history_short_LR = []
    per_sim_history = []

    net_input_saved = net_input.detach().clone()
    noise = net_input.detach().clone()

    global i
    i = 0
    p = get_params(OPT_OVER, net, net_input)
    optimize(OPTIMIZER, p, closure, learning_rate, num_iter)

    out_HR_np = np.clip(torch_to_np(net(net_input)), 0, 1)
    #result_deep_prior = put_in_center(out_HR_np, imgs['orig_np'].shape[1:])

    return out_HR_np, orig_img_HR, net, net_input, psnr_history_short_HR, psnr_history_short_LR, \
           per_sim_history, psnr_bicubic, psnr_nn, psnr_custom, perc_sim_bicubic, perc_sim_nn, perc_sim_custom
    def closure_mcdip():

        global i, out_avg, psnr_noisy_last, last_net, net_input, losses, psnrs, ssims, average_dropout_rate, no_layers,\
               img_mean, sample_count, recons, uncerts, uncerts_ale, loss_last

        if reg_noise_std > 0:
            net_input = net_input_saved + (noise.normal_() * reg_noise_std)

        out = net(net_input)
        out[:, :1] = out[:, :1].sigmoid()

        _loss = gaussian_nll(out[:, :1], out[:, 1:], img_noisy_torch)
        _loss.backward()

        out[:, 1:] = torch.exp(-out[:, 1:])  # aleatoric uncertainty

        # Smoothing
        if out_avg is None:
            out_avg = out.detach()
        else:
            out_avg = out_avg * exp_weight + out.detach() * (1 - exp_weight)

        losses.append(mse(out_avg[:, :1], img_noisy_torch).item())

        _out = out.detach().cpu().numpy()[0, :1]
        _out_avg = out_avg.detach().cpu().numpy()[0, :1]

        psnr_noisy = compare_psnr(img_noisy_np, _out)
        psnr_gt = compare_psnr(img_np, _out)
        psnr_gt_sm = compare_psnr(img_np, _out_avg)

        ssim_noisy = compare_ssim(img_noisy_np[0], _out[0])
        ssim_gt = compare_ssim(img_np[0], _out[0])
        ssim_gt_sm = compare_ssim(img_np[0], _out_avg[0])

        psnrs.append([psnr_noisy, psnr_gt, psnr_gt_sm])
        ssims.append([ssim_noisy, ssim_gt, ssim_gt_sm])

        if PLOT and i % show_every == 0:
            print(
                f'Iteration: {i} Loss: {_loss.item():.4f} PSNR_noisy: {psnr_noisy:.4f} PSNR_gt: {psnr_gt:.4f} PSNR_gt_sm: {psnr_gt_sm:.4f}'
            )

            img_list = []
            aleatoric_list = []

            with torch.no_grad():
                net_input = net_input_saved + (noise.normal_() * reg_noise_std)

                for _ in range(mc_iter):
                    img = net(net_input)
                    img[:, :1] = img[:, :1].sigmoid()
                    img[:, 1:] = torch.exp(-img[:, 1:])
                    img_list.append(torch_to_np(img[:1]))
                    aleatoric_list.append(torch_to_np(img[:, 1:]))

            img_list_np = np.array(img_list)
            out_np = np.mean(img_list_np, axis=0)[:1]
            out_np_ale = np.mean(aleatoric_list, axis=0)[:1]
            out_np_var = np.var(img_list_np, axis=0)[:1]

            psnr_noisy = compare_psnr(img_noisy_np, out_np)
            psnr_gt = compare_psnr(img_np, out_np)

            print('mean epi', out_np_var.mean())
            print('mean ale', out_np_ale.mean())
            print('###################')

            recons.append(out_np)
            uncerts.append(out_np_var)
            uncerts_ale.append(out_np_ale)

        i += 1

        return _loss
Example #28
xs = np.random.randint(0, 256 - args.random_crop) if size is not None else 0
ys = np.random.randint(0, 256 - args.random_crop) if size is not None else 0

plt.style.use('dark_background')
fig, axes = plt.subplots(2, 3, figsize=(10, 6))
fig.canvas.set_window_title('Compare results')

for i, key in enumerate(samples.keys()):
    x = i // 3
    y = i % 3
    img = load_image(samples[key][idx], xs=xs, ys=ys, size=size)
    if i == 0:
        gt = img.copy()
        text = 'PSNR: inf SSIM: 1.0000'
    else:
        psnr = compare_psnr(gt, img, data_range=255)
        ssim = compare_ssim(gt, img, data_range=255)
        text = 'PSNR: {0:0.2f} SSIM: {1:0.4f}'.format(psnr, ssim)

    axes[x, y].imshow(img, cmap='gray')
    axes[x, y].set_title(key)
    axes[x, y].set_xticks([])
    axes[x, y].set_yticks([])
    axes[x, y].text(0.5,
                    -0.05,
                    text,
                    size=8,
                    ha="center",
                    transform=axes[x, y].transAxes)

plt.show()
Example #29
def dip_deblur(img_name, noise_lvl, filter_type, loss_type, directory):
    learning_rate = 0.01
    OPTIMIZER = 'adam'
    if loss_type == 'dip':
        num_iter = 10000
    elif loss_type == 'bp':
        num_iter = 7000
    reg_noise_std = 0.03
    PLOT = False

    path_to_image = 'data_set14/' + img_name
    ### Load image and baselines ###
    imgs = load_LR_HR_imgs_sr(path_to_image, imsize, factor, enforse_div32)
    if imgs['HR_np'].shape[0] == 1:
        imgs['HR_np'] = cv2.cvtColor(np.moveaxis(imgs['HR_np'], 0, 2), cv2.COLOR_GRAY2RGB)
        imgs['HR_np'] = np.moveaxis(imgs['HR_np'], 2, 0)

    ### Set up parameters and net ###
    input_depth = 32
    INPUT = 'noise'
    pad = 'reflection'
    OPT_OVER = 'net'

    net_input = get_noise(input_depth, INPUT, (imgs['HR_pil'].size[1], imgs['HR_pil'].size[0])).type(dtype).detach()

    NET_TYPE = 'skip'  # UNet, ResNet
    net = get_net(input_depth, NET_TYPE, pad,
                  skip_n33d=128,
                  skip_n33u=128,
                  skip_n11=4,
                  num_scales=5,
                  upsample_mode='bilinear').type(dtype)

    tmp_img = torch.tensor(imgs['HR_np']).unsqueeze(0).to(self.device)
    img_blurred = blur(tmp_img, filter_type)
    noise_lvl = np.sqrt(noise_lvl) / 255
    e = torch.randn(img_blurred.shape).to(self.device) * noise_lvl  # noise with normal distribution
    y = img_blurred + e

    ### bm3d ###
    psf = filter(filter_type).cpu().numpy()
    psf = np.squeeze(psf)
    y_bm3d = y.cpu().numpy()
    y_bm3d = np.moveaxis(y_bm3d, 1, -1)
    y_bm3d = np.squeeze(y_bm3d)
    img_bm3d = bm3d.bm3d_deblurring(y_bm3d, noise_lvl, psf, 'np')
    img_bm3d = np.moveaxis(img_bm3d, -1, 0)
    psnr_bm3d = compare_psnr(imgs['HR_np'], img_bm3d)
    ssim_bm3d = compare_ssim(np.moveaxis(imgs['HR_np'], 0, -1), np.moveaxis(img_bm3d, 0, -1), multichannel=True)

    ### Define closure and optimize ###
    def closure():
        global i, psnr_history, psnr_history_short, ssim_history_short

        if reg_noise_std > 0:
            net_input = net_input_saved + (noise.normal_() * reg_noise_std)

        x_hat = net(net_input)
        if loss_type == 'dip':
            tv_weight = 0  # 0 if no TV; 1e-5 for radial/gaussian or 1e-6 for uniform filter when TV is on
            fourier_k, fourier_conv = torch_fourier_conv(x_hat, filter(filter_type))
            fft_y = torch.rfft(y, 2, onesided=False, normalized=False).cpu()
            total_loss = mse(fourier_conv, fft_y).to(self.device)
        elif loss_type == 'bp':
            tv_weight = 1e-3 # 1e-3 or 0 if no tv
            total_loss = BP_loss(x_hat, y, noise_lvl, filter_type).to(self.device)

        if tv_weight > 0:
            mul_factor = 0
            #print(total_loss)
            #print(tv_weight * tv_loss(x_hat, mul_factor).to(self.device))
            total_loss = total_loss + tv_weight * tv_loss(x_hat, mul_factor).to(self.device)

        total_loss.backward()

        # Log
        orig_img = imgs['HR_np']
        x_hat_np = torch_to_np(x_hat)
        psnr = compare_psnr(orig_img, x_hat_np)
        ssim = compare_ssim(np.moveaxis(orig_img, 0, -1), np.moveaxis(x_hat_np, 0, -1), multichannel=True)

        # History
        psnr_history.append([psnr])
        if i % 100 == 0:
            psnr_history_short.append([psnr])
            ssim_history_short.append([ssim])
            print('Iteration %05d     PSNR %.3f     SSIM %.3f' % (i, psnr, ssim), '\r')

        if PLOT and i % 100 == 0:
            x_hat_np = torch_to_np(x_hat)
            plot_image_grid([imgs['HR_np'], x_hat_np], factor=13, nrow=3)
            print('Iteration %05d     PSNR %.3f' % (i, psnr), '\r')
            print('Iteration %05d     SSIM %.3f' % (i, ssim), '\r')
        i += 1

        return total_loss

    global psnr_history, psnr_history_short, ssim_history_short
    psnr_history = []
    psnr_history_short = []
    ssim_history_short = []
    net_input_saved = net_input.detach().clone()
    noise = net_input.detach().clone()

    global i
    i = 0
    p = get_params(OPT_OVER, net, net_input)
    optimize(OPTIMIZER, p, closure, learning_rate, num_iter)

    # get final result (constructed image)
    constructed_img = np.clip(torch_to_np(net(net_input)), 0, 1)

    return constructed_img, net, net_input, psnr_history_short, ssim_history_short, psnr_bm3d, ssim_bm3d, y
Example #30
def ssim_mse_psnr(image_true, image_test):
    mse = compare_mse(image_true, image_test)
    ssim = compare_ssim(image_true, image_test)
    psnr = compare_psnr(image_true, image_test)
    return ssim, mse, psnr
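
A closing usage sketch for the metric triple above, assuming same-dtype 8-bit grayscale inputs so skimage can infer data_range from the dtype; on recent skimage releases the compare_* helpers correspond to structural_similarity, mean_squared_error, and peak_signal_noise_ratio:

import numpy as np
from skimage.metrics import structural_similarity as compare_ssim
from skimage.metrics import mean_squared_error as compare_mse
from skimage.metrics import peak_signal_noise_ratio as compare_psnr

image_true = np.random.randint(0, 256, (128, 128), dtype=np.uint8)
image_test = np.clip(image_true + np.random.normal(0, 10, image_true.shape), 0, 255).astype(np.uint8)
ssim, mse, psnr = ssim_mse_psnr(image_true, image_test)
print('SSIM %.4f  MSE %.2f  PSNR %.2f dB' % (ssim, mse, psnr))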