Example #1
 def evaluate(self,
              adv_xs=None,
              cln_xs=None,
              cln_ys=None,
              adv_ys=None,
              target_preds=None,
              target_flag=False):
     '''
     @description:
     @param {
         adv_xs: adversarial examples
         cln_xs: clean (original) samples
         cln_ys: original labels, i.e. the classes of the clean samples under a non-targeted attack
         adv_ys: predicted classes of the adversarial examples
         target_preds: under a targeted attack, the target classes the clean samples are meant to be misclassified as
         target_flag: whether the attack is targeted
     }
     @return: ass {Average Structural Similarity}
     '''
     total = len(adv_xs)
     print("total", total)
     ori_r_channel = np.transpose(np.round(cln_xs.numpy() * 255),
                                  (0, 2, 3, 1)).astype(dtype=np.float32)
     adv_r_channel = np.transpose(np.round(adv_xs.numpy() * 255),
                                  (0, 2, 3, 1)).astype(dtype=np.float32)
     totalSSIM = 0
     number = 0
     predicts = list()
     outputs = torch.from_numpy(self.outputs_adv)
     preds = torch.argmax(outputs, 1)
     preds = preds.data.numpy()
     predicts.extend(preds)
     labels = target_preds.numpy()
     if not target_flag:
         for i in range(len(predicts)):
             if predicts[i] != labels[i]:
                 number += 1
                 totalSSIM += SSIM(X=ori_r_channel[i],
                                   Y=adv_r_channel[i],
                                   multichannel=True)
     else:
         for i in range(len(predicts)):
             if predicts[i] == labels[i]:
                 number += 1
                 totalSSIM += SSIM(X=ori_r_channel[i],
                                   Y=adv_r_channel[i],
                                   multichannel=True)
     if not number == 0:
         ass = totalSSIM / number
     else:
         ass = totalSSIM / (number + MIN_COMPENSATION)
     return ass
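
These snippets all call scikit-image's structural similarity through an SSIM alias, presumably "from skimage.metrics import structural_similarity as SSIM" (or the older skimage.measure.compare_ssim). A minimal sketch of that assumed import and of the per-image averaging pattern used above; note that recent scikit-image releases replace multichannel=True with channel_axis=-1:

# Assumed import behind the SSIM alias used throughout these examples.
# Older scikit-image versions: from skimage.measure import compare_ssim as SSIM
import numpy as np
from skimage.metrics import structural_similarity as SSIM


def average_ssim(clean_batch, adv_batch):
    # clean_batch / adv_batch: arrays of shape (N, H, W, C) in the 0-255 range.
    total = 0.0
    for x, y in zip(clean_batch, adv_batch):
        # channel_axis=-1 is the current equivalent of multichannel=True.
        total += SSIM(x, y, channel_axis=-1, data_range=255)
    return total / len(clean_batch)
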
Example #2
    def get_acc(self, output, target):
        fake = self.tensor2im(output.data)
        real = self.tensor2im(target.data)
        psnr = PSNR(fake, real)
        ssim = SSIM(fake, real, multichannel=True)

        return psnr, ssim
Example #3
    def avg_SSIM(self):

        ori_r_channel = np.round(self.nature_samples *
                                 255).astype(dtype=np.float32)
        adv_r_channel = np.round(self.adv_samples *
                                 255).astype(dtype=np.float32)

        totalSSIM = 0
        cnt = 0
        """
        For SSIM function in skimage: http://scikit-image.org/docs/dev/api/skimage.measure.html

        multichannel : bool, optional If True, treat the last dimension of the array as channels. Similarity calculations are done 
        independently for each channel then averaged.
        """
        for i in range(len(self.adv_samples)):
            if self.successful(adv_softmax_preds=self.softmax_prediction[i],
                               nature_true_preds=self.labels_samples[i]):
                cnt += 1
                totalSSIM += SSIM(X=ori_r_channel[i],
                                  Y=adv_r_channel[i],
                                  multichannel=True)

        print('ASS:\t{:.3f}'.format(totalSSIM / cnt))
        return totalSSIM / cnt
Example #4
def train():
    ckpt_path = FLAGS.train_dir
    if not os.path.exists(ckpt_path):
        os.makedirs(ckpt_path)

    data = get_data(FLAGS.test_data_in_path)
    data -= 128
    data = utils.padding(data)
    #target_batch,input_batch=dataset.get_batch(flags.batch_size)
    gpu_options = tf.GPUOptions(allow_growth=True)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        model, wrong = create_model(ckpt_path, FLAGS.optimizer, sess)
        if (wrong == True):
            return
        #_,training_loss=model.step(sess,input_batch,target_batch,training=True)
        prediction, _ = model.step(sess, data, data, training=False)

    #print prediction
    prediction = np.reshape(prediction, prediction.shape[1:4])
    out_im = Image.fromarray(prediction.astype(np.uint8))
    out_im.save(FLAGS.test_data_out_path)

    # evaluate
    im = Image.open(FLAGS.original_data_path)
    original = np.array(im, dtype=np.uint8)
    prediction = np.uint8(prediction)
    PSNR_score = PSNR(original, prediction)
    SSIM_score, _ = SSIM(original, prediction, full=True, multichannel=True)
    print 'PSNR:' + str(PSNR_score) + ' SSIM:' + str(SSIM_score)
Example #5
def compute_ssim(a, b):
    ssim = []
    num_imgs = a.shape[0]
    for i in range(num_imgs):
        ssim.append(SSIM(a[i], b[i], data_range=(1 - (-1)), multichannel=True))

    # dssim
    return (1 - np.array(ssim).mean()) / 2
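
compute_ssim above averages SSIM over the batch and returns DSSIM = (1 - SSIM) / 2, which maps SSIM's [-1, 1] range onto [0, 1] (0 for identical images); data_range=(1 - (-1)) = 2 matches inputs scaled to [-1, 1]. A hypothetical call, with random batches standing in for real data:

import numpy as np

# Hypothetical usage: two (N, H, W, C) batches scaled to [-1, 1].
a = np.random.uniform(-1, 1, size=(4, 64, 64, 3)).astype(np.float32)
b = np.clip(a + np.random.normal(0, 0.05, a.shape), -1, 1).astype(np.float32)
print(compute_ssim(a, b))  # close to 0 for nearly identical batches
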
Example #6
 def get_images_and_metrics(self, inp, output, target):
     inp = self.tensor2im(inp)
     fake = self.tensor2im(output.data)
     real = self.tensor2im(target.data)
     psnr = PSNR(fake, real)
     ssim = SSIM(fake, real, multichannel=True)
     vis_img = np.hstack((inp, fake, real))
     return psnr, ssim, vis_img
Example #7
def get_metrics(output, ground_truth, criterionMSE):
    img1 = np.tensordot(output.cpu().numpy()[0, :3].transpose(1, 2, 0),
                        [0.298912, 0.586611, 0.114478],
                        axes=1)
    img2 = np.tensordot(ground_truth.cpu().numpy()[0, :3].transpose(1, 2, 0),
                        [0.298912, 0.586611, 0.114478],
                        axes=1)

    mse = criterionMSE(output, ground_truth).item()
    psnr = 10 * np.log10(1 / mse)
    ssim = SSIM(img1, img2)
    return mse, psnr, ssim
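
The weights [0.298912, 0.586611, 0.114478] are approximately the ITU-R BT.601 luma coefficients, so SSIM here is computed on a single luminance channel rather than averaged over RGB channels. A small sketch of the same conversion, assuming CHW float images in [0, 1]; newer scikit-image versions expect an explicit data_range for floating-point inputs:

import numpy as np
from skimage.metrics import structural_similarity as SSIM

BT601 = [0.298912, 0.586611, 0.114478]


def rgb_to_luma(chw):
    # (3, H, W) float image in [0, 1] -> (H, W) luminance image.
    return np.tensordot(chw.transpose(1, 2, 0), BT601, axes=1)

# Hypothetical usage with CHW arrays `out` and `gt` in [0, 1]:
# ssim = SSIM(rgb_to_luma(out), rgb_to_luma(gt), data_range=1.0)
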
Example #8
    def get_images_and_metrics(self, inps, outputs,
                               targets) -> (float, float, np.ndarray):
        psnr = 0
        ssim = 0
        for i in range(len(inps)):
            inp = inps[i:i + 1]
            output = outputs[i:i + 1]
            target = targets[i:i + 1]
            inp = self.tensor2im(inp.data)
            fake = self.tensor2im(output.data)
            real = self.tensor2im(target.data)
            psnr += PSNR(fake, real)
            ssim += SSIM(fake, real, multichannel=True)
            vis_img = np.hstack((inp, fake, real))

        return psnr / len(inps), ssim / len(inps), vis_img
Example #9
def compare_images_raw(image_a, image_b, show=False):
    bw_image_a = cv2.cvtColor(image_a, cv2.COLOR_BGR2GRAY)
    bw_image_b = cv2.cvtColor(image_b, cv2.COLOR_BGR2GRAY)

    # Ensure both images have the same size, by downsampling the larger one
    # to the smaller one's dimensions
    if bw_image_a.shape[0] < bw_image_b.shape[0] or bw_image_a.shape[
            1] < bw_image_b.shape[1]:
        bw_image_b = cv2.resize(bw_image_b, bw_image_a.shape[::-1])
    elif bw_image_a.shape[0] > bw_image_b.shape[0] or bw_image_a.shape[
            1] > bw_image_b.shape[1]:
        bw_image_a = cv2.resize(bw_image_a, bw_image_b.shape[::-1])

    # Calculate ssim value and image
    ssim, image_ssim = SSIM(bw_image_a, bw_image_b, full=True)

    # Convert ssim value to a percentage
    similarity = 100 * (ssim + 1) / 2.0

    if show:
        show_diff(image_a, image_b, image_ssim, ssim, similarity)

    return similarity
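
compare_images_raw works on grayscale copies of the inputs, uses full=True so that SSIM also returns the per-pixel similarity map consumed by show_diff, and rescales the score from [-1, 1] to a 0-100 similarity percentage. A hypothetical call (the file names are placeholders):

import cv2

image_a = cv2.imread("frame_a.png")  # placeholder paths
image_b = cv2.imread("frame_b.png")
similarity = compare_images_raw(image_a, image_b, show=False)
print("{:.1f}% similar".format(similarity))
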
Example #10
            ])
    f_ifft = f_ifft / (M * N)
    return f_ifft


curr_dir = 'D:/Sem 7/Image Processing EE 610/IP Assignment 2/dataset/'
ground_truth_image = cv2.imread(curr_dir + 'GroundTruth3.jpg')
g = cv2.imread(curr_dir + 'Blurry1_3.png')
h = cv2.imread(curr_dir + 'K3.png')
g = g / 255.0
h = h / 255.0
ground_truth_image = ground_truth_image / 255.0

# interactive_weiner_filter(g, h)

deblurred_image = weiner_filter(g, h, 0.14)
# cv2.imwrite('D:/Sem 7/Image Processing EE 610/IP Assignment 2/weiner_1_3.png', 255*deblurred_image)
MSE = np.mean((ground_truth_image - deblurred_image)**2)
print(np.max(np.max(ground_truth_image)))
PSNR = 10 * np.log10(255.0 * 255.0 / MSE)
print("PSNR =", PSNR)
print(
    "SSIM =",
    SSIM(ground_truth_image,
         deblurred_image,
         data_range=deblurred_image.max() - deblurred_image.min(),
         multichannel=True))

cv2.imshow('image', deblurred_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
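
Note that this snippet divides the images by 255 before computing the MSE but still uses a peak value of 255 in the PSNR formula; for data normalized to [0, 1] the peak should be 1.0. A sketch of the consistent form, reusing the same variable names:

import numpy as np

# PSNR consistent with images normalized to [0, 1] (peak value 1.0).
MSE = np.mean((ground_truth_image - deblurred_image) ** 2)
PSNR = 10 * np.log10(1.0 / MSE)
print("PSNR =", PSNR)
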
Example #11
def train():
    ckpt_path = FLAGS.train_dir
    if not os.path.exists(ckpt_path):
        os.makedirs(ckpt_path)

    data = get_data(FLAGS.test_data_in_path)
    data -= 128
    if not FLAGS.with_padding:
        data = utils.padding(data)
    #data=utils.padding(data)
    #target_batch,input_batch=dataset.get_batch(flags.batch_size)
    gpu_options = tf.GPUOptions(allow_growth=True)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        model, wrong = create_model(ckpt_path, FLAGS.optimizer, sess)
        if (wrong == True):
            return
        #_,training_loss=model.step(sess,input_batch,target_batch,training=True)
        c1c2_prediction, bmp_prediction, _ = model.step(sess,
                                                        data,
                                                        data,
                                                        1,
                                                        training=False)

    #bmp_prediction
    bmp_prediction = np.reshape(bmp_prediction, bmp_prediction.shape[1:4])
    #print c1c2_prediction.shape
    out_bmp = Image.fromarray(bmp_prediction)
    out_bmp.save(FLAGS.test_bmp_out_path)

    #c1c2_prediction
    c1c2_prediction = np.reshape(c1c2_prediction, c1c2_prediction.shape[1:4])
    #print c1c2_prediction.shape
    out_bmp_c1c2 = Image.fromarray(c1c2_prediction)
    out_bmp_c1c2.save(FLAGS.test_bmp_c1c2_out_path)

    # evaluate
    #bmp
    im = Image.open(FLAGS.original_data_path)
    original = np.array(im, dtype=np.uint8)
    PSNR_score = PSNR(original, bmp_prediction)
    SSIM_score, _ = SSIM(original,
                         bmp_prediction,
                         full=True,
                         multichannel=True)
    print 'BMP: PSNR:' + str(PSNR_score) + ' SSIM:' + str(SSIM_score)

    #bmp_c1c2
    PSNR_score = PSNR(original, c1c2_prediction)
    SSIM_score, _ = SSIM(original,
                         c1c2_prediction,
                         full=True,
                         multichannel=True)
    print 'BMP_C1C2: PSNR:' + str(PSNR_score) + ' SSIM:' + str(SSIM_score)

    #baseline
    bmp_bs, _, _ = bmptobmp_c1c2(bmp_prediction.astype(np.int32))
    bmp_bs = cast(bmp_bs)
    PSNR_score = PSNR(original, bmp_bs)
    SSIM_score, _ = SSIM(original, bmp_bs, full=True, multichannel=True)
    print 'Baseline: PSNR:' + str(PSNR_score) + ' SSIM:' + str(SSIM_score)
    bmp_bs = Image.fromarray(bmp_bs)
    bmp_bs.save(FLAGS.test_baseline_path)

    #bicubic
    bicubic = Image.open(FLAGS.test_data_in_path)
    bicubic = bicubic.resize(
        (bmp_prediction.shape[0], bmp_prediction.shape[1]), Image.BICUBIC)
    bicubic = np.array(bicubic, np.int32)
    bicubic, c1, c2 = bmptobmp_c1c2(bicubic)
    bicubic = cast(bicubic)
    PSNR_score = PSNR(original, bicubic)
    SSIM_score, _ = SSIM(original, bicubic, full=True, multichannel=True)
    print 'Bicubic: PSNR:' + str(PSNR_score) + ' SSIM:' + str(SSIM_score)
    bicubic = Image.fromarray(bicubic)
    bicubic.save(FLAGS.bicubic_path)
Example #12
def structural_similarity(y_pred, y_true):

    return SSIM(y_pred, y_true)
Example #13
def test(config, test_data_loader, gen, criterionMSE, epoch):
    avg_mse = 0
    avg_psnr = 0
    avg_ssim = 0
    for i, batch in enumerate(test_data_loader):
        x, t = Variable(batch[0]), Variable(batch[1])
        if config.cuda:
            x = x.cuda(0)
            t = t.cuda(0)

        out = gen(x)

        if epoch % config.snapshot_interval == 0:
            h = 1
            w = 6
            c = 3
            p = config.size

            allim = np.zeros((h, w, c, p, p))
            x_ = x.cpu().numpy()[0]
            t_ = t.cpu().numpy()[0]
            out_ = out.cpu().numpy()[0]
            in_rgb = x_[:3]
            in_nir = x_[3]
            t_rgb = t_[:3]
            t_cloud = t_[3]
            out_rgb = np.clip(out_[:3], -1, 1)
            out_cloud = np.clip(out_[3], -1, 1)
            allim[0, 0, :] = np.repeat(in_nir[None, :, :], repeats=3, axis=0) * 127.5 + 127.5
            allim[0, 1, :] = in_rgb * 127.5 + 127.5
            allim[0, 2, :] = out_rgb * 127.5 + 127.5
            allim[0, 3, :] = np.repeat(out_cloud[None, :, :], repeats=3, axis=0) * 127.5 + 127.5
            allim[0, 4, :] = t_rgb * 127.5 + 127.5
            allim[0, 5, :] = np.repeat(t_cloud[None, :, :], repeats=3, axis=0) * 127.5 + 127.5
            allim = allim.transpose(0, 3, 1, 4, 2)
            allim = allim.reshape((h*p, w*p, c))

            save_image(config.out_dir, allim, i, epoch)

        mse = criterionMSE(out, t)
        psnr = 10 * np.log10(1 / mse.item())

        img1 = np.tensordot(out.cpu().numpy()[0, :3].transpose(1, 2, 0), [0.298912, 0.586611, 0.114478], axes=1)
        img2 = np.tensordot(t.cpu().numpy()[0, :3].transpose(1, 2, 0), [0.298912, 0.586611, 0.114478], axes=1)
        
        ssim = SSIM(img1, img2)
        avg_mse += mse.item()
        avg_psnr += psnr
        avg_ssim += ssim
    avg_mse = avg_mse / len(test_data_loader)
    avg_psnr = avg_psnr / len(test_data_loader)
    avg_ssim = avg_ssim / len(test_data_loader)

    print("===> Avg. MSE: {:.4f}".format(avg_mse))
    print("===> Avg. PSNR: {:.4f} dB".format(avg_psnr))
    print("===> Avg. SSIM: {:.4f} dB".format(avg_ssim))
    
    log_test = {}
    log_test['epoch'] = epoch
    log_test['mse'] = avg_mse
    log_test['psnr'] = avg_psnr
    log_test['ssim'] = avg_ssim

    return log_test
Example #14
    input = input.cuda()
if opt.level == 0:
    out = model(input)
elif opt.level > 0:
    out = Lmodel(input)
    out = model(out)
    out = rec_inverse(out, Hmodel)
if opt.cuda:
    out = out.cpu()
out_img_y = np.transpose(out.data[0].numpy(), (0, 2, 1))
out_img_y *= 255.0
out_img_y = out_img_y.clip(0, 255)
out_img_y = Image.fromarray(np.uint8(out_img_y[0]), mode='L')

out_img_cb = cb.resize(out_img_y.size, Image.BICUBIC)
out_img_cr = cr.resize(out_img_y.size, Image.BICUBIC)
out_img = Image.merge('YCbCr',
                      [out_img_y, out_img_cb, out_img_cr]).convert('RGB')
plt.figure()
plt.imshow(out_img, interpolation="nearest")
plt.show()
print('ours:')
print(PSNR(np.asarray(out_img), np.asarray(gt_img)))
print(SSIM(np.asarray(out_img), np.asarray(gt_img), multichannel=True))
print('Bicubic:')
print(PSNR(np.asarray(img), np.asarray(gt_img)))
print(SSIM(np.asarray(img), np.asarray(gt_img), multichannel=True))
out_img.save(opt.output_filename)
print('output image saved to ', opt.output_filename)
Example #15
def main():
    
    # parse options
    parser = TestOptions()
    opts = parser.parse()
    result_dir = os.path.join(opts.result_dir, opts.name)
    orig_dir = opts.orig_dir
    blur_dir = opts.dataroot

    if not os.path.exists(result_dir):
        os.mkdir(result_dir)

    # data loader
    print('\n--- load dataset ---')
    if opts.a2b:
        dataset = dataset_single(opts, 'A', opts.input_dim_a)
    else:
        dataset = dataset_single(opts, 'B', opts.input_dim_b)
    loader = torch.utils.data.DataLoader(dataset, batch_size=1, num_workers=opts.nThreads)

    # model
    print('\n--- load model ---')
    model = UID(opts)
    model.setgpu(opts.gpu)
    model.resume(opts.resume, train=False)
    model.eval()

    # test
    print('\n--- testing ---')
    for idx1, (img1,img_name) in enumerate(loader):
        print('{}/{}'.format(idx1, len(loader)))
        img1 = img1.cuda(opts.gpu).detach()
        with torch.no_grad():
            img = model.test_forward(img1, a2b=opts.a2b)
        img_name = img_name[0].split('/')
        img_name = img_name[-1]
        save_imgs(img, img_name, result_dir)
  
    # evaluate metrics
    if opts.percep == 'default':
        pLoss = PerceptualLoss(nn.MSELoss(), p_layer=36)
    elif opts.percep == 'face':
        pLoss = networks.PerceptualLoss16(nn.MSELoss(), p_layer=30)
    else:
        pLoss = networks.MultiPerceptualLoss(nn.MSELoss())
    
    orig_list = sorted(os.listdir(orig_dir))
    deblur_list = sorted(os.listdir(result_dir)) 
    blur_list = sorted(os.listdir(blur_dir)) 
    
    psnr = []
    ssim = []
    percp = []
    blur_psnr = []
    blur_ssim = []
    blur_percp = []

    for (deblur_img_name, orig_img_name, blur_img_name) in zip(deblur_list, orig_list, blur_list):
        deblur_img_name = os.path.join(result_dir,deblur_img_name)
        orig_img_name = os.path.join(orig_dir,orig_img_name)
        blur_img_name = os.path.join(blur_dir, blur_img_name)
        deblur_img = imread(deblur_img_name)
        orig_img = imread(orig_img_name)
        blur_img = imread(blur_img_name)
        try:
            psnr.append(PSNR(deblur_img, orig_img))
            ssim.append(SSIM(deblur_img, orig_img, multichannel=True))
            blur_psnr.append(PSNR(blur_img, orig_img))
            blur_ssim.append(SSIM(blur_img, orig_img, multichannel=True))
        except ValueError:
            print(orig_img_name)
        
        with torch.no_grad():
            temp = pLoss.getloss(deblur_img,orig_img)
            temp2 = pLoss.getloss(blur_img,orig_img)
        percp.append(temp)
        blur_percp.append(temp2)
        
    print(sum(psnr)/len(psnr))
    print(sum(ssim)/len(ssim))
    print(sum(percp)/len(percp))
    
    print(sum(blur_psnr)/len(psnr))
    print(sum(blur_ssim)/len(ssim))
    print(sum(blur_percp)/len(percp))
    return
Example #16
def ssim(pred: np.ndarray, target: np.ndarray):
    return SSIM(target, pred)
Example #17
	#recon=(lasso.coef_).reshape((Nx,Ny))
	#cA=lasso.coef_[0:11250]
	#cD=lasso.coef_[11250:]
	#print cA.shape
	#print cD.shape
	#recon=pywt.idwt(cA,cD,'haar','symmetric').reshape(Nx,Ny)
	#recon2=pywt.idwt(cD,cA,'haar').reshape(Nx,Ny)
	#imshow(recon)
	#show()
	'''

#8. Results
	recon=recon.astype("uint32")
	print "PSNR val is"
	Simage_=Simage_.astype("uint32")
	print PSNR(Simage_,recon)
	print "SSIM val is"
	print SSIM(Simage_,recon)
	print "NRMSE is "
	print NRMSE(Simage_,recon)
	RESULT=Image.fromarray(recon)
	#RESULT.convert('RGB')
	RESULT.show()
	#RESULT.save("recover.jpg")
	print "#absolute error#"
	err=Simage_-recon
	RESER=Image.fromarray(err)
	RESER.show()
	

	count=count-1
Example #18
if 'places' in test_dir:
    for epoch in range(243):
        P = 0.0
        S = 0.0
        L = 0.0
        for i in range(100):
            img = Image.open(test_dir + str((epoch + 1) * 1) + '_' + str(i + 1) + '.jpg')
            gt = Image.open('places/test/'+samples[i])
            #gt = resize(gt)
            L += l1_loss(tensor(img), tensor(gt))
            img = np.array(img)
            gt = np.array(gt)

            P += PSNR(img, gt)
            S += SSIM(img, gt, multichannel=True)
        P /= 100.0
        if P > MP:
            MP = P
        S /= 100.0
        if S > MS:
            MS = S
        L /= 100.0
        if L < ML:
            ML = L
        print('Epoch{}:PSNR:{}, SSIM:{}, L1:{}'.format(epoch + 1, P, S, L))
    print('MAX PSNR:{}, MAX SSIM:{}, MIN L1:{}'.format(MP, MS, ML))
elif 'celeba' in test_dir:
    for epoch in range(20):
        P = 0.0
        S = 0.0
Example #19
            patch = raw[x:x + stride, y:y + stride]
            scale = Image.fromarray(patch, 'L')
            scale = scale.resize((ratio, ratio), Image.BICUBIC)
            scale = np.array(scale)

            x_true = scale / 255.
            y_true = patch / 255.
            x_prime = np.expand_dims(x_true, axis=2)
            x_prime = np.expand_dims(x_prime, axis=0)
            y_pred = model.predict(x_prime)
            y_pred = y_pred[0, :, :, 0]
            mse = np.mean((y_true - y_pred)**2)
            batch_mse.append(mse)
            psnr = 20 * np.log10(1 / np.sqrt(mse))
            batch_psnr.append(psnr)
            ssim = SSIM(y_true, y_pred)
            batch_ssim.append(ssim)
            #print("Count={} Image={} MSE={} SSIM={} PSNR={}".format(count, filename, mse, ssim, psnr))

            out[x:x + stride, y:y + stride] = (y_pred * 255).astype(np.uint8)
            plt.subplot(131)
            plt.imshow(x_true, cmap='Greys')
            plt.subplot(132)
            plt.imshow(y_true, cmap='Greys')
            plt.subplot(133)
            plt.imshow(y_pred, cmap='Greys')
            plt.savefig(os.path.join(figu_dir, "t{}.png".format(count)))
            count = count + 1
    mses.append(np.mean(batch_mse))
    ssims.append(np.mean(batch_ssim))
    psnrs.append(np.mean(batch_psnr))
Example #20
def train():
    ckpt_path = FLAGS.train_dir
    if not os.path.exists(ckpt_path):
        os.makedirs(ckpt_path)

    data = get_data(FLAGS.test_data_in_path)
    data = data.astype(np.float32) - 128
    if not FLAGS.with_padding:
        data = utils.padding(data)
    #print data
    #target_batch,input_batch=dataset.get_batch(flags.batch_size)
    gpu_options = tf.GPUOptions(allow_growth=True)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        model, wrong = create_model(ckpt_path, FLAGS.optimizer, sess)
        if (wrong == True):
            return
        #_,training_loss=model.step(sess,input_batch,target_batch,training=True)
        print data.shape
        bmp_prediction, _ = model.step(sess, data, data, 1, training=False)
        print bmp_prediction.shape
    #bmp_prediction
    bmp_prediction = np.reshape(bmp_prediction.astype(np.uint8),
                                bmp_prediction.shape[1:4])
    #print c1c2_prediction.shape
    out_bmp = Image.fromarray(bmp_prediction)
    out_bmp.save(FLAGS.test_bmp_out_path)

    # evaluate
    #bmp
    im = Image.open(FLAGS.original_data_path)
    #im.save('test111.bmp')
    original = np.array(im, dtype=np.uint8)
    PSNR_score = PSNR(original, bmp_prediction)
    SSIM_score, _ = SSIM(original,
                         bmp_prediction,
                         full=True,
                         multichannel=True)
    print 'BMP: PSNR:' + str(PSNR_score) + ' SSIM:' + str(SSIM_score)

    #bicubic
    bicubic = Image.open(FLAGS.test_data_in_path)
    bicubic = bicubic.resize(
        (bmp_prediction.shape[1], bmp_prediction.shape[0]), Image.BICUBIC)
    bicubic = np.array(bicubic, dtype=np.uint8)

    #bicubic,_,_=bmptobmp_c1c2(bicubic)
    PSNR_score = PSNR(original, bicubic)
    SSIM_score, _ = SSIM(original, bicubic, full=True, multichannel=True)
    print 'Bicubic: PSNR:' + str(PSNR_score) + ' SSIM:' + str(SSIM_score)
    bicubic = Image.fromarray(bicubic)
    bicubic.save(FLAGS.bicubic_path)

    #c1c2
    c1c2_path = '/home/chenchen/sr/bmp_c1c2_sr/data/test/gt'
    c1_path = os.path.join(c1c2_path, '5_c1.bmp')
    c2_path = os.path.join(c1c2_path, '5_c2.bmp')
    bmp_c1c2_path = os.path.join(c1c2_path, '5_c1c2.bmp')

    c1 = Image.open(c1_path)
    c1 = np.array(c1, dtype=np.int32)
    c2 = Image.open(c2_path)
    c2 = np.array(c2, dtype=np.int32)
    bmp_c1c2_gt = Image.open(bmp_c1c2_path)
    bmp_c1c2_gt = np.array(bmp_c1c2_gt, dtype=np.uint8)
    bmp_c1c2 = get_bmp_c1c2_from_gt(bmp_prediction, c1, c2)
    bmp_c1c2_2, _, _ = bmptobmp_c1c2(bmp_prediction)
    PSNR_score = PSNR(original, bmp_c1c2_gt)
    SSIM_score, _ = SSIM(original, bmp_c1c2_gt, full=True, multichannel=True)
    print 'bmp_c1c2_gt: PSNR:' + str(PSNR_score) + ' SSIM:' + str(SSIM_score)

    PSNR_score = PSNR(original, bmp_c1c2)
    SSIM_score, _ = SSIM(original, bmp_c1c2, full=True, multichannel=True)
    print 'bmp_c1c2: PSNR:' + str(PSNR_score) + ' SSIM:' + str(SSIM_score)
    bmp_c1c2 = Image.fromarray(bmp_c1c2)
    bmp_c1c2.save('../data/test/bmp_c1c2_out5.bmp')

    PSNR_score = PSNR(original, bmp_c1c2_2)
    SSIM_score, _ = SSIM(original, bmp_c1c2_2, full=True, multichannel=True)
    print 'bmp_c1c2_2: PSNR:' + str(PSNR_score) + ' SSIM:' + str(SSIM_score)
    bmp_c1c2_2 = Image.fromarray(bmp_c1c2_2)
    bmp_c1c2_2.save('../data/test/bmp_c1c2_2_out5.bmp')
Example #21
    percp = []
    blur_psnr = []
    blur_ssim = []
    blur_percp = []

    for (deblur_img_name, orig_img_name,
         blur_img_name) in zip(deblur_list, orig_list, blur_list):
        deblur_img_name = os.path.join(result_dir, deblur_img_name)
        orig_img_name = os.path.join(orig_dir, orig_img_name)
        blur_img_name = os.path.join(blur_dir, blur_img_name)
        deblur_img = imread(deblur_img_name)
        orig_img = imread(orig_img_name)
        blur_img = imread(blur_img_name)
        try:
            psnr.append(PSNR(deblur_img, orig_img))
            ssim.append(SSIM(deblur_img, orig_img, multichannel=True))
            blur_psnr.append(PSNR(blur_img, orig_img))
            blur_ssim.append(SSIM(blur_img, orig_img, multichannel=True))
        except ValueError:
            print(orig_img_name)

        #with torch.no_grad():
        #    temp = pLoss.getloss(deblur_img,orig_img)
        #    temp2 = pLoss.getloss(blur_img,orig_img)
        #percp.append(temp)
        #blur_percp.append(temp2)

    print("average psnr vs clear image is ", sum(psnr) / len(psnr))
    print("average ssim vs blur image is ", sum(ssim) / len(ssim))
    #print(sum(percp)/len(percp))
Example #22
def analysis(val_loader, epoch, Hnet, Rnet, HnetD, RnetD, criterion):
    print(
        "#################################################### analysis begin ########################################################")

    Hnet.eval()
    Rnet.eval()

    HnetD.eval()
    RnetD.eval()
    import warnings
    warnings.filterwarnings("ignore")

    for i, ((secret_img, secret_target), (cover_img, cover_target)) in enumerate(val_loader, 0):

        ####################################### Cover Agnostic #######################################
        cover_imgv, container_img, secret_imgv_nh, rev_secret_img, errH, errR, diffH, diffR \
        = forward_pass(secret_img, secret_target, cover_img, cover_target, Hnet, Rnet, criterion, val_cover=1)
        secret_encoded = container_img - cover_imgv

        save_result_pic_analysis(opt.bs_secret*opt.num_training, cover_imgv.clone(), container_img.clone(), secret_imgv_nh.clone(), rev_secret_img.clone(), epoch, i, opt.validationpics)

        N, _, _, _ = rev_secret_img.shape

        cover_img_numpy = cover_imgv.clone().cpu().detach().numpy()
        container_img_numpy = container_img.clone().cpu().detach().numpy()

        cover_img_numpy = cover_img_numpy.transpose(0, 2, 3, 1)
        container_img_numpy = container_img_numpy.transpose(0, 2, 3, 1)

        rev_secret_numpy = rev_secret_img.cpu().detach().numpy()
        secret_img_numpy = secret_imgv_nh.cpu().detach().numpy()

        rev_secret_numpy = rev_secret_numpy.transpose(0, 2, 3, 1)
        secret_img_numpy = secret_img_numpy.transpose(0, 2, 3, 1)

        # PSNR
        print("Cover Agnostic")

        print("Secret APD C:", diffH.item())

        psnr = np.zeros((N, 3))
        for i in range(N):
            psnr[i, 0] = PSNR(cover_img_numpy[i, :, :, 0], container_img_numpy[i, :, :, 0])
            psnr[i, 1] = PSNR(cover_img_numpy[i, :, :, 1], container_img_numpy[i, :, :, 1])
            psnr[i, 2] = PSNR(cover_img_numpy[i, :, :, 2], container_img_numpy[i, :, :, 2])
        print("Avg. PSNR C:", psnr.mean().item())

        
        # SSIM
        ssim = np.zeros(N)
        for i in range(N):
            ssim[i] = SSIM(cover_img_numpy[i], container_img_numpy[i], multichannel=True)
        print("Avg. SSIM C:", ssim.mean().item())


        # LPIPS
        import PerceptualSimilarity.models
        model = PerceptualSimilarity.models.PerceptualLoss(model='net-lin', net='alex', use_gpu=True, gpu_ids=[0])
        lpips = model.forward(cover_imgv, container_img)
        print("Avg. LPIPS C:", lpips.mean().item())

        print("Secret APD S:", diffR.item())

        psnr = np.zeros(N)
        for i in range(N):
            psnr[i] = PSNR(secret_img_numpy[i], rev_secret_numpy[i])
        print("Avg. PSNR S:", psnr.mean().item())

        
        # SSIM
        ssim = np.zeros(N)
        for i in range(N):
            ssim[i] = SSIM(secret_img_numpy[i], rev_secret_numpy[i], multichannel=True)
        print("Avg. SSIM S:", ssim.mean().item())


        # LPIPS
        import PerceptualSimilarity.models
        model = PerceptualSimilarity.models.PerceptualLoss(model='net-lin', net='alex', use_gpu=True, gpu_ids=[0])
        lpips = model.forward(secret_imgv_nh, rev_secret_img)
        print("Avg. LPIPS S:", lpips.mean().item())


        ####################################### Cover Agnostic S' #######################################
        cover_imgv, container_img, secret_imgv_nh, rev_secret_img, errH, errR, diffH, diffR \
        = forward_pass(secret_img, secret_target, cover_img.clone().fill_(0.0), cover_target, Hnet, Rnet, criterion, val_cover=1)
        secret_encoded = container_img - cover_imgv

        N, _, _, _ = rev_secret_img.shape

        cover_img_numpy = cover_imgv.clone().cpu().detach().numpy()
        container_img_numpy = container_img.clone().cpu().detach().numpy()

        cover_img_numpy = cover_img_numpy.transpose(0, 2, 3, 1)
        container_img_numpy = container_img_numpy.transpose(0, 2, 3, 1)

        rev_secret_numpy = rev_secret_img.cpu().detach().numpy()
        secret_img_numpy = secret_imgv_nh.cpu().detach().numpy()

        rev_secret_numpy = rev_secret_numpy.transpose(0, 2, 3, 1)
        secret_img_numpy = secret_img_numpy.transpose(0, 2, 3, 1)

        # PSNR
        # psnr = 10*np.log10((1 / ((secret_img_numpy - rev_secret_numpy)**2).mean((1,2,3))))
        # print("Avg. PSNR:", psnr.mean())
        print()
        print("Cover Agnostic S'")

        print("Secret APD S':", diffR.item())

        psnr = np.zeros(N)
        for i in range(N):
            psnr[i] = PSNR(secret_img_numpy[i], rev_secret_numpy[i])
        print("Avg. PSNR S':", psnr.mean().item())

        
        # SSIM
        ssim = np.zeros(N)
        for i in range(N):
            ssim[i] = SSIM(secret_img_numpy[i], rev_secret_numpy[i], multichannel=True)
        print("Avg. SSIM S':", ssim.mean().item())


        # LPIPS
        import PerceptualSimilarity.models
        model = PerceptualSimilarity.models.PerceptualLoss(model='net-lin', net='alex', use_gpu=True, gpu_ids=[0])
        lpips = model.forward(secret_imgv_nh, rev_secret_img)
        print("Avg. LPIPS S':", lpips.mean().item())


        ####################################### Cover Dependent #######################################
        opt.cover_dependent = True
        cover_imgv, container_img, secret_imgv_nh, rev_secret_img, errH, errR, diffH, diffR \
        = forward_pass(secret_img, secret_target, cover_img, cover_target, HnetD, RnetD, criterion, val_cover=1)
        secret_encoded = container_img - cover_imgv

        N, _, _, _ = rev_secret_img.shape

        cover_img_numpy = cover_imgv.clone().cpu().detach().numpy()
        container_img_numpy = container_img.clone().cpu().detach().numpy()

        cover_img_numpy = cover_img_numpy.transpose(0, 2, 3, 1)
        container_img_numpy = container_img_numpy.transpose(0, 2, 3, 1)

        rev_secret_numpy = rev_secret_img.cpu().detach().numpy()
        secret_img_numpy = secret_imgv_nh.cpu().detach().numpy()

        rev_secret_numpy = rev_secret_numpy.transpose(0, 2, 3, 1)
        secret_img_numpy = secret_img_numpy.transpose(0, 2, 3, 1)

        # PSNR
        # psnr = 10*np.log10((1 / ((secret_img_numpy - rev_secret_numpy)**2).mean((1,2,3))))
        # print("Avg. PSNR:", psnr.mean())
        print()
        print("Cover Dependent")

        print("Secret APD C:", diffH.item())

        psnr = np.zeros(N)
        for i in range(N):
            psnr[i] = PSNR(cover_img_numpy[i], container_img_numpy[i])
        print("Avg. PSNR C:", psnr.mean().item())

        
        # SSIM
        ssim = np.zeros(N)
        for i in range(N):
            ssim[i] = SSIM(cover_img_numpy[i], container_img_numpy[i], multichannel=True)
        print("Avg. SSIM C:", ssim.mean().item())


        # LPIPS
        import PerceptualSimilarity.models
        model = PerceptualSimilarity.models.PerceptualLoss(model='net-lin', net='alex', use_gpu=True, gpu_ids=[0])
        lpips = model.forward(cover_imgv, container_img)
        print("Avg. LPIPS C:", lpips.mean().item())

        print("Secret APD S:", diffR.item())

        psnr = np.zeros(N)
        for i in range(N):
            psnr[i] = PSNR(secret_img_numpy[i], rev_secret_numpy[i])
        print("Avg. PSNR S:", psnr.mean().item())

        
        # SSIM
        ssim = np.zeros(N)
        for i in range(N):
            ssim[i] = SSIM(secret_img_numpy[i], rev_secret_numpy[i], multichannel=True)
        print("Avg. SSIM S:", ssim.mean().item())


        # LPIPS
        import PerceptualSimilarity.models
        model = PerceptualSimilarity.models.PerceptualLoss(model='net-lin', net='alex', use_gpu=True, gpu_ids=[0])
        lpips = model.forward(secret_imgv_nh, rev_secret_img)
        print("Avg. LPIPS S:", lpips.mean().item())

        break
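
One note on Example #22: the LPIPS model (PerceptualSimilarity.models.PerceptualLoss) is re-imported and rebuilt before every distance computation. A sketch of building it once and reusing it, with the same constructor arguments as above (an assumed refactor, not part of the original code):

import PerceptualSimilarity.models

# Build the LPIPS model once and reuse it for every comparison.
lpips_model = PerceptualSimilarity.models.PerceptualLoss(
    model='net-lin', net='alex', use_gpu=True, gpu_ids=[0])


def lpips_distance(img_a, img_b):
    # img_a / img_b: image tensors as passed to model.forward in the example.
    return lpips_model.forward(img_a, img_b).mean().item()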