Example no. 1
0
def get_perceptual_distance(input_image_list,
                            output_image_list,
                            model_path=None,
                            model=None,
                            use_gpu=True):
    """Compute pairwise LPIPS perceptual distances between two image lists.

    Args:
        input_image_list: list of image file paths or in-memory image arrays.
        output_image_list: list of image file paths or in-memory image arrays;
            when arrays are used, each pair must have matching shapes.
        model_path: optional path to the LPIPS linear-calibration weights.
            Defaults to the bundled AlexNet v0.1 weights.
        model: optional pre-initialized DistModel; when supplied, model_path
            and use_gpu are ignored.
        use_gpu: run the LPIPS network on GPU when initializing a new model.

    Returns:
        Tuple (dist_scores, mean, std) where dist_scores is a numpy array of
        shape (len(input_image_list), len(output_image_list)).
    """
    import PerceptualSimilarity.models.dist_model as dm
    import PerceptualSimilarity.util.util as psutil

    if model is None:
        # Bug fix: honor the model_path argument (it was previously accepted
        # but silently ignored in favor of the hard-coded default).
        if model_path is None:
            model_path = "LPIPS/PerceptualSimilarity/weights/v0.1/alex.pth"
        model = dm.DistModel()
        model.initialize(
            model="net-lin",
            net="alex",
            model_path=model_path,
            use_gpu=use_gpu)

    dist_scores = np.zeros((len(input_image_list), len(output_image_list)))

    for i, img_i in enumerate(input_image_list):
        for j, img_o in enumerate(output_image_list):
            if isinstance(img_i, str) and isinstance(img_o, str):
                # Both entries are file paths: load from disk first.
                ex_i = psutil.im2tensor(psutil.load_image(img_i))
                ex_o = psutil.im2tensor(psutil.load_image(img_o))
            else:
                # In-memory arrays; LPIPS needs equal spatial dimensions.
                assert np.shape(img_i) == np.shape(img_o)
                ex_i = psutil.im2tensor(img_i)
                ex_o = psutil.im2tensor(img_o)
            dist_scores[i, j] = model.forward(ex_i, ex_o)[0]

    return dist_scores, np.mean(dist_scores), np.std(dist_scores)
Example no. 2
0
    def test(self):
        """Run the generator in eval/no-grad mode to produce self.fake_L.

        When self.is_test is set, additionally builds a per-region domain
        "real-or-fake" weight map from the discriminator applied to the
        high-frequency wavelet bands of the output.  When self.val_lpips is
        set, stores an LPIPS validation score in self.LPIPS.
        """
        self.netG.eval()
        with torch.no_grad():
            if self.chop:
                # Memory-saving chopped forward pass.  The 1/self.scale
                # factor suggests this generator DOWN-scales (var_H ->
                # fake_L) — TODO confirm against the model definition.
                self.fake_L = forward_chop(self.var_H, 1/self.scale, self.netG, min_size=160000)
            else:
                self.fake_L = self.netG(self.var_H)

            if self.is_test:  # Save Domain Distance Map
                sig = torch.nn.Sigmoid()
                # Discard the low-frequency band; keep only the
                # high-frequency DWT coefficients.
                __, hfc = self.DWT2(self.fake_L)
                # hfc = hfc[0] * 0.5 + 0.5
                hfc = hfc[0]
                # Split the three high-frequency sub-bands (presumably
                # LH / HL / HH orientation bands of the wavelet transform).
                LH, HL, HH = hfc[:, 0, :, :, :], \
                             hfc[:, 1, :, :, :], \
                             hfc[:, 2, :, :, :]
                # Stack sub-bands along the channel axis for the discriminator.
                hfc = torch.cat((LH, HL, HH), dim=1)
                realorfake = sig(self.netD(hfc)).cpu().detach().numpy()
                # Map per-patch discriminator scores back to spatial weights
                # via the receptive-field geometry helpers — NOTE(review):
                # semantics of receptive_cal/getWeights not visible here.
                currentLayer_h, currentLayer_w = receptive_cal(hfc.shape[2]), receptive_cal(hfc.shape[3])
                self.realorfake = getWeights(realorfake, hfc, currentLayer_h, currentLayer_w)

            if self.val_lpips:
                # Channel swap [2,1,0] (BGR<->RGB) before LPIPS scoring —
                # presumably tensor2img returns BGR; confirm against util.
                fake_L, real_L = util.tensor2img(self.fake_L), util.tensor2img(self.var_L)
                fake_L, real_L = fake_L[:, :, [2, 1, 0]], real_L[:, :, [2, 1, 0]]
                fake_L, real_L = util_LPIPS.im2tensor(fake_L), util_LPIPS.im2tensor(real_L)
                self.LPIPS = self.cri_fea_lpips(fake_L, real_L)[0][0][0][0]
        self.netG.train()
def main():
    """Entry point: score all image pairs with LPIPS and visualize an embedding.

    Pipeline: parse CLI args -> prepare model-ready images -> build the
    pairs list -> score every pair with LPIPS -> persist scores ->
    compute an embedding -> scatter-plot with image thumbnails.
    """
    argv = sys.argv[1:]
    # Renamed from `input` to avoid shadowing the builtin.
    (input_path, name, seed, perplexity, gpu) = getArgs(argv)
    makeModelReadyImages(input_path, name)
    makePairsList(input_path, name)

    data = pd.read_csv("./datatables/%s_pairs_list.csv" % name)

    # Normalize the gpu flag (arrives as a string from the command line).
    if gpu.lower() in ('yes', 'true', 't', 'y', '1'):
        gpu = True
    elif gpu.lower() in ('no', 'false', 'f', 'n', '0'):
        gpu = False
    else:
        # Bug fix: previously an unrecognized value fell through silently and
        # the raw (always-truthy) string was used as the GPU flag.
        raise ValueError("unrecognized boolean value for gpu: %r" % gpu)

    use_gpu = gpu       # whether to run the LPIPS network on GPU
    spatial = False     # scalar distances, not spatial distance maps

    # Linearly calibrated LPIPS model (AlexNet backbone).  Alternatives:
    # net='squeeze'/'vgg', model='net' (uncalibrated), or low-level metrics
    # model='l2'/'ssim' — see DistModel.initialize() for details.
    model = dm.DistModel()
    model.initialize(model='net-lin', net='alex', use_gpu=use_gpu, spatial=spatial)
    print('Model [%s] initialized' % model.name())

    # Score every pair listed in the pairs table.
    dist = []
    model_input_dir = "./model_ready_images/%s" % (name)  # loop-invariant, hoisted
    for index, row in data.iterrows():
        img_1_path = "%s/%s" % (model_input_dir, row['img1'])
        img_2_path = "%s/%s" % (model_input_dir, row['img2'])
        ex_ref = util.im2tensor(util.load_image(img_1_path))
        ex_p1 = util.im2tensor(util.load_image(img_2_path))
        ex_d0 = model.forward(ex_ref, ex_p1)
        dist.append(ex_d0)
        print(ex_d0)

    # Bug fix: `data.distance = dist` on a DataFrame without an existing
    # 'distance' column only sets an instance attribute — the column was
    # never added, so the CSV below lacked the scores.  Use item assignment.
    data['distance'] = dist
    data.to_csv("./datatables/output_%s_data.csv" % name)
    makeEmb(name, perplexity, seed)
    emb_path = "./datatables/%s_emb.csv" % name
    data = pd.read_csv(emb_path)
    visualize_scatter_with_images(name=name, data=data)
    sys.exit()
Example no. 4
0
 def test(self, tsamples=False):
     """Forward pass in eval/no-grad mode using patch-adaptive weights.

     Produces self.fake_H from self.var_L; when tsamples is False and
     self.val_lpips is set, also stores the LPIPS score in self.LPIPS.

     Args:
         tsamples: when True, skip the LPIPS validation step (presumably a
             "training samples" mode — confirm with the caller).
     """
     torch.cuda.empty_cache()  # release cached GPU memory before inference
     self.netG.eval()
     with torch.no_grad():
         # Per-patch adaptive weights from the patch discriminator.
         self.adaptive_weights = self.net_patchD(self.var_L)
         if self.chop:
             # NOTE(review): the chopped path does not pass adaptive_weights
             # to netG, unlike the direct path below — confirm intentional.
             self.fake_H = forward_chop(self.var_L, self.scale, self.netG, min_size=320000)
         else:
             self.fake_H = self.netG(self.var_L, self.adaptive_weights)
         if not tsamples and self.val_lpips:
             # Channel swap [2,1,0] (BGR<->RGB) before LPIPS scoring —
             # presumably tensor2img returns BGR; confirm against util.
             fake_H, real_H = util.tensor2img(self.fake_H), util.tensor2img(self.var_H)
             fake_H, real_H = fake_H[:, :, [2, 1, 0]], real_H[:, :, [2, 1, 0]]
             fake_H, real_H = util_LPIPS.im2tensor(fake_H), util_LPIPS.im2tensor(real_H)
             self.LPIPS = self.cri_fea_lpips(fake_H, real_H)[0][0][0][0]
         # NOTE(review): train() is restored while still inside the no_grad
         # context here (harmless for mode switching, but differs from the
         # sibling implementations in this codebase).
         self.netG.train()
Example no. 5
0
 def test(self):
     """Run the generator in eval/no-grad mode and optionally score LPIPS.

     Produces self.fake_H from self.var_L (using a memory-saving chopped
     forward pass when self.chop is set).  When self.val_lpips is enabled,
     stores the LPIPS distance between the reconstruction and self.real_H
     in self.LPIPS.  Restores training mode before returning.
     """
     self.netG.eval()
     with torch.no_grad():
         if self.chop:
             self.fake_H = forward_chop(self.var_L, self.scale, self.netG)
         else:
             self.fake_H = self.netG(self.var_L)
         if self.val_lpips:
             # Convert both tensors to images, swap the channel order, and
             # map back to LPIPS input tensors before scoring.
             imgs = [util.tensor2img(t) for t in (self.fake_H, self.real_H)]
             imgs = [im[:, :, [2, 1, 0]] for im in imgs]
             fake_H, real_H = [util_LPIPS.im2tensor(im) for im in imgs]
             self.LPIPS = self.cri_fea_lpips(real_H, fake_H)[0][0][0][0]
     self.netG.train()
Example no. 6
0
def compute_lpips(gt_path, inp_path, version='0.0', use_gpu=True):
    """Compute the LPIPS distance between two images on disk.

    Args:
        gt_path: path to the ground-truth / reference image.
        inp_path: path to the image being evaluated.
        version: LPIPS weights version string (default '0.0').
        use_gpu: run the network on GPU when True.

    Returns:
        The LPIPS distance as a Python float.
    """
    model = models.PerceptualLoss(model='net-lin',
                                  net='alex',
                                  use_gpu=use_gpu,
                                  version=version)
    img0_np = util.load_image(gt_path)
    img1_np = util.load_image(inp_path)
    img0 = util.im2tensor(img0_np)
    img1 = util.im2tensor(img1_np)
    if use_gpu:
        img0 = img0.cuda()
        img1 = img1.cuda()

    dist01 = model.forward(img0, img1)
    # Bug fix: the original returned a raw tensor on the CPU path but a
    # float on the GPU path.  Tensor.item() works on any device, so always
    # return a plain float for a consistent interface.
    return dist01.item()
Example no. 7
0
# Off-the-shelf uncalibrated networks (alternative initializations for the
# `model` created earlier, outside this excerpt):
#model.initialize(model='net',net='alex',use_gpu=use_gpu)
#model.initialize(model='net',net='vgg',use_gpu=use_gpu)

# Low-level metrics
# model.initialize(model='l2',colorspace='Lab')
# model.initialize(model='ssim',colorspace='RGB')
print('Model [%s] initialized' % model.name())

## Example usage with dummy tensors
dummy_im0 = torch.Tensor(1, 3, 64,
                         64)  # image should be RGB, normalized to [-1,1]
dummy_im1 = torch.Tensor(1, 3, 64, 64)
dist = model.forward(dummy_im0, dummy_im1)

## Example usage with images
ex_ref = util.im2tensor(util.load_image('./imgs/ex_ref.png'))
ex_p0 = util.im2tensor(util.load_image('./imgs/ex_p0.png'))
ex_p1 = util.im2tensor(util.load_image('./imgs/ex_p1.png'))
ex_d0 = model.forward(ex_ref, ex_p0)
ex_d1 = model.forward(ex_ref, ex_p1)
# `spatial` (set before this excerpt) selects scalar vs per-pixel distances.
if not spatial:
    print('Distances: (%.3f, %.3f)' % (ex_d0, ex_d1))
else:
    print(
        'Distances: (%.3f, %.3f)' % (ex_d0.mean(), ex_d1.mean())
    )  # The mean distance is approximately the same as the non-spatial distance

    # Visualize a spatially-varying distance map between ex_p0 and ex_ref
    import pylab
    pylab.imshow(ex_d0)
    pylab.show()
Example no. 8
0
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--dir0', type=str, default='./imgs/ex_dir0')
parser.add_argument('--dir1', type=str, default='./imgs/ex_dir1')
parser.add_argument('--out', type=str, default='./imgs/example_dists.txt')
parser.add_argument('--use_gpu',
                    action='store_true',
                    help='turn on flag to use GPU')
opt = parser.parse_args()

## Initializing the model
model = dm.DistModel()
model.initialize(model='net-lin', net='alex', use_gpu=opt.use_gpu)

# Crawl dir0 and score every file that also exists in dir1, writing one
# "<name>: <distance>" line per pair to the output file.
# Fix: use a context manager so the output file is closed even if an
# exception is raised mid-loop (previously a bare open()/close() pair).
with open(opt.out, 'w') as f:
    for file in os.listdir(opt.dir0):
        if os.path.exists(os.path.join(opt.dir1, file)):
            # Load images as RGB tensors normalized to [-1, 1]
            img0 = util.im2tensor(util.load_image(os.path.join(opt.dir0, file)))
            img1 = util.im2tensor(util.load_image(os.path.join(opt.dir1, file)))

            # Compute distance
            dist01 = model.forward(img0, img1)
            print('%s: %.3f' % (file, dist01))
            f.writelines('%s: %.6f\n' % (file, dist01))
def main(ref_dir, generated_dir, version='0.0', use_gpu=True):
    """
    Compute the mean and standard deviation of the LPIPS, PSNR and SSIM
    metrics over an image directory.

    Only files present in BOTH directories are scored.

    Args:
        ref_dir: reference images directory
        generated_dir: generated images directory
        version: version of LPIPS to use, default 0.0
        use_gpu: whether to use gpu for faster computation
    """

    ## Initialize the LPIPS model
    model = models.PerceptualLoss(model='net-lin',
                                  net='alex',
                                  use_gpu=use_gpu,
                                  version=version)

    # Bug fix: the metric arrays were pre-allocated with np.empty(len(files));
    # any reference file without a generated counterpart left uninitialized
    # garbage in its slot, corrupting the reported mean/std.  Append only the
    # pairs actually scored instead.
    lpips_list = []
    psnr_list = []
    ssim_list = []

    for file in os.listdir(ref_dir):
        if not os.path.exists(os.path.join(generated_dir, file)):
            continue  # no generated counterpart; skip this file

        # Load images
        img0_np = util.load_image(os.path.join(ref_dir, file))
        img1_np = util.load_image(os.path.join(generated_dir, file))

        img0 = util.im2tensor(img0_np)
        img1 = util.im2tensor(img1_np)

        if use_gpu:
            img0 = img0.cuda()
            img1 = img1.cuda()

        # Compute LPIPS distance
        dist01 = model.forward(img0, img1)
        lpips_list.append(float(dist01))

        # Compute PSNR value
        psnr = metrics.peak_signal_noise_ratio(img0_np, img1_np)
        psnr_list.append(psnr)

        # Compute SSIM value
        ssim = metrics.structural_similarity(img0_np,
                                             img1_np,
                                             multichannel=True)
        ssim_list.append(ssim)

        print('%s: %.4f, %.4f, %.4f' % (file, dist01, psnr, ssim))

    lpips_arr = np.asarray(lpips_list)
    psnr_arr = np.asarray(psnr_list)
    ssim_arr = np.asarray(ssim_list)

    print("LPIPS mean: {:.4f}".format(lpips_arr.mean()))
    print("LPIPS std: {:.4f}".format(lpips_arr.std()))

    print("PSNR mean: {:.4f}".format(psnr_arr.mean()))
    print("PSNR std: {:.4f}".format(psnr_arr.std()))

    print("SSIM mean: {:.4f}".format(ssim_arr.mean()))
    print("SSIM std: {:.4f}".format(ssim_arr.std()))
Example no. 10
0
# Load the reference plume render and two variants; keep only the RGB
# planes (drop any alpha channel).
ref = imageio.imread("Images/plumeReference.png")[...,:3]
plumeA = imageio.imread("Images/plumeA.png")[...,:3]
plumeB = imageio.imread("Images/plumeB.png")[...,:3]

# LSiM distances — inputs resized to 224x224 with interpolation order 0
# (presumably nearest-neighbor resampling; confirm in computeDistance docs).
distA_LSiM = modelLSiM.computeDistance(ref, plumeA, interpolateTo=224, interpolateOrder=0)
distB_LSiM = modelLSiM.computeDistance(ref, plumeB, interpolateTo=224, interpolateOrder=0)

# Plain L2 distances
distA_L2 = modelL2.computeDistance(ref, plumeA)
distB_L2 = modelL2.computeDistance(ref, plumeB)

# SSIM-based distances
distA_SSIM = modelSSIM.computeDistance(ref, plumeA)
distB_SSIM = modelSSIM.computeDistance(ref, plumeB)


# convert numpy arrays to tensor for the LPIPS model
tensRef = util.im2tensor(ref)
tensPlumeA = util.im2tensor(plumeA)
tensPlumeB = util.im2tensor(plumeB)

distA_LPIPS = modelLPIPS.forward(tensRef, tensPlumeA)
distB_LPIPS = modelLPIPS.forward(tensRef, tensPlumeB)

# print distance results
print("LSiM   --  PlumeA: %0.4f  PlumeB: %0.4f" % (distA_LSiM, distB_LSiM))
print("L2     --  PlumeA: %0.4f  PlumeB: %0.4f" % (distA_L2, distB_L2))
print("SSIM   --  PlumeA: %0.4f  PlumeB: %0.4f" % (distA_SSIM, distB_SSIM))
print("LPIPS  --  PlumeA: %0.4f  PlumeB: %0.4f" % (distA_LPIPS, distB_LPIPS))

# Expected reference output (same on CPU and GPU):
#LSiM   --  PlumeA: 0.3791  PlumeB: 0.4433
#L2     --  PlumeA: 0.0708  PlumeB: 0.0651
import argparse
from PerceptualSimilarity.models import dist_model as dm
from PerceptualSimilarity.util import util

# CLI: compute the LPIPS distance between two images given by path.
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--path0', type=str, default='./imgs/ex_ref.png')
parser.add_argument('--path1', type=str, default='./imgs/ex_p0.png')
parser.add_argument('--use_gpu', action='store_true', help='turn on flag to use GPU')
opt = parser.parse_args()

## Initializing the model (linearly calibrated LPIPS, AlexNet backbone)
model = dm.DistModel()
model.initialize(model='net-lin',net='alex',use_gpu=opt.use_gpu)

# Load images
img0 = util.im2tensor(util.load_image(opt.path0)) # RGB image from [-1,1]
img1 = util.im2tensor(util.load_image(opt.path1))

# Compute distance
dist01 = model.forward(img0,img1)
print('Distance: %.3f'%dist01)