コード例 #1
0
import argparse  # FIX: was used below (argparse.ArgumentParser) but never imported

import deepsee_models
from util import util

# Command-line interface: two image paths and an optional GPU flag.
parser = argparse.ArgumentParser(
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-p0', '--path0', type=str, default='./imgs/ex_ref.png')
parser.add_argument('-p1', '--path1', type=str, default='./imgs/ex_p0.png')
parser.add_argument('--use_gpu',
                    action='store_true',
                    help='turn on flag to use GPU')

opt = parser.parse_args()

## Initializing the model: linearly calibrated LPIPS ('net-lin') on AlexNet features.
model = deepsee_models.PerceptualLoss(model='net-lin',
                                      net='alex',
                                      use_gpu=opt.use_gpu)

# Load images; util.im2tensor presumably yields RGB tensors in [-1, 1]
# (per the original comment) — verify against util's implementation.
img0 = util.im2tensor(util.load_image(opt.path0))  # RGB image from [-1,1]
img1 = util.im2tensor(util.load_image(opt.path1))

if opt.use_gpu:
    img0 = img0.cuda()
    img1 = img1.cuda()

# Compute perceptual distance between the two images and report it.
dist01 = model.forward(img0, img1)
print('Distance: %.3f' % dist01)
コード例 #2
0
ファイル: test_network.py プロジェクト: scape1989/DeepSEE
import torch

import deepsee_models
from util import util

use_gpu = False  # Flip to True to run the metric on a CUDA device.
spatial = True  # Request a per-location distance map rather than a single scalar.

# Linearly calibrated deepsee_models (LPIPS) on AlexNet features.
model = deepsee_models.PerceptualLoss(model='net-lin',
                                      net='alex',
                                      use_gpu=use_gpu,
                                      spatial=spatial)
# Can also set net = 'squeeze' or 'vgg'

# Off-the-shelf uncalibrated networks
# model = deepsee_models.PerceptualLoss(model='net', net='alex', use_gpu=use_gpu, spatial=spatial)
# Can also set net = 'squeeze' or 'vgg'

# Low-level metrics
# model = deepsee_models.PerceptualLoss(model='L2', colorspace='Lab', use_gpu=use_gpu)
# model = deepsee_models.PerceptualLoss(model='ssim', colorspace='RGB', use_gpu=use_gpu)

## Example usage with dummy tensors.
# Inputs should be RGB, normalized to [-1, 1]; zeros are used as placeholders.
dummy_im0 = torch.zeros(1, 3, 64, 64)
dummy_im1 = torch.zeros(1, 3, 64, 64)
if use_gpu:
    dummy_im0, dummy_im1 = dummy_im0.cuda(), dummy_im1.cuda()
dist = model.forward(dummy_im0, dummy_im1)
コード例 #3
0
ファイル: perceptual_loss.py プロジェクト: scape1989/DeepSEE
import deepsee_models
# FIX: scipy, torch, and Variable were all used below without being imported.
import scipy.misc  # NOTE(review): scipy.misc.imread was removed in SciPy >= 1.2;
                   # on modern SciPy switch to imageio.imread — confirm pinned version.
import torch
from torch.autograd import Variable  # Deprecated no-op wrapper on modern PyTorch; kept for compatibility.

use_gpu = True

ref_path = './imgs/ex_ref.png'
pred_path = './imgs/ex_p1.png'

# Load images as CHW float arrays scaled to [0, 1].
ref_img = scipy.misc.imread(ref_path).transpose(2, 0, 1) / 255.
pred_img = scipy.misc.imread(pred_path).transpose(2, 0, 1) / 255.

# Torchify: add a batch dimension; only the prediction is optimized,
# so only it requires gradients.
ref = Variable(torch.FloatTensor(ref_img)[None, :, :, :])
pred = Variable(torch.FloatTensor(pred_img)[None, :, :, :], requires_grad=True)

# Linearly calibrated LPIPS on VGG features serves as the loss to minimize.
loss_fn = deepsee_models.PerceptualLoss(model='net-lin',
                                        net='vgg',
                                        use_gpu=use_gpu)
optimizer = torch.optim.Adam([
    pred,
], lr=1e-3, betas=(0.9, 0.999))

import matplotlib.pyplot as plt

# Interactive 1x3 figure: target in the left slot, initialization in the right.
plt.ion()
fig = plt.figure(1)
ax = fig.add_subplot(131)
ax.imshow(ref_img.transpose(1, 2, 0))
ax.set_title('target')
ax = fig.add_subplot(133)
ax.imshow(pred_img.transpose(1, 2, 0))
ax.set_title('initialization')