Example #1
    def forward(self, in0, in1, retPerLayer=None):
        # in0, in1: 1x3xHxW image tensors; returns their structural dissimilarity
        # (DSSIM) as a 1-element tensor, computed in RGB or Lab space.
        assert in0.size()[0] == 1  # currently only supports batchSize 1

        if self.colorspace == 'RGB':
            value = util.dssim(1. * util.tensor2im(in0.data),
                               1. * util.tensor2im(in1.data), range=255.).astype('float')
        elif self.colorspace == 'Lab':
            value = util.dssim(util.tensor2np(util.tensor2tensorlab(in0.data, to_norm=False)),
                               util.tensor2np(util.tensor2tensorlab(in1.data, to_norm=False)),
                               range=100.).astype('float')
        ret_var = Variable(torch.Tensor((value,)))
        if self.use_gpu:
            ret_var = ret_var.cuda()
        return ret_var
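For context, util.dssim is used here as a structural-dissimilarity score on HxWxC image arrays. A minimal sketch of such a helper, assuming the usual (1 - SSIM) / 2 convention (the exact definition inside util may differ), built on scikit-image:

from skimage.metrics import structural_similarity

def dssim(p0, p1, range=255.):
    # parameter is named `range` only to match the keyword used in the example above;
    # returns the structural dissimilarity of two HxWxC images, mapped to [0, 1]
    return (1. - structural_similarity(p0, p1, data_range=range, channel_axis=-1)) / 2.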
Example #2
    def get_current_visuals(self):
        # Upsample the reference and the two compared patches to 256x256
        # (nearest neighbour, order=0) for display, returned keyed by name.
        zoom_factor = 256 / self.var_ref.data.size()[2]

        ref_img = util.tensor2im(self.var_ref.data)
        p0_img = util.tensor2im(self.var_p0.data)
        p1_img = util.tensor2im(self.var_p1.data)

        ref_img_vis = zoom(ref_img, [zoom_factor, zoom_factor, 1], order=0)
        p0_img_vis = zoom(p0_img, [zoom_factor, zoom_factor, 1], order=0)
        p1_img_vis = zoom(p1_img, [zoom_factor, zoom_factor, 1], order=0)

        return OrderedDict([('ref', ref_img_vis), ('p0', p0_img_vis),
                            ('p1', p1_img_vis)])
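The zoom(..., order=0) calls above are plain nearest-neighbour resizes that leave the channel axis untouched; a small self-contained check of that behaviour (the 64x64 input size is arbitrary):

import numpy as np
from scipy.ndimage import zoom

img = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)  # stand-in HxWx3 visual
factor = 256 / img.shape[0]                                    # same scaling rule as above
vis = zoom(img, [factor, factor, 1], order=0)                  # order=0 -> nearest neighbour
assert vis.shape == (256, 256, 3)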
Example #3
    def forward(self, in0, in1, retPerLayer=None):
        assert in0.size()[0] == 1  # currently only supports batchSize 1

        if self.colorspace == "RGB":
            value = util.dssim(
                1.0 * util.tensor2im(in0.data),
                1.0 * util.tensor2im(in1.data),
                range=255.0,
            ).astype("float")
        elif self.colorspace == "Lab":
            value = util.dssim(
                util.tensor2np(util.tensor2tensorlab(in0.data, to_norm=False)),
                util.tensor2np(util.tensor2tensorlab(in1.data, to_norm=False)),
                range=100.0,
            ).astype("float")
        ret_var = Variable(torch.Tensor((value, )))
        if self.use_gpu:
            ret_var = ret_var.cuda()
        return ret_var
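Both DSSIM variants above convert tensors to images first. A sketch of the conversion these utilities appear to assume, a 1x3xHxW tensor in [-1, 1] mapped to an HxWx3 uint8 array (this is an assumption about util.tensor2im / lpips.tensor2im, not code taken from the library):

import numpy as np
import torch

def tensor2im(t):
    # 1x3xHxW tensor in [-1, 1] -> HxWx3 uint8 image in [0, 255] (assumed convention)
    arr = t[0].detach().cpu().float().numpy()
    arr = (np.transpose(arr, (1, 2, 0)) + 1.) / 2.
    return np.clip(arr * 255., 0, 255).round().astype(np.uint8)

img = tensor2im(torch.rand(1, 3, 64, 64) * 2 - 1)   # e.g. a random image in [-1, 1]
assert img.shape == (64, 64, 3) and img.dtype == np.uint8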
Example #4
# opt (parsed command-line options) and loss_fn (an LPIPS distance module)
# are presumably defined earlier in the full script.
import matplotlib.pyplot as plt
import torch
import lpips
from torch.autograd import Variable

ref = lpips.im2tensor(lpips.load_image(opt.ref_path))
pred = Variable(lpips.im2tensor(lpips.load_image(opt.pred_path)),
                requires_grad=True)
if opt.use_gpu:
    with torch.no_grad():
        ref = ref.cuda()
        pred = pred.cuda()
    # .cuda() under no_grad() returns a copy with requires_grad=False,
    # so re-enable gradients on the leaf tensor the optimizer will update
    pred.requires_grad_(True)

optimizer = torch.optim.Adam([
    pred,
], lr=1e-3, betas=(0.9, 0.999))

plt.ion()
fig = plt.figure(1)
ax = fig.add_subplot(131)
ax.imshow(lpips.tensor2im(ref))
ax.set_title('target')
ax = fig.add_subplot(133)
ax.imshow(lpips.tensor2im(pred.data))
ax.set_title('initialization')

for i in range(1000):
    # gradient descent on the pixels of pred: minimize the LPIPS distance to ref
    dist = loss_fn.forward(pred, ref)
    optimizer.zero_grad()
    dist.backward()
    optimizer.step()
    # project back into the [-1, 1] range that LPIPS expects
    pred.data = torch.clamp(pred.data, -1, 1)

    if i % 10 == 0:
        print('iter %d, dist %.3g' % (i, dist.view(-1).data.cpu().numpy()[0]))
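The loop is a projected gradient descent on the pixels of pred: each Adam step is followed by a clamp back into the [-1, 1] range that LPIPS expects. One plausible way to construct the loss_fn used above with the pip-installable lpips package (the choice of backbone is an assumption; the full script may take it from a command-line flag):

import torch
import lpips

loss_fn = lpips.LPIPS(net='vgg')      # 'alex' and 'squeeze' are the other stock backbones
if torch.cuda.is_available():
    loss_fn = loss_fn.cuda()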