# Example #1
# 0
def poisson_blend_old(input, output, mask):
    """
    * inputs:
        - input (torch.Tensor, required)
                Input tensor of Completion Network.
        - output (torch.Tensor, required)
                Output tensor of Completion Network.
        - mask (torch.Tensor, required)
                Input mask tensor of Completion Network.
    * returns:
                Image tensor inpainted using poisson image editing method.
    """
    def to_channel_last(tensor):
        # torch tensor -> numpy copy, 'channel first' -> 'channel last'
        return np.transpose(np.copy(tensor.cpu().numpy()), axes=(0, 2, 3, 1))

    input_np = to_channel_last(input)
    output_np = to_channel_last(output)
    mask_np = to_channel_last(mask)

    # blend each sample individually, then re-stack along the batch axis
    blended = []
    for sample in range(input.shape[0]):
        patched = blend(input_np[sample], output_np[sample], mask_np[sample])
        channel_first = np.transpose(patched, axes=(2, 0, 1))
        blended.append(torch.from_numpy(channel_first).unsqueeze(0))
    return torch.cat(blended, dim=0)
    def poisson_blend(self, imgs1, imgs2, mask):
        """Poisson-blend each image pair using one shared mask.

        imgs1/imgs2 are batches scaled to [-1, 1]; the blended batch is
        returned in the same range as float32.
        """
        blended = np.zeros(imgs1.shape)
        for idx in range(len(imgs1)):
            # map this pair from [-1, 1] into [0, 1] for the blender
            base = (imgs1[idx] + 1.) / 2.0
            overlay = (imgs2[idx] + 1.) / 2.0
            mixed = poissonblending.blend(base, overlay, 1 - mask)
            # map back to [-1, 1] and clamp
            blended[idx] = np.clip((mixed - 0.5) * 2, -1.0, 1.0)
        return blended.astype(np.float32)
    def blend(self, g_out):
        """Conduct Poisson blending of generator output onto the stored
        input images; both batches are rescaled from [-1, 1] to [0, 1]."""
        generated = (np.array(g_out) + 1) / 2
        originals = (np.array(self.input_images) + 1) / 2

        for idx in range(len(g_out)):
            generated[idx] = poissonblending.blend(originals[idx],
                                                   generated[idx],
                                                   1 - self.binary_mask)

        return generated
# Example #4
# 0
    def poisson_blend2(self, imgs1, imgs2, mask):
        """Per-sample Poisson blend with a per-sample mask.

        Call this while performing the consistency experiment; imgs1/imgs2
        and the returned float32 batch are in [-1, 1].
        """
        result = np.zeros(imgs1.shape)
        for idx in range(len(imgs1)):
            # rescale the pair from [-1, 1] into [0, 1]
            first = (imgs1[idx] + 1.) / 2.0
            second = (imgs2[idx] + 1.) / 2.0
            merged = poissonblending.blend(first, second, 1 - mask[idx])
            # back to [-1, 1], clipped to the valid range
            result[idx] = np.clip((merged - 0.5) * 2, -1.0, 1.0)
        return result.astype(np.float32)
# Example #5
# 0
    def postprocess(self, g_out):
        """Poisson-blend generator output into the stored input images,
        then composite the two batches through the stored masks.

        * g_out: batch of generator outputs, rescaled below from [-1, 1].
        * returns: processed image batch in [0, 1].
        """
        # Rescale both batches from [-1, 1] to [0, 1].
        images_out = (np.array(g_out) + 1) / 2.0
        images_in = (np.array(self.images_data) + 1) / 2.0

        for i in range(len(g_out)):
            images_out[i] = poissonblending.blend(images_in[i], images_out[i],
                                                  1 - self.bin_mask)

        # NOTE(review): the original wrote this as a `for ... else:`; since
        # the loop has no `break`, the else-clause always executed, so the
        # composite below is unconditional and the misleading `else` was
        # removed. Behavior is unchanged.
        images_out = np.multiply(images_out,
                                 1 - self.masks_data) + np.multiply(
                                     images_in, self.masks_data)

        return images_out
# Example #6
# 0
    def leftshift(self):
        """Warp and Poisson-blend each image of ``self.left_list`` into a
        growing left-side panorama, storing the mosaic in ``self.leftImage``.

        NOTE(review): if ``left_list`` has fewer than two images, ``tmp`` is
        never assigned and the final ``self.leftImage = tmp`` raises
        NameError — confirm callers always supply at least two images.
        """
        a = self.left_list[0]
        for b in self.left_list[1:]:
            # homography from a to b, inverted to warp a into b's frame
            H = self.matcher_obj.match(a, b, 'left')
            xh = np.linalg.inv(H)
            # project the four corners of `a` through the inverse homography
            rt = np.dot(xh, np.array([a.shape[1], 0, 1])) # right top
            rt = rt/rt[-1]
            lb = np.dot(xh, np.array([0, a.shape[0], 1])) # left bottom
            lb = lb/lb[-1]
            ds = np.dot(xh, np.array([a.shape[1], a.shape[0], 1])) # right bottom
            ds = ds/ds[-1]
            f1 = np.dot(xh, np.array([0,0,1])) # left top
            f1 = f1/f1[-1]
            # negative offsets mean the warp lands before the canvas origin;
            # shift the transform so all coordinates become non-negative
            offsetx = min(lb[0], f1[0], 0)
            offsety = min(rt[1], f1[1], 0)
            if(offsetx < 0):
                xh[0][-1] += abs(offsetx)
                offsetx = math.ceil(abs(offsetx))
            if(offsety < 0):
                xh[1][-1] += abs(offsety)
                offsety = math.ceil(abs(offsety))
            # canvas large enough for both the warped `a` and `b`
            d_y = max(b.shape[0], math.ceil(lb[1]), math.ceil(ds[1]))
            dsize = (offsetx + b.shape[1], offsety + d_y)
            # tmp = cv2.warpPerspective(a, xh, dsize, flags = cv2.INTER_CUBIC)
            tmp = gt.perspectivetrans(a, xh, dsize)
            
            # Poisson-blending
            # blend mask: 255 where warped `a` contributes, zeroed over most
            # of `b`'s area (a 10px strip is kept for the seam)
            mask = cv2.warpPerspective(np.ones(a.shape, dtype=np.uint8)*255, xh, dsize, flags = cv2.INTER_CUBIC)
            mask[offsety:b.shape[0]+offsety, offsetx+10:b.shape[1]+offsetx] = np.zeros((b.shape[0], b.shape[1]-10, 3), dtype=np.uint8)
            # cv2.imwrite('mask.jpg', mask)
            # paste `b` onto an empty canvas at its offset position
            blend_img = np.zeros(tmp.shape, dtype=np.uint8)
            blend_img[offsety:b.shape[0]+offsety, offsetx:b.shape[1]+offsetx] = b
            # black_mask marks every pixel covered by either image
            black_mask = np.zeros(tmp.shape, dtype=np.uint8)
            for i in range(0, tmp.shape[0]):
                for j in range(0, tmp.shape[1]):
                    if (not np.array_equal(tmp[i, j], np.array([0, 0, 0]))) or (not np.array_equal(blend_img[i, j], np.array([0, 0, 0]))):
                        black_mask[i, j] = (1, 1, 1);
            tmp = p_b.blend(blend_img, tmp, mask)
            # re-blacken pixels that neither source image covered
            for i in range(0, tmp.shape[0]):
                for j in range(0, tmp.shape[1]):
                    if np.array_equal(black_mask[i, j], np.array([0, 0, 0])):
                        tmp[i, j] = (0, 0, 0);
            
            # directly stitch
            # tmp[offsety:b.shape[0]+offsety, offsetx:b.shape[1]+offsetx] = b
            a = tmp

        self.leftImage = tmp
# Example #7
# 0
def poisson_blend_old(input, output, mask):
    """
    * inputs:
        - input (torch.Tensor, required)
                Input tensor of Completion Network.  [N, C, W, H]
        - output (torch.Tensor, required)
                Output tensor of Completion Network.
        - mask (torch.Tensor, required)
                Input mask tensor of Completion Network.
    * returns:
                Image tensor inpainted using poisson image editing method.
    """
    # channel-first (N, C, W, H) -> channel-last (N, W, H, C) numpy copies
    input_np, output_np, mask_np = (
        np.transpose(np.copy(t.cpu().numpy()), axes=(0, 2, 3, 1))
        for t in (input, output, mask))

    pieces = []
    for idx in range(input.shape[0]):
        # blend one (W, H, C) sample, then restore (1, C, W, H) layout
        merged = blend(input_np[idx], output_np[idx], mask_np[idx])
        pieces.append(
            torch.from_numpy(np.transpose(merged, axes=(2, 0, 1)))[None])
    return torch.cat(pieces, dim=0)


# if __name__ == '__main__':
#     x = torch.ones([100, 3, 10, 8])  # N=100, C=3, W=10, H=8
#     y = np.transpose(np.copy(x.cpu().numpy()), axes=(0, 2, 3, 1))
#     print(y.shape)   # (N,W,H,C)
#     print(y[0].shape)
#     z = torch.from_numpy(np.transpose(y[0], axes=(2, 0, 1)))
#     print(z.shape)
#     print(type(z))
#     wc = torch.unsqueeze(z, dim=0)
#     print(wc.shape)
# Example #8
# 0
 def rightshift(self):
     """Warp each image of ``self.right_list`` into the current panorama and
     Poisson-blend it in, updating ``self.leftImage``.

     NOTE(review): ``r1``/``r2`` are only bound when the column scan below
     finds non-black pixels in the panorama's last column; otherwise the
     mask update raises NameError — confirm inputs always cover that column.
     """
     for each in self.right_list:
         # homography mapping the current panorama onto the new image
         H = self.matcher_obj.match(self.leftImage, each, 'right')
         # project the four corners of `each` to size the output canvas
         rt = np.dot(H, np.array([each.shape[1], 0, 1])) # right top
         rt = rt/rt[-1]
         lb = np.dot(H, np.array([0, each.shape[0], 1])) # left bottom
         lb = lb/lb[-1]
         f1 = np.dot(H, np.array([0,0,1])) # left top
         f1 = f1/f1[-1]
         ds = np.dot(H, np.array([each.shape[1], each.shape[0], 1])) # right bottom
         ds = ds/ds[-1]
         dsize = (int(max(rt[0], ds[0])), int(max(lb[1], ds[1], self.leftImage.shape[0])))
         # tmp = cv2.warpPerspective(each, H, dsize, flags = cv2.INTER_CUBIC)
         tmp = gt.perspectivetrans(each, H, dsize)
         
         # Poisson-blending
         # scan the panorama's last column for the row range [r1, r2) of
         # non-black pixels; that range is cut out of the blend mask below
         c = self.leftImage.shape[1]-1
         for i in range(0, self.leftImage.shape[0]-1):
             if not np.array_equal(self.leftImage[i, c], np.array([0, 0, 0])):
                 if np.array_equal(self.leftImage[i-1, c], np.array([0, 0, 0])):
                     r1 = i
             if not np.array_equal(self.leftImage[i, c], np.array([0, 0, 0])):
                 if np.array_equal(self.leftImage[i+1, c], np.array([0, 0, 0])):
                     r2 = i+1
         # mask: 255 where the warped image contributes, zeroed over the
         # panorama rows found above
         mask = cv2.warpPerspective(np.ones(each.shape, dtype=np.uint8)*255, H, dsize, flags = cv2.INTER_CUBIC)
         mask[r1:r2, :self.leftImage.shape[1]] = np.zeros((r2-r1, self.leftImage.shape[1], 3), dtype=np.uint8)
         # paste the existing panorama onto an empty canvas
         blend_img = np.zeros(tmp.shape, dtype=np.uint8)
         blend_img[:self.leftImage.shape[0], :self.leftImage.shape[1]] = self.leftImage
         # black_mask marks every pixel covered by either source
         black_mask = np.zeros(tmp.shape, dtype=np.uint8)
         for i in range(0, tmp.shape[0]):
             for j in range(0, tmp.shape[1]):
                 if (not np.array_equal(tmp[i, j], np.array([0, 0, 0]))) or (not np.array_equal(blend_img[i, j], np.array([0, 0, 0]))):
                     black_mask[i, j] = (1, 1, 1);
         tmp = p_b.blend(blend_img, tmp, mask)
         # re-blacken pixels that neither source image covered
         for i in range(0, tmp.shape[0]):
             for j in range(0, tmp.shape[1]):
                 if np.array_equal(black_mask[i, j], np.array([0, 0, 0])):
                     tmp[i, j] = (0, 0, 0);
         
         # directly stitch
         # tmp = self.mix_and_match(self.leftImage, tmp)
         self.leftImage = tmp
    def postprocess(self, ori_imgs, gen_imgs, is_blend=True):
        """Combine original and generated image batches.

        When is_blend is exactly True, each generated image is
        Poisson-blended into its original; otherwise the two batches are
        composited directly through the model's masks.
        """
        outputs = np.zeros_like(ori_imgs)
        # rescale both batches from (-1, 1) to (0, 1)
        tar_imgs = np.asarray(
            [utils.inverse_transform(img) for img in ori_imgs])
        sour_imgs = np.asarray(
            [utils.inverse_transform(img) for img in gen_imgs])

        if is_blend is True:
            for sample in range(tar_imgs.shape[0]):
                # mask inverted and scaled to uint8 [0, 255] for the blender
                hole_mask = ((1. - self.model.masks[sample]) * 255.)
                hole_mask = hole_mask.astype(np.uint8)
                merged = poisson.blend(tar_imgs[sample], sour_imgs[sample],
                                       hole_mask)
                outputs[sample] = np.clip(merged, 0, 1)
        else:
            outputs = np.multiply(tar_imgs, self.model.masks) + np.multiply(
                sour_imgs, 1. - self.model.masks)

        return outputs
# Example #10
# 0
    def blend(self):
        """Poisson-blend the source image into the destination image and
        show the result in a new Tk window.

        Reads images and mask from the attached image managers; stores the
        blended PIL image in ``self.image_result``.
        """
        src = np.asarray(self.src_img_manager.image_src)
        src.flags.writeable = True
        dst = np.asarray(self.dst_img_manager.image)
        dst.flags.writeable = True
        mask = np.asarray(self.src_img_manager.image_mask)
        mask.flags.writeable = True

        # poissonblending.blend takes (y, x) as offset,
        # whereas gui has (x, y) as offset values so reverse these values.
        reversed_offset = self.dst_img_manager.offset[::-1]
        blended_image = poissonblending.blend(dst, src, mask, reversed_offset)
        self.image_result = PIL.Image.fromarray(np.uint8(blended_image))
        # Tk image handle kept on self so it stays alive for display
        self.image_tk_result = PIL.ImageTk.PhotoImage(self.image_result)

        result_window = Tkinter.Toplevel()
        label = Tkinter.Label(result_window, image=self.image_tk_result)
        label.image = self.image_tk_result  # for holding reference counter
        label.pack()
        # NOTE(review): `command=self.save_result(result_window)` invokes
        # save_result immediately at button creation; this is only correct
        # if save_result returns a callback (factory style) — confirm,
        # otherwise it should be wrapped in a lambda.
        save_button = Tkinter.Button(result_window, text='Save',
                                     command=self.save_result(result_window))
        save_button.pack()

        result_window.title("Blended Result")
# Example #11
# 0
    # NOTE(review): this region fuses fragments of several unrelated scripts
    # (a video-capture blend demo, a GPU inpainting driver, and a PSNR
    # evaluation loop); some enclosing context is missing from the snippet.
    # Display the resulting masked frame
    cv2.imshow('frame', frame_mask)

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()

# convert the captured frame from OpenCV BGR order to RGB
I = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

if doit:
    nbsample = 500
    off = (0, 0)
    # Poisson-blend the captured frame into the target image
    img_ret4 = blend(img_target,
                     I,
                     img_mask,
                     reg=5,
                     nbsubsample=nbsample,
                     offset=off,
                     adapt=adapt_list[idimg],
                     verbose=True)

    #%%
    # plot mask inset plus source / target panels
    fs = 30
    f, axarr = pl.subplots(1, 3, figsize=(30, 10))
    newax = f.add_axes([0.15, 0, 0.32, 0.32], anchor='NW', zorder=1)
    newax.imshow(img_mask)
    newax.axis('off')
    newax.set_title('mask')
    axarr[0].imshow(I)
    axarr[0].set_title('Source', fontsize=fs)
    axarr[0].axis('off')
    axarr[1].imshow(img_target)
    # NOTE(review): fragment boundary — the lines below belong to a GPU
    # inpainting script whose enclosing `if` is not visible here.
    print('using GPU...')
    model.cuda()
    input = input.cuda()

# evaluate
res = model.forward(input)[0].cpu()

# make out
# add the per-channel datamean back onto I
for i in range(3):
    I[i, :, :] = I[i, :, :] + datamean[i]

out = res.float() * M_3ch.float() + I.float() * (M_3ch * (-1) + 1).float()

# post-processing
if opt.postproc:
    print('post-postprocessing...')
    target = input_img  # background
    source = tensor2cvimg(out.numpy())  # foreground
    mask = input_mask
    out = blend(target, source, mask, offset=(0, 0))

    out = torch.from_numpy(cvimg2tensor(out))

# save images
print('save images...')
vutils.save_image(out, 'out.png', normalize=True)
# vutils.save_image(Im, 'masked_input.png', normalize=True)
# vutils.save_image(M_3ch, 'mask.png', normalize=True)
# vutils.save_image(res, 'res.png', normalize=True)
print('Done')
        # NOTE(review): the two `return` lines below are the truncated tail
        # of a psnr() helper whose definition was lost in extraction.
        return 100
    return 20 * math.log10(255.0 / math.sqrt(mse))


# In[]:
# evaluate PSNR before/after blending for each annotated image
for k, v in dic.items():
    img = cv2.cvtColor(cv2.imread(train_path_dataset + k), cv2.COLOR_BGR2RGB)
    img_gt = cv2.cvtColor(cv2.imread(train_gt_path_dataset + k),
                          cv2.COLOR_BGR2RGB)
    img_pred = cv2.cvtColor(cv2.imread(preds_path + k), cv2.COLOR_BGR2RGB)

    # build a rectangle mask from this image's bounding boxes
    mask = np.zeros((600, 500, 3), dtype=np.uint8)
    for bbox in v:
        mask[bbox[0]:bbox[2], bbox[1]:bbox[3]] = 255

    out = blend(img.copy(), img_pred.copy(), mask.copy(), offset=(0, 0))
    print("BEFORE: {}, AFTER: {}".format(psnr(img_gt, img_pred),
                                         psnr(img_gt, out)))
#plt.imshow(img)
#psnr(img_pred, img_gt)

# In[]:

# In[]:

# In[]:

# In[]:

# In[]:
# Example #14
# 0
import numpy as np
import PIL.Image
import pylab as pl

from poissonblending import blend

# load mask, source and target images; drop alpha channels
img_mask = np.asarray(PIL.Image.open('./data/me_mask.png'))
img_mask = img_mask[:, :, :3]  # remove alpha
img_source = np.asarray(PIL.Image.open('./data/me.png'))
img_source = img_source[:, :, :3]  # remove alpha
img_target = np.asarray(PIL.Image.open('./data/target.png'))

nbsample = 500
off = (35, -15)

# plain blend, then a regularized variant with linear adaptation
img_ret1 = blend(img_target, img_source, img_mask, offset=off)
img_ret3 = blend(img_target,
                 img_source,
                 img_mask,
                 reg=5,
                 eta=1,
                 nbsubsample=nbsample,
                 offset=off,
                 adapt='linear')
# NOTE(review): this final call is truncated by the extraction — its closing
# arguments and parenthesis are missing from the snippet.
img_ret4 = blend(img_target,
                 img_source,
                 img_mask,
                 reg=5,
                 eta=1,
                 nbsubsample=nbsample,
                 offset=off,
    def poissonblending(image1, image2, mask):
        """Blend image2 into image1 using the inverted mask.

        We are using code from poissonblending; the original code deserves
        the credits. (NOTE(review): defined without `self` — confirm it is
        meant as a nested/static helper rather than an instance method.)
        """
        return blending.blend(image1, image2, 1 - mask)
# Example #16
# 0
#frame_mask2= cv2.cvtColor((img_mask2>0)*frame, cv2.COLOR_BGR2RGB)
# convert the captured frame from OpenCV BGR order to RGB
I = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
#
#pl.figure(1)
#pl.subplot(211)
#pl.imshow(I)
#pl.subplot(212)
#pl.imshow(frame_mask2)
#
#pl.show()
nbsample = 500
off = (0, 0)
# Poisson-blend the frame into the target (adapt='kernel' variant)
img_ret4 = blend(img_target,
                 I,
                 img_mask,
                 reg=5,
                 nbsubsample=nbsample,
                 offset=off,
                 adapt='kernel',
                 verbose=True)

#%%
# plot mask inset plus source / target panels
fs = 30
f, axarr = pl.subplots(1, 3, figsize=(30, 10))
newax = f.add_axes([0.15, 0, 0.32, 0.32], anchor='NW', zorder=1)
newax.imshow(img_mask)
newax.axis('off')
newax.set_title('mask')
axarr[0].imshow(I)
axarr[0].set_title('Source', fontsize=fs)
axarr[0].axis('off')
axarr[1].imshow(img_target)
# Example #17
# 0
def gl_inpaint(input_img, mask, datamean, model, postproc, device):
    """Inpaint *input_img* with a GL completion network.

    * inputs:
        - input_img: image array (H, W, 3), converted via cvimg2tensor.
        - mask: 3-channel mask image; grayscale values > 0.2 (after /255)
          mark the region to complete.
        - datamean: per-channel mean subtracted before inference and
          restored afterwards.
        - model: completion network exposing .forward().
        - postproc: when truthy, Poisson-blend the result onto the input.
        - device: torch device for the model and tensors.
    * returns: inpainted image as a numpy array with the first and last
      channels swapped back (see below).
    * raises: ValueError if mask is not a 3-channel image.
    """
    I = torch.from_numpy(cvimg2tensor(input_img)).float().to(device)

    if mask.shape[2] != 3:
        # The original code only printed an error here and then crashed on
        # the unbound mask tensors; fail fast with a clear exception.
        raise ValueError('[ERROR] Mask image is invalid')

    input_mask = mask
    M = torch.from_numpy(
        cv2.cvtColor(input_mask, cv2.COLOR_BGR2GRAY) /
        255).float().to(device)
    # binarize the mask at 0.2
    M[M <= 0.2] = 0.0
    M[M > 0.2] = 1.0
    M = M.view(1, M.size(0), M.size(1))
    assert I.size(1) == M.size(1) and I.size(2) == M.size(2)

    # subtract the dataset mean per channel
    for i in range(3):
        I[i, :, :] = I[i, :, :] - datamean[i]

    # make the 3-channel mask and the hole-punched input
    M_3ch = torch.cat((M, M, M), 0)
    Im = I * (M_3ch * (-1) + 1)

    # network input: masked image stacked with the mask, plus batch dim
    input = torch.cat((Im, M), 0)
    input = input.view(1, input.size(0), input.size(1), input.size(2)).float()

    model.to(device)
    input = input.to(device)

    # evaluate
    res = model.forward(input)

    # restore the dataset mean
    for i in range(3):
        I[i, :, :] = I[i, :, :] + datamean[i]

    # composite: network output inside the mask, original image elsewhere
    out = res.float() * M_3ch.float() + I.float() * (M_3ch * (-1) + 1).float()

    out = out[0]
    out = np.array(out.cpu().detach()).transpose(1, 2, 0)
    # swap first and last channels (presumably RGB<->BGR — confirm)
    out = out[:, :, [2, 1, 0]]

    # post-processing: Poisson-blend the result onto the original input
    if postproc:
        print('[INFO] Post Processing...')
        target = input_img  # background
        out = blend(target, out, input_mask, offset=(0, 0))
        out = out / 255

    return out
# Example #18
# 0
# d: border margin used when computing the blend offset below
d = 20

#prepare(r'Images/Carb.nhdr', r'Images/Carb2.nhdr')
img_source, image_header = nrrd.read(r'source.nhdr')
img_target, image_header = nrrd.read(r'target.nhdr')
offset = int(img_target.shape[0] / 2 - 3 * d / 4)

# slices selecting four boundary planes of the volume (one axis pinned
# to its first or last index)
sls = [(slice(None), 0, slice(None)), (slice(None), -1, slice(None)),
       (slice(None), slice(None), 0), (slice(None), slice(None), -1)]

# 2-D Poisson blend on each boundary plane first
for sl in sls:
    img_boundary = img_target[sl]
    img_boundary_source = img_source[sl]
    # mask everything except a one-pixel border
    img_mask = np.zeros(img_boundary_source.shape)
    img_mask[1:-1, 1:-1] = 255
    boundary_res = poissonblending.blend(img_boundary,
                                         img_boundary_source,
                                         img_mask,
                                         offset=(offset, 0))
    #boundary_res[boundary_res>=0.5] = 1
    #boundary_res[boundary_res<0.5] = 0
    img_ret = PIL.Image.fromarray(np.uint8(boundary_res))
    img_ret.save('./testimages/bound' + str(sls.index(sl)) + '.png')
    img_target[sl] = boundary_res

# then a 3-D Poisson blend of the full volume, written back as NRRD
img_mask = np.zeros(img_source.shape)
img_mask[1:-1, 1:-1, 1:-1] = 255
data = poisson3d.blend(img_target, img_source, img_mask, offset=(offset, 0, 0))
options = {'encoding': 'raw'}
nrrd.write(r'result.nhdr', data, options=options)