Esempio n. 1
0
    def get_current_visuals_test(self):
        """Assemble a 7-panel test-time visualization strip.

        Panels (left to right): source image, source pose, target pose,
        filled background, PATN output, predicted mask, generated image.

        Returns:
            OrderedDict with keys 'vis' (the (H, W*7, 3) uint8 strip),
            'image' (generated image), 'FG' (target foreground), and
            'mask' (the raw float mask, HWC).
        """
        height, width = self.input_P1.size(2), self.input_P1.size(3)
        input_P1 = util.tensor2im(self.input_P1.data)

        # draw_pose_from_map returns (image, ...); keep the rendered map only.
        input_BP1 = util.draw_pose_from_map(self.input_BP1.data)[0]
        input_BP2 = util.draw_pose_from_map(self.input_BP2.data)[0]

        bg_img_fill_ = util.tensor2im(self.bg_fill.data)
        out_patn_img_ = util.tensor2im(self.out_patn_img.data)

        target_img_fg_ = util.tensor2im(self.target_img_fg.data)
        fake_p2 = util.tensor2im(self.fake_p2.data)

        # CHW -> HWC once; keep the float copy for 'mask' and derive the
        # displayable uint8 copy from it (original transposed twice).
        mask_2_orig = np.transpose(
            (self.mask_2[0, :, :, :]).detach().cpu().numpy(), (1, 2, 0))
        # Fix: the dtype is np.uint8 (the type), not np.uint8() (a scalar).
        mask_2 = (mask_2_orig * 255).astype(np.uint8)

        vis = np.zeros((height, width * 7, 3)).astype(np.uint8)  #h, w, c
        vis[:, :width, :] = input_P1
        vis[:, width:width * 2, :] = input_BP1
        vis[:, width * 2:width * 3, :] = input_BP2
        vis[:, width * 3:width * 4, :] = bg_img_fill_
        vis[:, width * 4:width * 5, :] = out_patn_img_
        vis[:, width * 5:width * 6, :] = mask_2
        vis[:, width * 6:width * 7, :] = fake_p2
        ret_visuals = OrderedDict([('vis', vis), ('image', fake_p2),
                                   ('FG', target_img_fg_),
                                   ('mask', mask_2_orig)])
        return ret_visuals
Esempio n. 2
0
    def get_current_visuals(self):
        """Build a 7-panel strip: source/target images, poses, depth maps.

        Layout (left to right): source image, source pose, source depth,
        target image, target pose, target depth, generated image.

        Returns:
            OrderedDict with a single 'vis' entry: an (H, W*7, 3) uint8 strip.
        """
        height, width = self.input_H1.size(2), self.input_H1.size(3)
        input_H1 = util.tensor2im(self.input_H1.data)
        input_H2 = util.tensor2im(self.input_H2.data)

        # Fix: draw_pose_from_map returns a tuple (image, ...) — every
        # sibling visualizer indexes [0]; without it the slice assignment
        # below would receive a tuple instead of an array.
        input_P1 = util.draw_pose_from_map(self.input_P1.data)[0]
        input_P2 = util.draw_pose_from_map(self.input_P2.data)[0]
        '''Added for Depth Map'''
        input_D1 = util.tensor2im(self.input_D1.data)
        input_D2 = util.tensor2im(self.input_D2.data)

        fake_p2 = util.tensor2im(self.fake_p2.data)
        '''Added for Depth Map'''
        vis = np.zeros((height, width * 7, 3)).astype(np.uint8)  #h, w, c
        vis[:, :width, :] = input_H1
        vis[:, width:width * 2, :] = input_P1
        vis[:, width * 2:width * 3, :] = input_D1

        vis[:, width * 3:width * 4, :] = input_H2
        vis[:, width * 4:width * 5, :] = input_P2
        vis[:, width * 5:width * 6, :] = input_D2

        vis[:, width * 6:, :] = fake_p2

        ret_visuals = OrderedDict([('vis', vis)])

        return ret_visuals
Esempio n. 3
0
    def get_reduced_visuals(self):
        """Return a compact 6-panel strip: source, poses, parses, and fake.

        Layout (left to right): source image, source pose, source parse,
        target pose, predicted parse, generated image.

        Returns:
            OrderedDict with a single 'vis' entry: an (H, W*6, 3) uint8 strip.
        """
        h = self.input_P1.size(2)
        w = self.input_P1.size(3)

        # Render every panel to an HWC uint8 image.
        source_img = util.tensor2im(self.input_P1.data)
        source_parse = util.tensor2im(
            torch.argmax(self.input_SPL1_onehot, axis=1, keepdim=True).data,
            True)
        source_pose = util.draw_pose_from_map(self.input_KP1.data)[0]
        target_pose = util.draw_pose_from_map(self.input_KP2.data)[0]
        parse_pred = util.tensor2im(
            torch.argmax(self.fake_parse, axis=1, keepdim=True).data, True)
        generated = util.tensor2im(self.fake_p2.data)

        # Paste the panels side by side into one strip.
        panels = [source_img, source_pose, source_parse, target_pose,
                  parse_pred, generated]
        vis = np.zeros((h, w * 6, 3), dtype=np.uint8)  # h, w, c
        for idx, panel in enumerate(panels):
            vis[:, w * idx:w * (idx + 1), :] = panel

        return OrderedDict([('vis', vis)])
    def get_current_visuals(self):
        """Extend the wrapped model's strip with three augmentation panels.

        Appends to the main model's 5-panel 'vis': the augmented pose, the
        image generated from it, and the pose map re-estimated from that
        image (heat6, drawn with a 0.1 threshold).

        Returns:
            OrderedDict with a single 'vis' entry: an (H, W*8, 3) uint8 strip.
        """
        h = self.main_model.input_P1.size(2)
        w = self.main_model.input_P1.size(3)

        canvas = np.zeros((h, w * 8, 3), dtype=np.uint8)  # h, w, c

        # First five panels come straight from the wrapped model.
        canvas[:, :w * 5, :] = self.main_model.get_current_visuals()['vis']

        # Augmented pose panel.
        canvas[:, w * 5:w * 6, :] = util.draw_pose_from_map(
            self.input_BP_aug.data)[0]
        # Image generated from the augmented pose, rescaled from the
        # [-1, 1] range implied by (x + 1) / 2 to displayable [0, 255].
        canvas[:, w * 6:w * 7, :] = (
            (self.fake_aug + 1) / 2.0 * 255).astype(np.uint8)

        # Pose re-estimated from the generated image.
        canvas[:, w * 7:w * 8, :] = util.draw_pose_from_map(
            self.main_model.heat6.data, 0.1)[0]

        return OrderedDict([('vis', canvas)])
    def get_current_visuals(self):
        """Return the standard 5-panel strip: P1, BP1, P2, BP2, fake P2.

        Returns:
            OrderedDict with a single 'vis' entry: an (H, W*5, 3) uint8 strip.
        """
        h = self.input_P1.size(2)
        w = self.input_P1.size(3)

        panels = [
            util.tensor2im(self.input_P1.data),               # source image
            util.draw_pose_from_map(self.input_BP1.data)[0],  # source pose
            util.tensor2im(self.input_P2.data),               # target image
            util.draw_pose_from_map(self.input_BP2.data)[0],  # target pose
            util.tensor2im(self.fake_p2.data),                # generated
        ]

        vis = np.zeros((h, w * 5, 3), dtype=np.uint8)  # h, w, c
        for idx, panel in enumerate(panels):
            vis[:, w * idx:w * (idx + 1), :] = panel

        return OrderedDict([('vis', vis)])
Esempio n. 6
0
    def get_current_visuals(self):
        """Assemble a 12-panel training visualization strip.

        Panels (left to right): source image, source pose, target image,
        target pose, generated image, filled background, PATN output,
        predicted soft mask, target background, target foreground,
        hard-masked target foreground, hard mask.

        Returns:
            OrderedDict with a single 'vis' entry: an (H, W*12, 3) uint8
            strip.
        """
        height, width = self.input_P1.size(2), self.input_P1.size(3)
        input_P1 = util.tensor2im(self.input_P1.data)
        input_P2 = util.tensor2im(self.input_P2.data)
        bg_img_fill_ = util.tensor2im(self.bg_fill.data)
        out_patn_img_ = util.tensor2im(self.out_patn_img.data)
        target_img_bg_ = util.tensor2im(self.target_img_bg.data)
        target_img_fg_ = util.tensor2im(self.target_img_fg.data)
        target_img_hard_fg = util.tensor2im(
            (self.mask_target_hard * self.input_P2).data)

        # draw_pose_from_map returns (image, ...); keep the rendered map only.
        input_BP1 = util.draw_pose_from_map(self.input_BP1.data)[0]
        input_BP2 = util.draw_pose_from_map(self.input_BP2.data)[0]

        fake_p2 = util.tensor2im(self.fake_p2.data)

        # CHW -> HWC, then scale mask values (presumably in [0, 1] — the
        # *255 implies it; confirm against the mask producer) to uint8.
        # Fix: the dtype is np.uint8 (the type), not np.uint8() (a scalar);
        # also dropped the unused mask_2_orig copy the original computed.
        mask_2 = np.transpose(
            (self.mask_2[0, :, :, :]).detach().cpu().numpy(), (1, 2, 0))
        mask_2 = (mask_2 * 255).astype(np.uint8)

        mask_hard = np.transpose(
            (self.mask_target_hard[0, :, :, :]).detach().cpu().numpy(),
            (1, 2, 0))
        mask_hard = (mask_hard * 255).astype(np.uint8)

        vis = np.zeros((height, width * 12, 3)).astype(np.uint8)  #h, w, c
        vis[:, :width, :] = input_P1
        vis[:, width:width * 2, :] = input_BP1
        vis[:, width * 2:width * 3, :] = input_P2
        vis[:, width * 3:width * 4, :] = input_BP2
        vis[:, width * 4:width * 5, :] = fake_p2
        vis[:, width * 5:width * 6, :] = bg_img_fill_
        vis[:, width * 6:width * 7, :] = out_patn_img_
        vis[:, width * 7:width * 8, :] = mask_2
        vis[:, width * 8:width * 9, :] = target_img_bg_
        vis[:, width * 9:width * 10, :] = target_img_fg_
        vis[:, width * 10:width * 11, :] = target_img_hard_fg
        vis[:, width * 11:width * 12, :] = mask_hard

        ret_visuals = OrderedDict([('vis', vis)])

        return ret_visuals
Esempio n. 7
0
    def get_current_visuals(self):
        """Return the 5-panel demo strip and dump the fake image to disk.

        Side effect: writes the generated image (channel-reversed for
        OpenCV's BGR order) to ./fake_results/demo/<image_paths>.
        NOTE(review): cv2.imwrite fails silently (returns False) when the
        directory does not exist — confirm the caller creates it.

        Returns:
            OrderedDict with a single 'vis' entry: an (H, W*5, 3) uint8 strip.
        """
        h = self.input_P1.size(2)
        w = self.input_P1.size(3)

        src_img = util.tensor2im(self.input_P1.data)
        tgt_img = util.tensor2im(self.input_P2.data)
        src_pose = util.draw_pose_from_map(self.input_BP1.data)[0]
        tgt_pose = util.draw_pose_from_map(self.input_BP2.data)[0]
        fake_img = util.tensor2im(self.fake_p2.data)

        # save fake result as RGB (reverse the channel axis for OpenCV)
        cv2.imwrite('./fake_results/demo/' + self.image_paths,
                    fake_img[:, :, ::-1])

        strip = np.zeros((h, w * 5, 3), dtype=np.uint8)  # h, w, c
        for idx, panel in enumerate(
                [src_img, src_pose, tgt_img, tgt_pose, fake_img]):
            strip[:, w * idx:w * (idx + 1), :] = panel

        return OrderedDict([('vis', strip)])
Esempio n. 8
0
import numpy as np
from PIL import Image
import torch
import util.util as util
from util.image_pool import ImagePool
import os

# Render every saved pose heat-map in the draw directory and write each one
# out as a JPEG visualization.
path_testK = '/home/zeeshan/ashish/Pose-Transfer/seed_data/draw/'
out_dir = '/home/zeeshan/ashish/Pose-Transfer/seed_data/openpose_visual/'
for heat_map in os.listdir(path_testK):
    # Load the raw heat-map and prepend a batch dimension (1, C, H, W).
    heat = np.load(path_testK + heat_map)
    heat = heat.reshape(1, heat.shape[0], heat.shape[1], heat.shape[2])
    heat = torch.from_numpy(heat)
    print(heat_map)
    print(heat.shape)
    # draw_pose_from_map returns (image, ...); keep the rendered image.
    rendered = util.draw_pose_from_map(heat)[0]
    # Save under the same base name with a .jpg extension.
    Image.fromarray(rendered).save(
        out_dir + heat_map.split('.')[0] + '.jpg')