Code example #1
    def __getitem__(self, index):
        img_id = self.id_list[index]

        # get sil
        all_sil = np.array(
            PIL.Image.open(self.dataset_dir + "sil/%08d.png" % img_id))
        all_sil[all_sil < 128] = 0
        all_sil[all_sil >= 128] = 1

        # get parameters
        with open(self.dataset_dir + "/para/%08d.json" % img_id, 'rb') as fp:
            para_dic = json.load(fp)

        # get src_img and pre-processing parameters
        img_file = para_dic["img_file"]
        src_img = np.array(PIL.Image.open(self.dataset_dir + img_file))
        proc_para = para_dic["proc_para"]

        # get verts and vert_norms
        verts = np.array(para_dic["verts"])
        vert_norms = np.array(para_dic["vert_norms"])

        # get joint move and position
        joint_move = np.array(para_dic["joint_move"])
        joint_posi = para_dic["joint_posi"]

        # get anchor move and position
        achr_move = np.array(para_dic["achr_move"])
        achr_posi = para_dic["achr_posi"]

        # make source for joint net
        sil_j = np.expand_dims(all_sil[:, :, 1], 2)
        src_j = np.zeros((10, 4, 64, 64))
        for i in range(len(joint_posi)):
            crop_sil = center_crop(sil_j, joint_posi[i], 64)
            crop_img = center_crop(src_img, joint_posi[i], 64)
            crop_img = crop_img.astype(np.float64)  # np.float was removed in NumPy 1.24
            crop_img = crop_img - crop_img[31, 31, :]
            crop_img = np.absolute(crop_img)
            crop_img = crop_img / 255.0
            src_j[i, 0, :, :] = np.rollaxis(crop_sil, 2, 0)
            src_j[i, 1:4, :, :] = np.rollaxis(crop_img, 2, 0)

        # make source for anchor net
        src_a = None
        # commented out because prediction does not require this
        '''
        sil_a = np.stack((all_sil[:,:,0], all_sil[:,:,2]), axis = -1)
        src_a = np.zeros((200, 2, 32, 32))
        for i in range(len(achr_posi)):
            crop_sil = center_crop(sil_a, achr_posi[i], 32)
            src_a[i,:,:,:] = np.rollaxis(crop_sil, 2, 0)
        '''
        return (src_j, src_a, src_img, joint_move, achr_move, verts,
                vert_norms, proc_para, all_sil, joint_posi, achr_posi)
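All four examples call a center_crop helper that is not included in this listing. Below is a minimal sketch of what it might look like, assuming posi holds the (row, col) centre of the crop and that regions falling outside the image are zero-padded; the real implementation may differ.

import numpy as np

def center_crop(src, posi, size):
    # Hypothetical stand-in for the center_crop used above; the original
    # implementation is not shown.  Assumes posi = (row, col) of the crop
    # centre and src is an H x W x C array; out-of-image pixels are
    # zero-padded so the result is always size x size x C.
    half = size // 2
    row, col = int(round(posi[0])), int(round(posi[1]))
    out = np.zeros((size, size, src.shape[2]), dtype=src.dtype)
    y0, y1 = max(row - half, 0), min(row + half, src.shape[0])
    x0, x1 = max(col - half, 0), min(col + half, src.shape[1])
    out[y0 - (row - half):y1 - (row - half),
        x0 - (col - half):x1 - (col - half), :] = src[y0:y1, x0:x1, :]
    return out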
Code example #2
    def __getitem__(self, index):
        crt_id = self.id_list[index]
        img_id = int(np.floor(crt_id / 200))
        achr_id = crt_id % 200

        # get sil
        all_sil = np.array(
            PIL.Image.open(self.dataset_dir + "sil/%08d.png" % img_id))
        all_sil[all_sil < 128] = 0
        all_sil[all_sil >= 128] = 255

        # get parameters
        with open(self.dataset_dir + "/para/%08d.json" % img_id, 'rb') as fp:
            para_dic = json.load(fp)
        achr_move = np.array(para_dic["achr_move"])
        achr_posi = para_dic["achr_posi"]

        # get source image
        img_file = para_dic["img_file"]
        src_img = np.array(PIL.Image.open(self.dataset_dir + img_file))

        # make target parameters for the selected anchor
        tgt_para = achr_move[achr_id]
        tgt_para = np.expand_dims(tgt_para, 0)

        if self.sil_ver is False:
            # make input array for image version
            src_sil = np.expand_dims(all_sil[:, :, 2], 2)
            crop_sil = center_crop(src_sil, achr_posi[achr_id], 32)
            crop_img = center_crop(src_img, achr_posi[achr_id], 32)
            crop_img = crop_img.astype(np.int64)  # np.int was removed in NumPy 1.24
            crop_img = crop_img - crop_img[15, 15, :]
            crop_img = np.absolute(crop_img)
            src_in = np.concatenate((crop_sil, crop_img), axis=2)
        else:
            # make input array for silhouette version
            src_sil = np.stack((all_sil[:, :, 0], all_sil[:, :, 2]), axis=-1)
            src_in = center_crop(src_sil, achr_posi[achr_id], 32)

        # transform as torch tensor
        src_in = PIL.Image.fromarray(src_in.astype(np.uint8))
        if self.transform is not None:
            src_in = self.transform(src_in)

        if self.get_all is True and self.sil_ver is False:
            # get verts and vert_norms
            verts = np.array(para_dic["verts"])
            vert_norms = np.array(para_dic["vert_norms"])
            proc_para = para_dic["proc_para"]
            return (src_in, tgt_para, src_img, verts,
                    vert_norms, proc_para, all_sil)
        else:
            return (src_in, tgt_para)
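The crop_img normalisation above (here and in the other examples) subtracts the value of the crop's centre pixel and keeps the absolute difference, so the network sees colour deviation from the centre rather than raw RGB. A minimal, self-contained sketch of that step, using random data in place of a real crop:

import numpy as np

# Stand-in 32 x 32 RGB crop; values are random, only the shapes matter here.
crop_img = np.random.randint(0, 256, size=(32, 32, 3)).astype(np.int64)
crop_img = np.absolute(crop_img - crop_img[15, 15, :])  # deviation from centre pixel
assert not crop_img[15, 15, :].any()  # the centre pixel itself becomes zero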
Code example #3
    def __getitem__(self, index):
        crt_id = self.id_list[index]
        img_id = int(np.floor(crt_id / 10))
        joint_id = crt_id % 10

        # get sil
        all_sil = np.array(
            PIL.Image.open(self.dataset_dir + "sil/%08d.png" % img_id))
        all_sil[all_sil < 128] = 0
        all_sil[all_sil >= 128] = 255

        # get parameters
        with open(self.dataset_dir + "/para/%08d.json" % img_id, 'rb') as fp:
            para_dic = json.load(fp)
        joint_move = para_dic["joint_move"]
        joint_posi = para_dic["joint_posi"]

        # make target para
        tgt_para = np.array(joint_move[(joint_id * 2):(joint_id * 2 + 2)])

        if self.sil_ver is False:
            # make input array for image version
            img_file = para_dic["img_file"]
            src_img = np.array(PIL.Image.open(self.dataset_dir + img_file))
            src_sil = np.expand_dims(all_sil[:, :, 1], 2)
            crop_sil = center_crop(src_sil, joint_posi[joint_id], 64)
            crop_img = center_crop(src_img, joint_posi[joint_id], 64)
            crop_img = crop_img.astype(np.int64)  # np.int was removed in NumPy 1.24
            crop_img = crop_img - crop_img[31, 31, :]
            crop_img = np.absolute(crop_img)

            src_in = np.concatenate((crop_sil, crop_img), axis=2)
        else:
            # make input array for silhouette version
            src_sil = all_sil[:, :, :2]
            src_in = center_crop(src_sil, joint_posi[joint_id], 64)

        # transform as torch tensor
        src_in = PIL.Image.fromarray(src_in.astype(np.uint8))
        if self.transform is not None:
            src_in = self.transform(src_in)

        if self.get_all is True and self.sil_ver is False:
            # get verts and vert_norms
            verts = np.array(para_dic["verts"])
            vert_norms = np.array(para_dic["vert_norms"])
            proc_para = para_dic["proc_para"]
            return (src_in, tgt_para, src_img, verts, vert_norms, proc_para,
                    all_sil)
        else:
            return (src_in, tgt_para)
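As a quick check on the index arithmetic above: each image contributes 10 consecutive ids (one per joint), and joint_move stores two values per joint (per the slicing in the code), so for non-negative ids the flat index decomposes like this:

# Worked example of the index decomposition used above.
crt_id = 1234
img_id, joint_id = divmod(crt_id, 10)   # img_id = 123, joint_id = 4
start = joint_id * 2                    # two entries per joint in joint_move
print(start, start + 2)                 # the slice joint_move[8:10] for joint 4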
Code example #4
test_num = 0
src_img = np.array(PIL.Image.open(opt.outf + "std_img.jpg"))

hmr_mesh = om.read_trimesh(opt.outf + "hmr_mesh.obj")
hmr_mesh.request_vertex_normals()
hmr_mesh.update_normals()
verts = hmr_mesh.points()
vert_norms = hmr_mesh.vertex_normals()

# make input tensor for joint net
joint_posi = get_joint_posi(verts)
proj_sil = my_renderer.silhouette(verts=verts)
src_sil = np.expand_dims(proj_sil, 2)
src_j = np.zeros((10, 4, 64, 64))
for i in range(len(joint_posi)):
    crop_sil = center_crop(src_sil, joint_posi[i], 64)
    crop_img = center_crop(src_img, joint_posi[i], 64)
    crop_img = crop_img.astype(np.float64)  # np.float was removed in NumPy 1.24
    crop_img = crop_img - crop_img[31, 31, :]
    crop_img = np.absolute(crop_img)
    crop_img = crop_img / 255.0
    src_j[i, 0, :, :] = np.rollaxis(crop_sil, 2, 0)
    src_j[i, 1:4, :, :] = np.rollaxis(crop_img, 2, 0)

print("done")

# ==============================predict joint==============================
print("joint deform......", end='')
joint_tsr = pdt_j.predict_batch(src_j)
joint_para = np.array(joint_tsr.data.cpu())
joint_para = np.concatenate((joint_para, np.zeros((10, 1))), axis=1)
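A small shape check for the padding step above, using dummy values in place of the real predictions; the joint-net output is assumed to be (10, 2), and the zero third column is presumably the component of the per-joint move that the network does not predict, so joint_para ends up with one 3-vector per joint:

import numpy as np

# Dummy stand-in for the assumed (10, 2) joint-net output; only shapes matter.
demo = np.arange(20, dtype=np.float64).reshape(10, 2)
demo3 = np.concatenate((demo, np.zeros((10, 1))), axis=1)
print(demo3.shape)  # (10, 3): one 3-component move per joint, last entry zero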