Example #1
    def save_batch_images(self,
                          name,
                          image,
                          idx,
                          image_pred=None,
                          image_target=None,
                          pose_pred=None,
                          pose_gt=None):
        """Dump a batch of images (and optional predictions, targets and poses) as .npy files."""
        name = name + "_images"
        self.record_index(name, idx)
        dir_path = os.path.join(self.path, name)
        ensure_dir(dir_path)
        path = os.path.join(dir_path, '%s.npy' % idx)
        image = image_pytorch_to_numpy(image, True)
        np.save(path, image)
        if image_target is not None:
            path = os.path.join(dir_path, '%sT.npy' % idx)
            image_target = image_pytorch_to_numpy(image_target, True)
            np.save(path, image_target)
        if image_pred is not None:
            path = os.path.join(dir_path, '%sP.npy' % idx)  # 'P' suffix for predictions ('T' is already used by targets)
            image_pred = image_pytorch_to_numpy(image_pred, True)
            np.save(path, image_pred)
        if pose_pred is not None:
            path = os.path.join(dir_path, '%spose.npy' % idx)
            pose_pred = tensor_to_numpy(pose_pred)
            np.save(path, pose_pred)
        if pose_gt is not None:
            path = os.path.join(dir_path, '%spose_gt.npy' % idx)
            pose_gt = tensor_to_numpy(pose_gt)
            np.save(path, pose_gt)
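
The dumps can be read back with np.load; a minimal sketch, assuming a logger whose self.path is "logs" and a batch saved under name="train" (the directory and index here are hypothetical):

import os
import numpy as np

dir_path = os.path.join("logs", "train_images")  # hypothetical dump directory
idx = 0

image = np.load(os.path.join(dir_path, '%s.npy' % idx))       # input image batch
pose_path = os.path.join(dir_path, '%spose.npy' % idx)
if os.path.exists(pose_path):                                 # optional dumps may be absent
    pose_pred = np.load(pose_path)
    print(image.shape, pose_pred.shape)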
Example #2

    def __init__(self, faces):

        super().__init__()
        faces = tensor_to_numpy(faces.cpu())
        faces = np.expand_dims(faces, 0)
        faces = np.repeat(faces, repeats=128 * 4, axis=0)  # replicate the face list over the batch
        self.faces = numpy_to_tensor(faces).int()  # needs to be int (see neural renderer)
        # Append each face with reversed vertex order so both windings get rendered
        self.faces = torch.cat((self.faces, self.faces[:, :, list(reversed(range(self.faces.shape[-1])))]), dim=1)
        self.reverse_rows = numpy_to_long(np.array(list(reversed(range(IM_SIZE)))))
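
Reversing the vertex order of a face flips its winding and hence its outward normal, which is why the faces are duplicated with reversed indices (many renderers cull back faces). A standalone numpy check:

import numpy as np

verts = np.array([[0.0, 0.0, 0.0],
                  [1.0, 0.0, 0.0],
                  [0.0, 1.0, 0.0]])
tri = np.array([0, 1, 2])

def normal(face):
    a, b, c = verts[face]
    return np.cross(b - a, c - a)

print(normal(tri))        # [0. 0. 1.]
print(normal(tri[::-1]))  # [0. 0. -1.] -- reversed winding flips the normal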
Example #3
    def forward(self, pose_pred, pose_label):

        assert pose_pred.size()[2] == 3
        assert pose_label.size()[2] == 3
        pose_pred = tensor_to_numpy(pose_pred)
        pose_label = tensor_to_numpy(pose_label)
        batch_size = pose_label.shape[0]
        R_lst = []
        for i in range(batch_size):
            # Optimal rotation aligning each label pose onto its prediction
            R, _ = orthogonal_procrustes(pose_label[i], pose_pred[i])
            R_lst.append(numpy_to_tensor_float(R))
        R_lst = torch.stack(R_lst, dim=0)
        # Rotate the labels into the predictions' frame before computing the joint error
        aligned_label = torch.bmm(numpy_to_tensor_float(pose_label), R_lst)

        return self.MPJ(aligned_label, numpy_to_tensor_float(pose_pred))
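
scipy.linalg.orthogonal_procrustes returns the orthogonal matrix R minimising ||A @ R - B||_F, which is what aligns each label pose to its prediction above. A self-contained check on random data:

import numpy as np
from scipy.linalg import orthogonal_procrustes

rng = np.random.default_rng(0)
A = rng.standard_normal((17, 3))          # e.g. 17 joints in 3D

# Build a random orthogonal matrix via QR and rotate A with it
Q, _ = np.linalg.qr(rng.standard_normal((3, 3)))
B = A @ Q

R, _ = orthogonal_procrustes(A, B)        # R minimises ||A @ R - B||_F
print(np.abs(A @ R - B).max())            # ~1e-15: alignment is exact here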
Example #4

    idx = 0

    ten = (torch.arange(10, dtype=torch.int32) * 3).cuda()[:, None, None]
    print(ten[:, 0, 0])
    d = Drawer()
    parser = EncParser("pars")
    arg = parser.get_arguments()
    c = SMPL_Data(arg, 5)
    el = 0  # assumed dataset element index (not set in the original snippet)
    dic = c[el]
    el_idx = el * arg.batch_size
    s, act, sub, ca, fno = c.index_file[el_idx + idx]
    print(s, act, sub, ca, fno)
    joints = dic['joints_im']

    im = dic['image']
    im = tensor_to_numpy(im).transpose(0, 2, 3, 1)  # NCHW -> NHWC for plotting
    im = im[idx]
    from utils.smpl_torch.pytorch.smpl_layer import SMPL_Layer
    from utils.smpl_torch.display_utils import Drawer as DrawerS
    smpl_layer = SMPL_Layer(
        center_idx=0,
        gender='neutral',
        model_root='data/models_smpl')
    ds = DrawerS(kintree_table=smpl_layer.kintree_table)
    # Generate random pose and shape parameters
    batch_size = 1
    pose_params = torch.rand(batch_size, 72)  # 72 = 24 joints x 3 axis-angle values
    shape_params_1 = numpy_to_tensor_float(np.array([-0.8954, -0.0059, -0.3579, -0.2993, -0.4143, -0.3281, -0.1679, -0.1420,
         0.0835,  0.5819]))
    shape_params = torch.rand(batch_size, 10) * 2  # SMPL betas have 10 components, not 72
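
The sizes above follow the standard SMPL convention: 72 pose values (a 3-value axis-angle rotation for each of 24 joints, the first being the global root) and 10 shape coefficients (betas). A minimal shape check in plain torch, no SMPL model files needed:

import torch

batch_size = 1
pose_params = torch.zeros(batch_size, 72)    # 24 joints * 3 axis-angle values
shape_params = torch.zeros(batch_size, 10)   # 10 shape coefficients (betas)

# Global root rotation is the first 3 pose values; per-joint rotations follow
root_rot = pose_params[:, :3]
joint_rots = pose_params[:, 3:].reshape(batch_size, 23, 3)
print(root_rot.shape, joint_rots.shape)      # torch.Size([1, 3]) torch.Size([1, 23, 3])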
Example #5
    if torch.cuda.is_available():  # assumed guard; the snippet starts mid-script
        smpl_layer.cuda()
    # Forward pass through the SMPL layer
    verts, Jtr = smpl_layer(pose_params, th_betas=shape_params)

    root = np.reshape(b['joints_im'][0, :], (1, 1, 3))
    root = numpy_to_tensor_float(root)
    verts = from_smpl_to_h36m_world_torch(verts, root, from_camera=False, R_world_cam=None)
    dic = {}
    for key in b.keys():
        if isinstance(b[key], list) and "idx" not in key:
            print(key)
            if len(b[key][0].shape) == 2:
                dim1, dim2 = b[key][0].shape
                dic[key] = numpy_to_tensor_float(np.reshape(b[key][0], (1, dim1, dim2)))
    pix_vertices_ca = project_vertices_onto_mask(verts, dic)
    px = tensor_to_numpy(pix_vertices_ca)[0]
    plt.scatter(px[:, 0], px[:, 1], s=1)
    plt.show()
    plt.rcParams['axes.facecolor'] = 'black'
    plt.scatter(px[:, 0], px[:, 1], s=40, c='w')
    plt.show()
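
For reference, projecting vertices into the image boils down to a pinhole camera model; a minimal numpy version with hypothetical intrinsics f and c (not the project's actual project_vertices_onto_mask helper):

import numpy as np

def project_pinhole(points_cam, f, c):
    """Project Nx3 camera-frame points to Nx2 pixels: u = f * x/z + c."""
    xy = points_cam[:, :2] / points_cam[:, 2:3]
    return xy * f + c

points_cam = np.array([[0.1, 0.2, 4.0], [-0.3, 0.1, 5.0]])
f = np.array([1145.0, 1144.0])   # hypothetical focal lengths (px)
c = np.array([512.0, 515.0])     # hypothetical principal point (px)
print(project_pinhole(points_cam, f, c))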
Example #6
from sample.losses.poses import Aligned_MPJ, MPJ, Normalised_MPJ
from utils.trans_numpy_torch import tensor_to_numpy
from utils.utils_H36M.visualise import Drawer

parser = EncParser("Encoder parser")
args_enc = parser.get_arguments()
data_train = Data_3dpose(args_enc,
                         index_file_content=['s', 'act'],
                         index_file_list=[[1], [2, 3]],
                         sampling=5,
                         randomise=False)  #8,9

loss = Aligned_MPJ()
it, out = data_train[10]
poses = out['joints_im'][:5]
R = it['R_world_im'][:5]

# now apply the transformation: rotate each pose in the batch by R
pose_trans = torch.bmm(poses, R.transpose(1, 2))
# uncomment the line in losses.poses MPJ to output the GT and prediction
loss_al, pred, gt = loss(poses, pose_trans)

#draw
print(tensor_to_numpy(loss_al))
dr = Drawer()
fig = plt.figure()
fig = dr.poses_3d(tensor_to_numpy(gt[0]),
                  tensor_to_numpy(pred[0]),
                  fig=fig,
                  plot=True)
plt.show()
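
The MPJ used above is the mean per-joint position error (MPJPE): the L2 distance between corresponding joints, averaged over joints and batch. A minimal torch sketch of that metric, independent of the project's implementation:

import torch

def mpjpe(pred, gt):
    """Mean per-joint position error for (batch, joints, 3) tensors."""
    return (pred - gt).norm(dim=2).mean()

torch.manual_seed(0)
gt = torch.randn(4, 17, 3)
pred = gt + 0.01 * torch.randn(4, 17, 3)
print(mpjpe(pred, gt))  # small, since pred is a slight perturbation of gt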
Example #7

    H36M_CONF.joints.number,
    sample_metadata['R'],
    sample_metadata['T'],
    sample_metadata['f'],
    sample_metadata['c']
)

bbpx_px = bounding_box_pixel(joints_world, 0, sample_metadata['R'], sample_metadata['T'], sample_metadata['f'], sample_metadata['c'])
imwarped, trans = get_patch_image(img, bbpx_px, (256, 256), 0)  # last arg: rotation angle around the z axis (e.g. np.pi / 4)
trans_torch = numpy_to_tensor_float(trans.reshape(1, 2, 3))
trsft_joints_torch = transform_2d_joints_batch(joints_pix_torch, trans_torch)
trsf_joints = transform_2d_joints(joint_px, trans)
trsft_joints_torch = tensor_to_numpy(trsft_joints_torch).reshape(17, 2)

plt.figure()
imwarped = c.get_image(imwarped)
plt.imshow(imwarped)
plt.axis("off")
plt.show()
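
transform_2d_joints applies the 2x3 affine matrix returned alongside the warped patch; the operation is a plain homogeneous matrix product. A minimal numpy sketch (independent of the project's helpers):

import numpy as np

def apply_affine_2d(points, trans):
    """Apply a 2x3 affine matrix to Nx2 points via homogeneous coordinates."""
    ones = np.ones((points.shape[0], 1))
    return np.hstack([points, ones]) @ trans.T

points = np.array([[10.0, 20.0], [30.0, 40.0]])
trans = np.array([[1.0, 0.0, 5.0],     # identity rotation/scale, +5 px shift in x
                  [0.0, 1.0, -3.0]])   # -3 px shift in y
print(apply_affine_2d(points, trans))  # [[15. 17.] [35. 37.]]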

#######################################################################
# test rotation