Example #1
    def __init__(self, batch_size, dataloader, model, build_id):
        self.batch_size = batch_size
        self.dataloader = dataloader
        self.model = model

        self.save_path = f'results/{build_id}.pt'

        self.writer = SummaryWriter(f'results/{build_id}')
        input_example = next(iter(dataloader))['uv']

        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        self.model.to(self.device)

        self.writer.add_graph(self.model, input_example)
        self.writer.close()

        self.criterion = nn.MSELoss()
        self.optimizer = optim.SGD(self.model.parameters(),
                                   lr=0.0001,
                                   momentum=0.9)

        self.load_state()

        ncomps = 45
        self.mano_layer = ManoLayer(mano_root='mano/models',
                                    use_pca=False,
                                    ncomps=ncomps,
                                    flat_hand_mean=False)

        self.mano_layer.to(self.device)
        self.mano_layer.eval()
Example #2
    def _init_create_networks(self):
        # generator network
        self._G = self._create_generator()
        #self._G.init_weights()
        if len(self._gpu_ids) > 1:
            self._G = torch.nn.DataParallel(self._G, device_ids=self._gpu_ids)
        if torch.cuda.is_available():
            self._G.cuda()

        self._FC = self._create_fcnet()
        self._FC.init_weights()
        if len(self._gpu_ids) > 1:
            self._FC = torch.nn.DataParallel(self._FC,
                                             device_ids=self._gpu_ids)
        if torch.cuda.is_available():
            self._FC.cuda()

        # Initialize MANO layer
        mano_layer_right = ManoLayer(
            mano_root='/home/enric/libraries/manopth/mano/models/',
            side='right',
            use_pca=True,
            ncomps=45,
            flat_hand_mean=True)
        if torch.cuda.is_available():
            mano_layer_right = mano_layer_right.cuda()
        self._MANO = mano_layer_right

        # Discriminator network
        self._D = self._create_discriminator()
        self._D.init_weights()
        if torch.cuda.is_available():
            self._D.cuda()
Example #3
 def __init__(self, args, x, latent_dim=512, hidden_dim=1024, lstm_layers=1, attention=False, n_classes=26, backbone='resnet50', pretrain=False):
     super(EncoderConvLSTM, self).__init__()
     self.camera_s = {'mean': 753.188477, 'std': 120.778145}
     self.camera_u = {'mean': 94.337875, 'std': 13.138042}
     self.camera_v = {'mean': 92.918877, 'std': 25.438021}
     self.encoder = Encoder(latent_dim, backbone, pretrain)
     self.conv_lstm = ConvLSTM(input_size=(7, 7), input_dim=latent_dim, hidden_dim=hidden_dim, kernel_size=(3, 3), num_layers=lstm_layers, batch_first=True, bias=True, return_all_layers=False)
     self.avg_pool = nn.AdaptiveAvgPool2d(1)
     # self.output_layers = nn.Sequential(
     #     nn.Linear(hidden_dim, hidden_dim),
     #     nn.BatchNorm1d(hidden_dim, momentum=0.01),
     #     nn.ReLU(),
     #     nn.Linear(hidden_dim, n_classes)
     # )
     # 20191019 Dropout & Simple output layers for generalization
     self.output_layers = nn.Sequential(
         nn.Dropout(0.5),
         nn.Linear(hidden_dim, n_classes, bias=False)
     )
     self.mano_layerL = ManoLayer(mano_root=args.root_mano, use_pca=True, ncomps=10, flat_hand_mean=False, side='left')  # scaled to millimeters
     self.mano_layerR = ManoLayer(mano_root=args.root_mano, use_pca=True, ncomps=10, flat_hand_mean=False, side='right')  # scaled to millimeters
     # self.attention = attention
     # self.attention_layer = nn.Linear(2 * hidden_dim if bidirectional else hidden_dim, 1)  # Linear(in_features=2048, out_features=1, bias=True)
     for m in self.modules():
         if isinstance(m, nn.Conv2d):
             n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
             m.weight.data.normal_(0, math.sqrt(2. / n))
         elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):
             m.weight.data.fill_(1)
             m.bias.data.zero_()
         elif isinstance(m, nn.Linear):
             m.weight.data = torch.nn.init.kaiming_normal_(m.weight.data)
Example #4
 def __init__(self,
              args,
              dataset_train,
              latent_dim=512,
              hidden_dim=1024,
              attention=False,
              n_classes=26,
              backbone='resnet50',
              pretrain=False):
     super(EncoderBase, self).__init__()
     self.camera_s = {
         'mean': {
             'train': dataset_train.camera_s_mean
         },
         'std': {
             'train': dataset_train.camera_s_std
         }
     }
     self.camera_u = {
         'mean': {
             'train': dataset_train.camera_u_mean
         },
         'std': {
             'train': dataset_train.camera_u_std
         }
     }
     self.camera_v = {
         'mean': {
             'train': dataset_train.camera_v_mean
         },
         'std': {
             'train': dataset_train.camera_v_std
         }
     }
     self.encoder = Encoder(latent_dim, backbone, pretrain)
     self.output_layers = nn.Sequential(
         nn.Linear(latent_dim, hidden_dim),
         nn.BatchNorm1d(hidden_dim, momentum=0.01), nn.ReLU(),
         nn.Linear(hidden_dim, n_classes))
     self.mano_layerL = ManoLayer(mano_root=args.root_mano,
                                  use_pca=True,
                                  ncomps=10,
                                  flat_hand_mean=False,
                                   side='left')  # scaled to millimeters
     self.mano_layerR = ManoLayer(mano_root=args.root_mano,
                                  use_pca=True,
                                  ncomps=10,
                                  flat_hand_mean=False,
                                   side='right')  # scaled to millimeters
     # self.attention = attention
     # self.attention_layer = nn.Linear(2 * hidden_dim if bidirectional else hidden_dim, 1)  # Linear(in_features=2048, out_features=1, bias=True)
     for m in self.modules():
         if isinstance(m, nn.Conv2d):
             n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
             m.weight.data.normal_(0, math.sqrt(2. / n))
         elif isinstance(m, nn.BatchNorm2d) or isinstance(
                 m, nn.BatchNorm1d):
             m.weight.data.fill_(1)
             m.bias.data.zero_()
Example #5
    def train(self, epochs):
        self.running_loss = 0.0

        for epoch in range(epochs):
            for i, sample in enumerate(tqdm(self.dataloader), 0):
                poses = sample['poses']
                shapes = sample['shapes']

                mano_layer = ManoLayer(mano_root='mano/models',
                                       use_pca=False,
                                       ncomps=48,
                                       flat_hand_mean=False)

                mano_layer.to(self.device)
                # Forward pass through MANO layer
                _, hand_joints = mano_layer(poses, shapes)

                uv_root = sample['uv_root']
                scale = sample['scale']

                hand_joints = hand_joints.reshape([self.batch_size, -1])
                x = torch.cat((hand_joints, uv_root, scale), 1)
                x = torch.cat((x, x), 1)
                # print("x", x.shape)
                y = sample['xyz'].reshape([self.batch_size, -1])
                # print("y", y.shape)

                # print("uv", uv_root.shape)
                # print("sc", scale.shape)

                x = x.to(self.device)
                y = y.to(self.device)

                self.optimizer.zero_grad()

                y_ = self.model(x)

                loss = self.criterion(y_, y)

                loss.backward()
                self.optimizer.step()

                self.running_loss += loss.item()
                self.g_step += 1

                if self.g_step % self.save_rate == self.save_rate - 1:
                    self.running_loss /= self.save_rate
                    self.save_state()
                    # running_loss was already averaged over save_rate above
                    self.writer.add_scalar('training loss',
                                           self.running_loss, self.g_step)
                    print(self.running_loss, self.g_step)
                    self.running_loss = 0.0
Example #6
class ManoDatasetC(Dataset):
    def __init__(self, base_path, transform, train_indices):
        self.transform = transform

        mano_path = os.path.join(base_path, '%s_mano.json' % 'training')
        mano_list = json_load(mano_path)
        mano_array = np.array(mano_list).squeeze(1)
        mano_poses = mano_array[..., :51]

        mano_poses = mano_poses[train_indices]

        self.kde = KernelDensity(bandwidth=0.15, kernel='gaussian')
        self.kde.fit(mano_poses)

        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        self.mano_layer = ManoLayer(
            mano_root='mano/models', use_pca=False, ncomps=45, flat_hand_mean=False)

        self.mano_layer.to(self.device)

    def __len__(self):
        return 32560

    def __getitem__(self, idx):
        sample = self.kde.sample()
        pose = sample[..., :48]
        shape_start = sample[..., 48:]
        shape = np.ones([1, 10])
        shape[..., :3] = shape_start

        x = {
            'p': pose,
            's': shape
        }
        x = self.transform(x)

        hand_verts, hand_joints = self.mano_layer(x['p'], x['s'])
        batch_size = hand_joints.shape[0]
        hand_joints = hand_joints.reshape([batch_size, 63])

        sample = {
            'hand_joints': torch.squeeze(hand_joints),
            'hand_verts': torch.squeeze(hand_verts),
            'poses': torch.squeeze(x['p']),
            'shapes': torch.squeeze(x['s'])
        }

        return sample
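
A minimal usage sketch for ManoDatasetC (the base path, transform, and split indices here are hypothetical; it assumes a FreiHAND-style training_mano.json under the base path and a dict-aware ToTensor-style transform that converts the sampled numpy arrays to float tensors):

import numpy as np
from torch.utils.data import DataLoader

dataset = ManoDatasetC(base_path='data/freihand',       # hypothetical dataset root
                       transform=ToTensor(),            # assumed dict-aware numpy-to-tensor transform
                       train_indices=np.arange(30000))  # hypothetical training split
loader = DataLoader(dataset, batch_size=32, shuffle=True)
batch = next(iter(loader))
print(batch['hand_joints'].shape)  # expected [32, 63] given __getitem__ above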
Example #7
    def __init__(self, side, betas):
        """Constructor.

    Args:
      side: MANO hand type. 'right' or 'left'.
      betas: A numpy array of shape [10] containing the betas.
    """
        super(MANOLayer, self).__init__()

        self._side = side
        self._betas = betas
        self._mano_layer = ManoLayer(flat_hand_mean=False,
                                     ncomps=45,
                                     side=self._side,
                                     mano_root='manopth/mano/models',
                                     use_pca=True)

        b = torch.from_numpy(self._betas).unsqueeze(0)
        f = self._mano_layer.th_faces
        self.register_buffer('b', b)
        self.register_buffer('f', f)

        v = torch.matmul(self._mano_layer.th_shapedirs, self.b.transpose(
            0, 1)).permute(2, 0, 1) + self._mano_layer.th_v_template
        r = torch.matmul(self._mano_layer.th_J_regressor[0], v)
        self.register_buffer('root_trans', r)
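
A brief construction sketch for this wrapper (assuming, as the docstring states, that betas is a float32 numpy array of shape [10] and that the manopth/mano/models assets exist):

import numpy as np

betas = np.zeros(10, dtype=np.float32)  # neutral hand shape coefficients
wrapper = MANOLayer(side='right', betas=betas)
print(wrapper.b.shape, wrapper.f.shape, wrapper.root_trans.shape)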
Example #8
    def __init__(self, num_Iter=500, th_beta=None, th_pose=None, lb_target=None,
                 weight=0.01):
        self.count = 0
        # self.time_start = time.time()
        # self.time_in_mano = 0
        self.minimal_loss = 9999
        self.best_beta = np.zeros([10, 1])
        self.num_Iter = num_Iter

        self.th_beta = th_beta
        self.th_pose = th_pose

        self.beta = th_beta.numpy()
        self.pose = th_pose.numpy()

        self.mano_layer = ManoLayer(side="right",
                                    mano_root='mano/models', use_pca=False, flat_hand_mean=True)

        self.threshold_stop = 10 ** -13
        self.weight = weight
        self.residual_memory = []

        self.lb = np.zeros(21)

        _, self.joints = self.mano_layer(self.th_pose, self.th_beta)
        self.joints = self.joints.numpy().reshape(21, 3)

        self.lb_target = lb_target.reshape(15, 1)
Example #9
    def __init__(
            self,
            dropout=0,
            _mano_root='mano/models'
    ):
        super(ShapeNet, self).__init__()

        ''' shape '''
        hidden_neurons = [128, 256, 512, 256, 128]
        in_neurons = 15
        out_neurons = 10
        neurons = [in_neurons] + hidden_neurons

        shapereg_layers = []
        for layer_idx, (inps, outs) in enumerate(
                zip(neurons[:-1], neurons[1:])
        ):
            if dropout:
                shapereg_layers.append(nn.Dropout(p=dropout))
            shapereg_layers.append(nn.Linear(inps, outs))
            shapereg_layers.append(nn.ReLU())

        shapereg_layers.append(nn.Linear(neurons[-1], out_neurons))
        self.shapereg_layers = nn.Sequential(*shapereg_layers)
        args = {'flat_hand_mean': True, 'root_rot_mode': 'axisang',
                'ncomps': 45, 'mano_root': _mano_root,
                'no_pca': True, 'joint_rot_mode': 'axisang', 'side': 'right'}
        self.mano_layer = ManoLayer(flat_hand_mean=args['flat_hand_mean'],
                                    side=args['side'],
                                    mano_root=args['mano_root'],
                                    ncomps=args['ncomps'],
                                    use_pca=not args['no_pca'],
                                    root_rot_mode=args['root_rot_mode'],
                                    joint_rot_mode=args['joint_rot_mode']
                                    )
Example #11
def xyz_from_mano(poses, shapes):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Select number of principal components for pose space
    ncomps = 45

    # Initialize MANO layer
    mano_layer = ManoLayer(mano_root='mano/models',
                           use_pca=False,
                           ncomps=ncomps,
                           flat_hand_mean=False)
    mano_layer.to(device)

    poses = torch.from_numpy(poses).float().to(device)
    shapes = torch.from_numpy(shapes).float().to(device)

    # Forward pass through MANO layer
    hand_verts, hand_joints = mano_layer(poses, shapes)

    return hand_verts, hand_joints
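
A short usage sketch for xyz_from_mano; with use_pca=False the pose vector has 48 entries (3 global axis-angle values plus 45 joint-angle values) and the shape vector has 10 coefficients:

import numpy as np

poses = np.random.randn(1, 48).astype(np.float32)
shapes = np.random.randn(1, 10).astype(np.float32)
hand_verts, hand_joints = xyz_from_mano(poses, shapes)
print(hand_verts.shape, hand_joints.shape)  # [1, 778, 3] vertices, [1, 21, 3] joints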
Example #12
    def __init__(self,
                 device,
                 mano_root='mano/models',
                 ncomps=45,
                 seg_file='./face2label_sealed.npy',
                 visualize_interm_result=False):
        self.ncomps = ncomps
        self.device = device
        self.mano_root = mano_root
        self.mano_layer = ManoLayer(mano_root=self.mano_root,
                                    use_pca=True,
                                    ncomps=self.ncomps,
                                    flat_hand_mean=False)
        self.mano_layer = self.mano_layer.to(self.device)

        segmentation = np.load(seg_file)
        self.faces = self.mano_layer.th_faces.detach().cpu()
        # assign mano vertex label according to face label:
        self.vertex_label = np.zeros((self.faces.max() + 1), dtype=np.uint8)

        for i in range(0, self.faces.shape[0]):
            self.vertex_label[self.faces[i, 0]] = segmentation[i, 0]
            self.vertex_label[self.faces[i, 1]] = segmentation[i, 0]
            self.vertex_label[self.faces[i, 2]] = segmentation[i, 0]

        self.label_vertex = [(np.where(self.vertex_label == 0))[0], \
                            (np.where(self.vertex_label == 1))[0], \
                            (np.where(self.vertex_label == 2))[0], \
                            (np.where(self.vertex_label == 3))[0], \
                            (np.where(self.vertex_label == 4))[0], \
                            (np.where(self.vertex_label == 5))[0]]

        self.visualize_interm_result = visualize_interm_result

        self.target_points = []
        self.target_points_tree = []
        self.no_label = []
Example #13
def Visualize():
    grasps = glob.glob('data/grasps/obj_*')
    grasps.sort()
    np.random.seed(1)
    np.random.shuffle(grasps)

    from manopth.manolayer import ManoLayer
    mano_layer_right = ManoLayer(mano_root='data/mano/',
                                 side='right',
                                 use_pca=True,
                                 ncomps=45,
                                 flat_hand_mean=True)

    for i, grasp in tqdm.tqdm(enumerate(grasps)):
        with open(grasp, 'rb') as f:
            hand = pickle.load(f, encoding='latin')

        filename = 'data/models/' + hand['body'][36:]

        objname = str.split(filename,
                            'nontextured_transformed.wrl')[0] + 'textured.obj'
        obj = fast_load_obj(open(objname, 'rb'))[0]

        obj_verts = obj['vertices']
        obj_faces = obj['faces']

        mano_trans = hand['mano_trans']
        posesnew = np.concatenate(([hand['pca_manorot']], hand['pca_poses']),
                                  1)

        hand_vertices, _ = mano_layer_right.forward(
            th_pose_coeffs=torch.FloatTensor(posesnew),
            th_trans=torch.FloatTensor(mano_trans))
        hand_vertices = hand_vertices.cpu().data.numpy()[0] / 1000
        hand_faces = mano_layer_right.th_faces.cpu().data.numpy()

        plot_hand_w_object(obj_verts, obj_faces, hand_vertices, hand_faces)
Example #14
    def __init__(self, depth):
        super().__init__()

        if depth == 7:
            self.mano_encoder = Encoder7()
        else:
            self.mano_encoder = Encoder9()

        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")

        self.mano_decoder = ManoLayer(mano_root='mano/models',
                                      use_pca=False,
                                      ncomps=45,
                                      flat_hand_mean=False).to(self.device)
Example #15
    def run_mano(self):
        """Runs forward_mano with numpy-pytorch-numpy handling"""
        if self.hand_pose.shape[
                0] == 48:  # Special case when we're loading GT honnotate
            mano_model = ManoLayer(mano_root='mano/models',
                                   joint_rot_mode="axisang",
                                   use_pca=False,
                                   center_idx=None,
                                   flat_hand_mean=True)
        else:  # Everything else
            mano_model = ManoLayer(mano_root='mano/models',
                                   use_pca=True,
                                   ncomps=15,
                                   side='right',
                                   flat_hand_mean=False)

        pose_tensor = torch.Tensor(self.hand_pose).unsqueeze(0)
        beta_tensor = torch.Tensor(self.hand_beta).unsqueeze(0)
        tform_tensor = torch.Tensor(self.hand_mTc).unsqueeze(0)
        mano_verts, mano_joints = util.forward_mano(mano_model, pose_tensor,
                                                    beta_tensor,
                                                    [tform_tensor])
        self.hand_verts = mano_verts.squeeze().detach().numpy()
        self.hand_joints = mano_joints.squeeze().detach().numpy()
Example #16
    def __init__(self, parameters, target, _mano_root='mano/models'):
        """
        particle swarm optimization
        parameter: a list type, like [NGEN, pop_size, var_num_min, var_num_max]
        """
        self.mano_layer = ManoLayer(side="right",
                                    mano_root=_mano_root,
                                    use_pca=False,
                                    flat_hand_mean=True)

        # Initialization
        self.NGEN = parameters[0]
        self.pop_size = parameters[1]
        self.var_num = parameters[2].shape[1]
        self.bound = []
        self.bound.append(parameters[2])
        self.bound.append(parameters[3])
        self.set_target(target)
Example #17
    def __init__(self):
        super().__init__()
        self.mano_encoder = nn.Sequential(LinearNormedReLUBlock(63, 512),
                                          LinearNormedReLUBlock(512, 1024),
                                          LinearNormedReLUBlock(1024, 1024),
                                          LinearNormedReLUBlock(1024, 1024),
                                          LinearNormedReLUBlock(1024, 1024),
                                          LinearNormedReLUBlock(1024, 1024),
                                          LinearNormedReLUBlock(1024, 1024),
                                          LinearNormedReLUBlock(1024, 1024),
                                          LinearNormedReLUBlock(1024, 512),
                                          LinearNormedReLUBlock(512, 512),
                                          nn.Linear(512, 51))

        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")

        self.mano_decoder = ManoLayer(mano_root='mano/models',
                                      use_pca=False,
                                      ncomps=45,
                                      flat_hand_mean=False).to(self.device)
Example #18
def create_scene(sample, obj_file):
    """Creates the pyrender scene of an image sample.

  Args:
    sample: A dictionary holding an image sample.
    obj_file: A dictionary holding the paths to YCB OBJ files.

  Returns:
    A pyrender scene object.
  """
    # Create pyrender scene.
    scene = pyrender.Scene(bg_color=np.array([0.0, 0.0, 0.0, 0.0]),
                           ambient_light=np.array([1.0, 1.0, 1.0]))

    # Add camera.
    fx = sample['intrinsics']['fx']
    fy = sample['intrinsics']['fy']
    cx = sample['intrinsics']['ppx']
    cy = sample['intrinsics']['ppy']
    cam = pyrender.IntrinsicsCamera(fx, fy, cx, cy)
    scene.add(cam, pose=np.eye(4))

    # Load poses.
    label = np.load(sample['label_file'])
    pose_y = label['pose_y']
    pose_m = label['pose_m']

    # Load YCB meshes.
    mesh_y = []
    for i in sample['ycb_ids']:
        mesh = trimesh.load(obj_file[i])
        mesh = pyrender.Mesh.from_trimesh(mesh)
        mesh_y.append(mesh)

    # Add YCB meshes.
    for o in range(len(pose_y)):
        if np.all(pose_y[o] == 0.0):
            continue
        pose = np.vstack((pose_y[o], np.array([[0, 0, 0, 1]],
                                              dtype=np.float32)))
        pose[1] *= -1
        pose[2] *= -1
        node = scene.add(mesh_y[o], pose=pose)

    # Load MANO layer.
    mano_layer = ManoLayer(flat_hand_mean=False,
                           ncomps=45,
                           side=sample['mano_side'],
                           mano_root='manopth/mano/models',
                           use_pca=True)
    faces = mano_layer.th_faces.numpy()
    betas = torch.tensor(sample['mano_betas'],
                         dtype=torch.float32).unsqueeze(0)

    # Add MANO meshes.
    if not np.all(pose_m == 0.0):
        pose = torch.from_numpy(pose_m)
        vert, _ = mano_layer(pose[:, 0:48], betas, pose[:, 48:51])
        vert /= 1000
        vert = vert.view(778, 3)
        vert = vert.numpy()
        vert[:, 1] *= -1
        vert[:, 2] *= -1
        mesh = trimesh.Trimesh(vertices=vert, faces=faces)
        mesh1 = pyrender.Mesh.from_trimesh(mesh)
        mesh1.primitives[0].material.baseColorFactor = [0.7, 0.7, 0.7, 1.0]
        mesh2 = pyrender.Mesh.from_trimesh(mesh, wireframe=True)
        mesh2.primitives[0].material.baseColorFactor = [0.0, 0.0, 0.0, 1.0]
        node1 = scene.add(mesh1)
        node2 = scene.add(mesh2)

    return scene
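
The returned scene can then be rendered offscreen with pyrender; a minimal sketch (not part of the original code, assuming a 640x480 viewport and a sample/obj_file pair as described in the docstring):

scene = create_scene(sample, obj_file)
renderer = pyrender.OffscreenRenderer(viewport_width=640, viewport_height=480)
color, depth = renderer.render(scene)  # color image and depth map as numpy arrays
renderer.delete()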
Example #19
        expand_vertices = vertices.unsqueeze(1).expand(
            -1, new_vertices_idx_list.shape[1], -1, -1)
        expand_vertices_idx = new_vertices_idx_list.unsqueeze(-1).expand(
            -1, -1, -1, 3)
        new_verts = torch.mean(torch.gather(expand_vertices, 2,
                                            expand_vertices_idx),
                               dim=-2)
        new_verts = torch.cat([vertices, new_verts], dim=1)
        return new_verts, new_faces_list


if __name__ == "__main__":
    ncomps = 6

    mano_layer = ManoLayer(mano_root="assets/mano/",
                           use_pca=True,
                           ncomps=ncomps,
                           flat_hand_mean=False)
    bs = 5
    random_shape = torch.rand(bs, 10)
    random_pose = torch.rand(bs, ncomps + 3)

    # Forward pass through MANO layer
    hand_verts, _ = mano_layer(random_pose, random_shape)
    hand_faces = mano_layer.th_faces

    # print(hand_verts.shape, hand_faces.shape)

    # hand_mesh = o3d.geometry.TriangleMesh()
    # hand_mesh.triangles = o3d.utility.Vector3iVector(np.asarray(hand_faces))
    # hand_mesh.vertices = o3d.utility.Vector3dVector(np.asarray(hand_verts.squeeze(0)))
    # hand_mesh.compute_vertex_normals()
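
The commented-out Open3D block can be completed along these lines (a sketch assuming open3d is installed; it visualizes the first hand in the batch):

import numpy as np
import open3d as o3d

hand_mesh = o3d.geometry.TriangleMesh()
hand_mesh.triangles = o3d.utility.Vector3iVector(np.asarray(hand_faces, dtype=np.int32))
hand_mesh.vertices = o3d.utility.Vector3dVector(np.asarray(hand_verts[0].detach(), dtype=np.float64))
hand_mesh.compute_vertex_normals()
o3d.visualization.draw_geometries([hand_mesh])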
Example #20
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
build_id = CONFIG_M0['build_id']

dataset = FreiHandDataset(CONFIG_M0['dataset_path'],
                          CONFIG_M0['data_version'],
                          transform=ToTensor())

dataloader = DataLoader(dataset, shuffle=True)
sample = next(iter(dataloader))
random_pose = sample['poses']
random_shape = sample['shapes']

print(random_pose.shape)
# Initialize MANO layer
mano_layer = ManoLayer(mano_root='mano/models',
                       use_pca=False,
                       ncomps=48,
                       flat_hand_mean=False)

mano_layer.to(device)
# Forward pass through MANO layer
hand_verts, hand_joints = mano_layer(random_pose, random_shape)
# demo.display_hand({
#     'verts': hand_verts,
#     'joints': hand_joints
# },
#     mano_faces=mano_layer.th_faces)

uv_root = sample['uv_root']
scale = sample['scale']
hand_joints = hand_joints.reshape([1, -1])
x = torch.cat((hand_joints, uv_root, scale), 1)
Example #21
class TrainerP0(object):
    def __init__(self, batch_size, dataloader, model, build_id):
        self.batch_size = batch_size
        self.dataloader = dataloader
        self.model = model

        self.save_path = f'results/{build_id}.pt'

        self.writer = SummaryWriter(f'results/{build_id}')
        input_example = next(iter(dataloader))['uv']

        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        self.model.to(self.device)

        self.writer.add_graph(self.model, input_example)
        self.writer.close()

        self.criterion = nn.MSELoss()
        self.optimizer = optim.SGD(self.model.parameters(),
                                   lr=0.0001,
                                   momentum=0.9)

        self.load_state()

        ncomps = 45
        self.mano_layer = ManoLayer(mano_root='mano/models',
                                    use_pca=False,
                                    ncomps=ncomps,
                                    flat_hand_mean=False)

        self.mano_layer.to(self.device)
        self.mano_layer.eval()

    def train(self, epochs, save_rate):
        self.running_loss = 0.0

        for epoch in range(epochs):
            for i, sample in enumerate(self.dataloader, 0):
                uv = sample['uv']
                outputs_gt = sample['mano']

                uv = uv.to(self.device)
                outputs_gt = outputs_gt.to(self.device)

                self.optimizer.zero_grad()

                outputs = self.model(uv) * 3.12

                # losses = []
                # losses.append(self.criterion(outputs, outputs_gt))

                loss = self.criterion(outputs, outputs_gt)

                # poses_pred = outputs[:, :48]  # .unsqueeze(0)
                # shapes_pred = outputs[:, 48:]  # .unsqueeze(0)
                # hand_verts_p, hand_joints_p = self.mano_layer(poses_pred, shapes_pred)
                #
                # poses_gt = outputs_gt[:, :48]
                # shapes_gt = outputs_gt[:, 48:]
                # hand_verts_gt, hand_joints_gt = self.mano_layer(poses_gt, shapes_gt)

                # losses.append(self.criterion(hand_verts_p, hand_verts_gt))
                # losses.append(self.criterion(hand_joints_p, hand_joints_gt))

                # loss = sum(losses)

                loss.backward()
                self.optimizer.step()

                self.running_loss += loss.item()
                self.g_step += 1

            self.running_loss /= 128
            self.save_state()
            # running_loss is already averaged here; log it directly
            self.writer.add_scalar('training loss', self.running_loss,
                                   self.g_step)
            print(self.running_loss, self.g_step)
            self.running_loss = 0.0

    def save_state(self):
        torch.save(
            {
                'g_step': self.g_step,
                'model_state_dict': self.model.state_dict(),
                'optimizer_state_dict': self.optimizer.state_dict(),
                'running_loss': self.running_loss,
            }, self.save_path)

        print(
            f'Model saved at step {self.g_step} with running loss {self.running_loss}.'
        )

    def load_state(self):
        if os.path.exists(self.save_path):
            checkpoint = torch.load(self.save_path)
            self.g_step = checkpoint['g_step'] + 1
            self.running_loss = checkpoint['running_loss']

            self.model.load_state_dict(checkpoint['model_state_dict'])
            self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
            print(
                f'Model loaded. g_step: {self.g_step}; running_loss: {self.running_loss}'
            )
        else:
            print(
                f'File "{self.save_path}" does not exist. Initializing parameters from scratch.'
            )
            self.g_step = 0
            self.running_loss = 0.0
Example #22
class pcl2mano():
    def __init__(self,
                 device,
                 mano_root='mano/models',
                 ncomps=45,
                 seg_file='./face2label_sealed.npy',
                 visualize_interm_result=False):
        self.ncomps = ncomps
        self.device = device
        self.mano_root = mano_root
        self.mano_layer = ManoLayer(mano_root=self.mano_root,
                                    use_pca=True,
                                    ncomps=self.ncomps,
                                    flat_hand_mean=False)
        self.mano_layer = self.mano_layer.to(self.device)

        segmentation = np.load(seg_file)
        self.faces = self.mano_layer.th_faces.detach().cpu()
        # assign mano vertex label according to face label:
        self.vertex_label = np.zeros((self.faces.max() + 1), dtype=np.uint8)

        for i in range(0, self.faces.shape[0]):
            self.vertex_label[self.faces[i, 0]] = segmentation[i, 0]
            self.vertex_label[self.faces[i, 1]] = segmentation[i, 0]
            self.vertex_label[self.faces[i, 2]] = segmentation[i, 0]

        self.label_vertex = [(np.where(self.vertex_label == 0))[0], \
                            (np.where(self.vertex_label == 1))[0], \
                            (np.where(self.vertex_label == 2))[0], \
                            (np.where(self.vertex_label == 3))[0], \
                            (np.where(self.vertex_label == 4))[0], \
                            (np.where(self.vertex_label == 5))[0]]

        self.visualize_interm_result = visualize_interm_result

        self.target_points = []
        self.target_points_tree = []
        self.no_label = []

    def find_nearest_neighbour_index_from_hand(self, target_points_tree, hands,
                                               no_label):
        if len(hands.shape) == 3:  # batch of hands
            closest_indices = []
            for i in range(0, hands.shape[0]):
                hand = hands[i, :, :].detach().cpu().numpy()
                _, closest_palm_index = target_points_tree[0].query(
                    hand[self.label_vertex[0], :], 1)
                _, closest_thum_index = target_points_tree[1].query(
                    hand[self.label_vertex[1], :], 1)
                _, closest_inde_index = target_points_tree[2].query(
                    hand[self.label_vertex[2], :], 1)
                _, closest_midd_index = target_points_tree[3].query(
                    hand[self.label_vertex[3], :], 1)
                _, closest_ring_index = target_points_tree[4].query(
                    hand[self.label_vertex[4], :], 1)
                _, closest_pink_index = target_points_tree[5].query(
                    hand[self.label_vertex[5], :], 1)

                closest_indices.append([closest_palm_index, closest_thum_index, closest_inde_index,\
                    closest_midd_index, closest_ring_index, closest_pink_index])

            return closest_indices
        else:  # Not used
            print("Fatal error in find_nearest_neighbour_index_from_hand!")
            exit()

    def index2points_from_hand(self, indices, closest_points):
        for batch in range(0, len(indices)):  # iterate over batch dimension
            for label in range(0, 6):  # iterate over hand parts
                closest_points[
                    batch,
                    self.label_vertex[label][:], :] = self.target_points[
                        label][indices[batch][label][:, 0], :]

        return closest_points

    def find_nearest_neighbour_to_hand(self, target_points, n, hands,
                                       no_label):
        closest_batch_hand_indices = np.zeros((0, n), dtype=np.int32)
        target_points_batch_rearrange = np.zeros((0, n, 3), dtype=np.float32)
        for i in range(0, hands.shape[0]):  # iterate over batch dimension
            hand = hands[i, :, :].detach().cpu().numpy()
            closest_hand_indices = np.zeros((0), dtype=np.int32)
            target_points_rearrange = np.zeros((0, 3), dtype=np.float32)
            for hand_part in range(0, 6):
                # If there is no predicted label of that part, skip.
                if no_label[hand_part]:
                    continue
                hand_part_vertex = hand[self.label_vertex[hand_part], :]
                hand_part_tree = KDTree(hand_part_vertex)
                _, closest_part_index = hand_part_tree.query(
                    target_points[hand_part], 1)
                closest_hand_indices = np.concatenate(
                    (closest_hand_indices,
                     self.label_vertex[hand_part][closest_part_index[:, 0]]),
                    0)
                target_points_rearrange = np.concatenate(
                    (target_points_rearrange, target_points[hand_part]))
            target_points_rearrange = np.expand_dims(target_points_rearrange,
                                                     axis=0)

            target_points_batch_rearrange = np.concatenate(
                (target_points_batch_rearrange, target_points_rearrange), 0)
            closest_hand_indices = np.expand_dims(closest_hand_indices, axis=0)
            closest_batch_hand_indices = np.concatenate(
                (closest_batch_hand_indices, closest_hand_indices), 0)

        return closest_batch_hand_indices, target_points_batch_rearrange

    def index2points_to_hand(self, indices, hands):
        hands_batch_rearrange = torch.zeros(0, indices.shape[1],
                                            3).float().to(self.device)
        for batch in range(0, indices.shape[0]):
            hand_rearrange = hands[batch, indices[batch, :], :]
            hand_rearrange = hand_rearrange.unsqueeze(0)
            hands_batch_rearrange = torch.cat(
                (hands_batch_rearrange, hand_rearrange), dim=0)  # axis = 0)
        return hands_batch_rearrange

    def mask_no_label_from_hand(self, hand_verts, no_label):
        for i in range(len(no_label)):
            if no_label[i]:
                hand_verts[:, self.label_vertex[i], :] = 0.
        return hand_verts

    def mask_no_label_to_hand(self, hand_verts, no_label):
        for i in range(len(no_label)):
            if no_label[i]:
                hand_verts[:, self.label_vertex[i], :] = 0.
        return hand_verts

    def fit_mano_2_pcl(
            self,
            samples,  # samples is a (N_v x 3) array, unit is Millimeter!
            labels,  # labels is a (N_v x 1) array
            seeds=8,
            coarse_iter=50,
            fine_iter=50,
            stop_loss=5.0,
            verbose=0):
        # classify samples according to labels
        palm = samples[(np.where(labels == 0))[0], :]
        thumb = samples[(np.where(labels == 1))[0], :]
        index = samples[(np.where(labels == 2))[0], :]
        middle = samples[(np.where(labels == 3))[0], :]
        ring = samples[(np.where(labels == 4))[0], :]
        pinky = samples[(np.where(labels == 5))[0], :]
        for lab in [palm, thumb, index, middle, ring, pinky]:
            # print(len(lab))
            # print(lab.shape)
            self.no_label.append(lab.shape[0] == 0)
        # Add temp point (0,0,0) to part with no sample.
        # The loss from these points will be masked out later when calculating loss
        if palm.shape[0] == 0: palm = np.zeros([1, 3])
        if thumb.shape[0] == 0: thumb = np.zeros([1, 3])
        if index.shape[0] == 0: index = np.zeros([1, 3])
        if middle.shape[0] == 0: middle = np.zeros([1, 3])
        if ring.shape[0] == 0: ring = np.zeros([1, 3])
        if pinky.shape[0] == 0: pinky = np.zeros([1, 3])

        # print("No label:", self.no_label)
        self.target_points_np = [palm, thumb, index, middle, ring, pinky]
        self.target_points = [torch.from_numpy(palm).float().to(self.device),\
                            torch.from_numpy(thumb).float().to(self.device),\
                            torch.from_numpy(index).float().to(self.device),\
                            torch.from_numpy(middle).float().to(self.device),\
                            torch.from_numpy(ring).float().to(self.device),\
                            torch.from_numpy(pinky).float().to(self.device)]
        self.target_points_tree = [
            KDTree(palm),
            KDTree(thumb),
            KDTree(index),
            KDTree(middle),
            KDTree(ring),
            KDTree(pinky)
        ]

        # Model para initialization:
        shape = torch.zeros(seeds, 10).float().to(self.device)
        shape.requires_grad_()
        rot = torch.zeros(seeds, 3).float().to(self.device)
        rot.requires_grad_()
        pose = torch.zeros(seeds, self.ncomps).float().to(self.device)
        pose = (0.1 * torch.randn(seeds, self.ncomps)).float().to(self.device)
        pose.requires_grad_()
        trans = torch.from_numpy(samples.mean(0) /
                                 1000.0)  # trans should be in meter
        trans = trans.unsqueeze(0).repeat(seeds, 1).float().to(self.device)
        # trans = (0.1*torch.randn(seeds, 3)).float().to(self.device)
        trans.requires_grad_()

        hand_verts, hand_joints = self.mano_layer(torch.cat((rot, pose), 1),
                                                  shape, trans)

        if self.visualize_interm_result:
            demo.display_mosh(torch.from_numpy(samples).float().unsqueeze(0).expand(seeds, -1, -1),\
                                np.zeros((0,4), dtype = np.int32),
                                {'verts': hand_verts.detach().cpu(),
                                'joints': hand_joints.detach().cpu()}, \
                                mano_faces=self.mano_layer.th_faces.detach().cpu(), \
                                alpha = 0.3)

        # Global optimization
        criteria_loss = nn.MSELoss().to(self.device)
        previous_loss = 1e8
        optimizer = torch.optim.Adam([trans, rot], lr=1e-2)
        print('...Optimizing global transformation...')
        for i in range(0, coarse_iter):
            hand_verts, hand_joints = self.mano_layer(
                torch.cat((rot, pose), 1), shape, trans)
            # Find closest label points:
            closest_indices = self.find_nearest_neighbour_index_from_hand(
                self.target_points_tree, hand_verts, self.no_label)
            closest_points = self.index2points_from_hand(
                closest_indices, torch.zeros_like(hand_verts))

            for j in range(0, 20):
                hand_verts, hand_joints = self.mano_layer(
                    torch.cat((rot, pose), 1), shape, trans)
                hand_verts = self.mask_no_label_from_hand(
                    hand_verts, self.no_label)
                loss = criteria_loss(hand_verts, closest_points)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
            loss = criteria_loss(hand_verts, closest_points)
            if verbose >= 1:
                print(i, loss.data)
            if previous_loss - loss.data < 1e-1:
                break
            previous_loss = loss.data.detach()
        # print('After coarse alignment: %6f'%(loss.data))
        if self.visualize_interm_result:
            demo.display_mosh(torch.from_numpy(samples).float().unsqueeze(0).expand(seeds, -1, -1),\
                                np.zeros((0,4), dtype = np.int32),
                                {'verts': hand_verts.detach().cpu(),
                                'joints': hand_joints.detach().cpu()}, \
                                mano_faces=self.mano_layer.th_faces.detach().cpu(), \
                                alpha = 0.3)

        # Local optimization
        previous_loss = 1e8
        optimizer = torch.optim.Adam([trans, rot, pose, shape], lr=1e-2)
        print('...Optimizing hand pose shape and global transformation...')
        for i in range(0, fine_iter):
            hand_verts, hand_joints = self.mano_layer(
                torch.cat((rot, pose), 1), shape, trans)
            # Find closest label points:
            closest_batch_hand_indices, target_points_batch_rearrange = self.find_nearest_neighbour_to_hand(
                self.target_points_np, samples.shape[0], hand_verts,
                self.no_label)
            target_points_batch_rearrange = torch.from_numpy(
                target_points_batch_rearrange).float().to(self.device)

            for j in range(0, 20):
                hand_verts, hand_joints = self.mano_layer(
                    torch.cat((rot, pose), 1), shape, trans)
                hands_batch_rearrange = self.index2points_to_hand(
                    closest_batch_hand_indices, hand_verts)

                w_pose = 100.0
                loss = criteria_loss(
                    hands_batch_rearrange, target_points_batch_rearrange
                ) + w_pose * (pose * pose).mean()  # pose regularizer
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
            loss = criteria_loss(hands_batch_rearrange,
                                 target_points_batch_rearrange)

            # if previous_loss - loss.data < 1e-1:
            #     break
            previous_loss = loss.data.detach()

            # Find smallest loss in the #seeds seeds:
            per_seed_error = (
                (hands_batch_rearrange - target_points_batch_rearrange) *
                (hands_batch_rearrange -
                 target_points_batch_rearrange)).mean(2).mean(1)
            min_index = torch.argmin(per_seed_error).detach().cpu().numpy()
            min_error = per_seed_error[min_index]
            if verbose >= 1: print(i, min_error.data)

            if self.visualize_interm_result and i % 40 == 0:
                tmp_arange = np.expand_dims(np.arange(
                    target_points_batch_rearrange.shape[1]),
                                            axis=1)
                link = np.concatenate(
                    (tmp_arange,
                     tmp_arange + target_points_batch_rearrange.shape[1]), 1)
                link = link[0:200, :]
                visual_points = torch.cat(
                    (target_points_batch_rearrange[min_index, :, :],
                     hands_batch_rearrange[min_index, :, :]),
                    dim=0)
                visual_points = visual_points.unsqueeze(0).expand(
                    seeds, -1, -1)

                pass
                demo.display_mosh(visual_points.detach().cpu(),\
                                link,
                                {'verts': hand_verts.detach().cpu(),
                                'joints': hand_joints.detach().cpu()}, \
                                mano_faces=self.mano_layer.th_faces.detach().cpu(), \
                                alpha = 0.3)

            if min_error < stop_loss:
                break

        # print('After fine alignment: %6f'%(loss.data))

        hand_verts, hand_joints = self.mano_layer(torch.cat((rot, pose), 1),
                                                  shape, trans)

        hand_shape = {'vertices': hand_verts.detach().cpu().numpy()[min_index, :, :], \
                        'joints': hand_joints.detach().cpu().numpy()[min_index, :, :], \
                         'faces': self.mano_layer.th_faces.detach().cpu()}

        mano_para = {'rot': rot.detach().cpu().numpy()[min_index, :], \
                    'pose': pose.detach().cpu().numpy()[min_index, :], \
                    'shape': shape.detach().cpu().numpy()[min_index, :], \
                    'trans': trans.detach().cpu().numpy()[min_index, :]}

        return hand_shape, mano_para
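
A hedged usage sketch for pcl2mano (the point cloud and labels here are random stand-ins; a real call passes segmented hand points in millimeters with per-point part labels in 0..5, and requires the MANO model files plus face2label_sealed.npy):

import numpy as np
import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
fitter = pcl2mano(device)  # default mano_root / seg_file paths
samples = (np.random.rand(2000, 3).astype(np.float32) - 0.5) * 100.0  # (N_v x 3), millimeters
labels = np.random.randint(0, 6, size=(2000, 1))                      # (N_v x 1) part labels
hand_shape, mano_para = fitter.fit_mano_2_pcl(samples, labels, seeds=4, verbose=1)
print(hand_shape['vertices'].shape, mano_para['pose'].shape)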
Example #23
import torch
from manopth.manolayer import ManoLayer
from manopth import demo

batch_size = 10
# Select number of principal components for pose space
ncomps = 6

# Initialize MANO layer
mano_layer = ManoLayer(
    mano_root='..\\mano\\models', use_pca=True, ncomps=ncomps, flat_hand_mean=False, center_idx=9)

# Generate random shape parameters
random_shape = torch.rand(batch_size, 10)
# Generate random pose parameters, including 3 values for global axis-angle rotation
random_pose = torch.rand(batch_size, ncomps + 3)

# Forward pass through MANO layer
hand_verts, hand_joints = mano_layer(random_pose, random_shape)
demo.display_hand({
    'verts': hand_verts,
    'joints': hand_joints
},
                  mano_faces=mano_layer.th_faces)
Example #24
def generate_random_hand(batch_size=1, ncomps=6, mano_root='mano/models'):
    nfull_comps = ncomps + 3  # Add global orientation dims to PCA
    random_pcapose = torch.rand(batch_size, nfull_comps)
    mano_layer = ManoLayer(mano_root=mano_root)
    verts, joints = mano_layer(random_pcapose)
    return {'verts': verts, 'joints': joints, 'faces': mano_layer.th_faces}
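
A quick usage sketch for generate_random_hand (assumes the mano/models assets are present; note that the layer above is built with ManoLayer's defaults, so the ncomps argument only sets the length of the random pose vector and should match the layer's default PCA size):

hand = generate_random_hand(batch_size=2, ncomps=6, mano_root='mano/models')
print(hand['verts'].shape, hand['joints'].shape)  # [2, 778, 3], [2, 21, 3]
print(hand['faces'].shape)                        # [1538, 3] triangle indices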
Example #25
    def __init__(
        self,
        ncomps=6,
        base_neurons=[1024, 512],
        center_idx=9,
        use_shape=False,
        use_trans=False,
        use_pca=True,
        mano_root="misc/mano",
        adapt_skeleton=True,
        dropout=0,
    ):
        """
        Args:
            mano_root (path): dir containing mano pickle files
        """
        super(ManoBranch, self).__init__()

        self.adapt_skeleton = adapt_skeleton
        self.use_trans = use_trans
        self.use_shape = use_shape
        self.use_pca = use_pca
        self.stereo_shape = torch.Tensor([
            -0.00298099,
            -0.0013994,
            -0.00840144,
            0.00362311,
            0.00248761,
            0.00044125,
            0.00381337,
            -0.00183374,
            -0.00149655,
            0.00137479,
        ]).cuda()

        if self.use_pca:
            # pca comps + 3 global axis-angle params
            mano_pose_size = ncomps + 3
        else:
            # 15 joint rotations + 1 global rotation, 9 components per rotation
            mano_pose_size = 16 * 9
        # Base layers
        base_layers = []
        for layer_idx, (inp_neurons, out_neurons) in enumerate(
                zip(base_neurons[:-1], base_neurons[1:])):
            if dropout:
                base_layers.append(nn.Dropout(p=dropout))
            base_layers.append(nn.Linear(inp_neurons, out_neurons))
            base_layers.append(nn.ReLU())
        self.base_layer = nn.Sequential(*base_layers)

        # Pose layers
        self.pose_reg = nn.Linear(base_neurons[-1], mano_pose_size)
        if not self.use_pca:
            # Initialize all nondiagonal items on rotation matrix weights to 0
            self.pose_reg.bias.data.fill_(0)
            weight_mask = (self.pose_reg.weight.data.new(
                np.identity(3)).view(9).repeat(16))
            self.pose_reg.weight.data = torch.abs(
                weight_mask.unsqueeze(1).repeat(1, 256).float() *
                self.pose_reg.weight.data)

        # Shape layers
        if self.use_shape:
            self.shape_reg = torch.nn.Sequential(
                nn.Linear(base_neurons[-1], 10))

        # Trans layers
        if self.use_trans:
            self.trans_reg = nn.Linear(base_neurons[-1], 3)

        # Mano layers
        self.mano_layer_right = ManoLayer(
            ncomps=ncomps,
            center_idx=center_idx,
            side="right",
            mano_root=mano_root,
            use_pca=use_pca,
        )
        self.mano_layer_left = ManoLayer(
            ncomps=ncomps,
            center_idx=center_idx,
            side="left",
            mano_root=mano_root,
            use_pca=use_pca,
        )
        if self.adapt_skeleton:
            joint_nb = 21
            self.left_skeleton_reg = nn.Linear(joint_nb, joint_nb, bias=False)
            self.left_skeleton_reg.weight.data = torch.eye(joint_nb)
            self.right_skeleton_reg = nn.Linear(joint_nb, joint_nb, bias=False)
            self.right_skeleton_reg.weight.data = torch.eye(joint_nb)

        self.faces = self.mano_layer_right.th_faces
Example #26
# %%
from manopth.manolayer import ManoLayer
from manopth.demo import display_hand
import torch
import numpy as np

batch_size = 1

# number of principal components for pose space
ncomps = 45

mano_layer = ManoLayer(
    mano_root="mano/models",
    use_pca=False,
    ncomps=ncomps,
    flat_hand_mean=False
)

pose = torch.tensor(
    [
        10.0, 10.0, 10.0,
        1.25, 0.5, 1.0,
        0.0, 0.0, 1.0,
        0.0, 0.0, 0.5,
        0.25, 0.5, 1.0,
        0.0, 0.0, 1.0,
        0.0, 0.0, 0.7,
        0.25, 0.5, 1.5,
        1.0, 0.0, 1.0,
        0.0, 0.0, 1.5,
        0.25, 0.5, 1.0,
Example #27
import torch
from manopth.manolayer import ManoLayer
from manopth.manolayer2 import ManoLayer as ManoLayer2

from manopth import demo
torch.random.manual_seed(4)
batch_size = 10
# Select number of principal components for pose space
ncomps = 6

# Initialize MANO layer
mano_layer = ManoLayer(mano_root='mano/models',
                       use_pca=True,
                       ncomps=ncomps,
                       flat_hand_mean=False)

# Generate random shape parameters
random_shape = torch.rand(batch_size, 10)
# Generate random pose parameters, including 3 values for global axis-angle rotation
random_pose = torch.rand(batch_size, ncomps + 3)

# Forward pass through MANO layer
hand_verts, hand_joints = mano_layer(random_pose, random_shape)
demo.display_hand({
    'verts': hand_verts,
    'joints': hand_joints
},
                  mano_faces=mano_layer.th_faces)
Example #28
def main(args):
    batch_size = args.batch_size
    if args.use_pca:
        ncomps = 12
    else:
        ncomps = 45

    # Initialize MANO layer
    mano_layer = ManoLayer(
        mano_root="mano/models",
        use_pca=args.use_pca,
        ncomps=ncomps,
        flat_hand_mean=args.flat_hand_mean,
        center_idx=9,
        return_transf=True,
    )
    faces = np.array(mano_layer.th_faces).astype(np.int64)  # np.long is removed in recent NumPy

    # Generate random shape parameters
    random_shape = torch.rand(batch_size, 10)
    # Generate random pose parameters, including 3 values for global axis-angle rotation
    if args.use_pca:
        random_pose = torch.rand(batch_size, ncomps + 3)
    else:
        random_pose = torch.zeros(batch_size, ncomps + 3)

    # Forward pass through MANO layer
    vertices, joints, transf = mano_layer(random_pose, random_shape)

    if args.render == "plt":
        demo.display_hand(
            {
                "verts": vertices,
                "joints": joints
            },
            mano_faces=mano_layer.th_faces,
        )
    elif args.render == "pyrender":
        # =========================== Viewer Options >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
        scene = pyrender.Scene()
        cam = pyrender.PerspectiveCamera(yfov=np.pi / 3.0, aspectRatio=1.414)
        node_cam = pyrender.Node(camera=cam, matrix=np.eye(4))
        scene.add_node(node_cam)
        scene.set_pose(node_cam, pose=np.eye(4))
        vertex_colors = np.array([200, 200, 200, 150])
        joint_colors = np.array([10, 73, 233, 255])
        # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
        transl = np.array([0, 0, -200.0])
        transl = transl[np.newaxis, :]
        joints = np.array(joints[0])
        vertices = np.array(vertices[0])
        transf = np.array(transf[0])

        joints = joints * 1000.0 + transl
        vertices = vertices * 1000.0 + transl
        transf[:, :3, 3] = transf[:, :3, 3] * 1000.0 + transl

        tri_mesh = trimesh.Trimesh(vertices,
                                   faces,
                                   vertex_colors=vertex_colors)
        mesh = pyrender.Mesh.from_trimesh(tri_mesh)
        scene.add(mesh)

        # Add Joints
        for j in range(21):
            sm = trimesh.creation.uv_sphere(radius=2)
            sm.visual.vertex_colors = joint_colors
            tfs = np.tile(np.eye(4), (1, 1, 1))
            tfs[0, :3, 3] = joints[j]
            joints_pcl = pyrender.Mesh.from_trimesh(sm, poses=tfs)
            scene.add(joints_pcl)

        # Add Transformation
        for tf in range(16):
            axis = trimesh.creation.axis(transform=transf[tf],
                                         origin_size=3,
                                         axis_length=15)
            axis = pyrender.Mesh.from_trimesh(axis, smooth=False)
            scene.add(axis)

        pyrender.Viewer(scene, use_raymond_lighting=True)
    else:
        raise ValueError(f"Unknown renderer: {args.render}")

    return 0
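
main(args) only reads args.batch_size, args.use_pca, args.flat_hand_mean and args.render; a minimal driver could look like this (a sketch, not the original script's CLI):

import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--batch_size", type=int, default=1)
    parser.add_argument("--use_pca", action="store_true")
    parser.add_argument("--flat_hand_mean", action="store_true")
    parser.add_argument("--render", choices=["plt", "pyrender"], default="plt")
    main(parser.parse_args())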
Example #29
    frame = Frames_keypoints[i]
    Cordinates = []
    for j in range(0, len(frame), 4):
        Cordinates.append([frame[j], frame[j + 1], frame[j + 2]])
    Cors = np.array(Cordinates, dtype=np.float32)
    return Cors


if __name__ == "__main__":
    batch_size = 1
    # Select number of principal components for pose space
    ncomps = 6
    # Initialize MANO layer
    mano_layer = ManoLayer(
        mano_root='D:\\pycharm_project\\Fit_hands\\manopth\\mano\\models',
        use_pca=True,
        ncomps=ncomps,
        flat_hand_mean=False)
    json_file = "..\\json_file\\3dkeypoints.json"
    cor = None
    f = open(json_file, 'r')
    Frames_keypoints = []
    for line in f.readlines():
        dic = json.loads(line)
        cor = dic['people'][0]["hand_right_keypoints_3d"]
        Frames_keypoints.append(cor)
    f.close()

    random_shape = torch.rand(1, 10)
    random_shape.requires_grad = True
    # Generate random pose parameters, including 3 values for global axis-angle rotation
        '--iters',
        type=int,
        default=100,
        help=
        "Use for quick profiling of forward and backward pass across ManoLayer"
    )
    parser.add_argument('--mano_root', default='../mano/models')
    parser.add_argument(
        '--mano_ncomps', default=6, type=int, help="Number of PCA components")
    args = parser.parse_args()

    argutils.print_args(args)

    layer = ManoLayer(
        flat_hand_mean=args.flat_hand_mean,
        side=args.side,
        mano_root=args.mano_root,
        ncomps=args.mano_ncomps)  # instantiation only runs __init__, not forward
    n_components = 6
    rot = 3

    # Generate random pose coefficients
    pose_params = torch.rand(args.batch_size, n_components + rot)
    pose_params.requires_grad = True
    if args.random_shape:
        shape = torch.rand(args.batch_size, 10)
    else:
        shape = torch.zeros(1)  # Hack to act like None for PyTorch JIT
    if args.cuda:
        pose_params = pose_params.cuda()  # note the difference in how this is written
        shape = shape.cuda()