Example #1
    def forward(self, input_offsets, q_parents):
        """ Calculates the quaternion rotations which transform the model
        offsets to the input offsets.

        Inputs are transformed to the space shared by the model using the
        inverse of `q_parents`. Being in the same space allows a simple
        calculation of the difference in rotation.

        Args:
            input_offsets (N, 3): Input vectors of the keypoints w.r.t.
                their parents.
            q_parents (N, 4): Quaternion transformations which provide the
                total derived transformation up to the parent.
        """
        model_offsets = self.offset.repeat(input_offsets.shape[0], 1)
        derived_orientation = quat.qmul(
            q_parents, self.orientation.repeat(q_parents.shape[0], 1))
        inputs_object = quat.q_rot(quat.q_inv(derived_orientation),
                                   input_offsets)
        q_diffs = quat.find_q_v(F.normalize(model_offsets),
                                F.normalize(inputs_object))
        new_qs = quat.qmul(self.orientation.repeat(q_diffs.shape[0], 1),
                           q_diffs)
        new_q_parents = quat.qmul(q_parents, new_qs)
        new_ps = quat.q_rot(quat.q_inv(new_q_parents), input_offsets)

        return new_q_parents, new_qs, new_ps
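The key step here is `quat.find_q_v`, which presumably returns the quaternion aligning one unit vector with another. A minimal standalone sketch of that operation, using a hypothetical `q_between` helper in the w-first convention (the degenerate antiparallel case is left unhandled):

import torch
import torch.nn.functional as F

def q_between(a, b):
    """Quaternion (w, x, y, z) rotating unit vector a onto unit vector b.

    Hypothetical stand-in for quat.find_q_v; antiparallel inputs (a == -b)
    are not handled here.
    """
    w = 1.0 + (a * b).sum(dim=-1, keepdim=True)  # 1 + dot(a, b)
    xyz = torch.cross(a, b, dim=-1)              # rotation axis * sin(angle)
    return F.normalize(torch.cat((w, xyz), dim=-1), dim=-1)

a = F.normalize(torch.tensor([[0.0, 1.0, 0.0]]), dim=-1)
b = F.normalize(torch.tensor([[1.0, 1.0, 0.0]]), dim=-1)
print(q_between(a, b))  # 45-degree rotation taking +y toward +x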
Example #2
    def forward_kinematics2(self, rotations, positions, scales):
        """
        Forward kinematics using the given quaternion rotations and local
        positions.

        Args:
            rotations (N, J, 4): Unit quaternions describing local rotations
                for each joint.
            positions (N, J, 3): Joint positions relative to the parent.
            scales (N, J, 3): x, y, z scale for each joint.
        """
        rotations_world = []
        positions_world = []
        scales_world = []
        tips = []
        tip_idxs = []

        for i in range(rotations.shape[1]):
            if self.parents_[i] == -1:
                positions_world.append(positions[:, i])
                rotations_world.append(rotations[:, i])
                scales_world.append(scales[:, i])
            else:
                positions_world.append(
                    q_rot(rotations_world[self.parents_[i]], positions[:, i]) \
                    * scales_world[self.parents_[i]]
                    + positions_world[self.parents_[i]]
                )

                if self.inherit_scale:
                    scales_world.append(scales_world[self.parents_[i]] * scales[:, i])
                else:
                    scales_world.append(scales[:, i])

                if self.has_children_[i]:
                    rotations_world.append(
                        qmul(rotations_world[self.parents_[i]], rotations[:, i])
                    )
                else:
                    tip_idxs.append(i + len(tip_idxs) + 1)
                    tip_pos = (torch.tensor([0., 1., 0.], device=rotations.device)
                               * scales[:, i] * self.tip_length)
                    # tip_pos = tip_pos.repeat(rotations_world[self.parents_[i]].shape[0], 1)
                    tip_pos = q_rot(qmul(rotations_world[self.parents_[i]],
                                         rotations[:, i]), tip_pos) \
                              + positions_world[i]
                    tips.append(tip_pos)
                    # Tips have no children, so an unbatched identity suffices
                    rotations_world.append(
                        torch.tensor([1.0, 0.0, 0.0, 0.0], device=rotations.device))

        for i in range(len(tip_idxs)):
            positions_world.insert(tip_idxs[i], tips[i])

        return torch.stack(positions_world)
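The tip bookkeeping is the subtle part: each tip is inserted immediately after its terminal joint, and every earlier insertion shifts later indices by one, hence `i + len(tip_idxs) + 1`. A toy illustration of the same index arithmetic with plain lists:

joints = ['root', 'spine', 'hand_l', 'hand_r']  # both hands are terminal
tip_idxs, tips = [], []
for i, name in enumerate(joints):
    if name.startswith('hand'):                 # no children -> gets a tip
        tip_idxs.append(i + len(tip_idxs) + 1)
        tips.append(name + '_tip')
for idx, tip in zip(tip_idxs, tips):
    joints.insert(idx, tip)
print(joints)
# ['root', 'spine', 'hand_l', 'hand_l_tip', 'hand_r', 'hand_r_tip']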
Example #3
    def forward_kinematics(self, rotations, root_positions):
        """
        Perform forward kinematics using the given trajectory and local rotations.
        Arguments (where N = batch size, L = sequence length, J = number of joints):
         -- rotations: (N, L, J, 4) tensor of unit quaternions describing the local rotations of each joint.
         -- root_positions: (N, L, 3) tensor describing the root joint positions.
        """
        assert len(rotations.shape) == 4
        assert rotations.shape[-1] == 4

        positions_world = []
        rotations_world = []

        expanded_offsets = self._offsets.expand(rotations.shape[0],
                                                rotations.shape[1],
                                                self._offsets.shape[0],
                                                self._offsets.shape[1])

        # Parallelize along the batch and time dimensions
        for i in range(self._offsets.shape[0]):
            if self._parents[i] == -1:
                positions_world.append(root_positions)
                rotations_world.append(rotations[:, :, 0])
            else:
                positions_world.append(qrot(rotations_world[self._parents[i]], expanded_offsets[:, :, i]) \
                                       + positions_world[self._parents[i]])
                if self._has_children[i]:
                    rotations_world.append(
                        qmul(rotations_world[self._parents[i]], rotations[:, :, i]))
                else:
                    # This joint is a terminal node -> it would be useless to compute the transformation
                    rotations_world.append(None)

        return torch.stack(positions_world, dim=3).permute(0, 1, 3, 2)
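Both FK variants accumulate world transforms with the same two primitives: quaternion multiplication and quaternion-vector rotation. A self-contained sketch of these helpers as commonly implemented, as hypothetical stand-ins for the module's own `qmul`/`qrot` (w-first convention):

import torch

def qmul(q, r):
    """Hamilton product of two (..., 4) quaternion tensors (w, x, y, z)."""
    w1, x1, y1, z1 = q.unbind(-1)
    w2, x2, y2, z2 = r.unbind(-1)
    return torch.stack((w1*w2 - x1*x2 - y1*y2 - z1*z2,
                        w1*x2 + x1*w2 + y1*z2 - z1*y2,
                        w1*y2 - x1*z2 + y1*w2 + z1*x2,
                        w1*z2 + x1*y2 - y1*x2 + z1*w2), dim=-1)

def qrot(q, v):
    """Rotate (..., 3) vectors v by (..., 4) unit quaternions q."""
    qvec = q[..., 1:]
    uv = torch.cross(qvec, v, dim=-1)
    uuv = torch.cross(qvec, uv, dim=-1)
    return v + 2 * (q[..., :1] * uv + uuv)

# The identity quaternion leaves a vector unchanged:
q = torch.tensor([1.0, 0.0, 0.0, 0.0])
v = torch.tensor([0.0, 1.0, 0.0])
print(qrot(q, v))  # tensor([0., 1., 0.])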
Example #4
    def to_matrix(self, rotations, positions, scales):
        """
        Converts the local rotations and positions to matrix form.

        Args:
            rotations (N, J, 4): Unit quaternions describing local rotations
                for each joint.
            positions (N, J, 3): Joint positions relative to the parent.
            scales (N, J, 3): Scale parameters relative to the parent.
        """
        rotations_world = []
        positions_world = []
        scales_world = []
        transforms = []

        for i in range(rotations.shape[1]):
            if self.parents_[i] == -1:
                rotations_world.append(rotations[:, i])
                positions_world.append(positions[:, i])
                scales_world.append(scales[:, i])
                transform = q_to_rotation_matrix_v(rotations_world[i])
            else:
                positions_world.append(
                    q_rot(rotations_world[self.parents_[i]], positions[:, i]) \
                    * scales_world[self.parents_[i]]
                    + positions_world[self.parents_[i]]
                )
                q_w = qmul(rotations_world[self.parents_[i]], rotations[:, i])
                transform = q_to_rotation_matrix_v(q_w)

                tip_mod = 1.0

                if self.has_children_[i]:
                    rotations_world.append(q_w)
                else:
                    rotations_world.append(
                        torch.tensor([1., 0., 0., 0.],
                                     dtype=rotations.dtype,
                                     device=rotations.device))
                    tip_mod = self.tip_length

                if self.inherit_scale:
                    scales_world.append(scales_world[self.parents_[i]] * scales[:, i] * tip_mod)
                else:
                    scales_world.append(scales[:, i] * tip_mod)

            # Scale bone
            scale_matrix = torch.diag_embed(
                torch.cat((scales_world[i],
                           torch.ones(scales.shape[0], 1, device=scales.device)),
                          -1))
            transform = transform @ scale_matrix.to(transform.device)

            # Set position
            transform[:, :3, 3] = positions_world[i]

            transforms.append(transform)

        return torch.stack(transforms, dim=1)
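The matrix assembly above amounts to composing translation, rotation, and scale in T·R·S order: the rotation matrix (here assuming `q_to_rotation_matrix_v` returns a homogeneous matrix) is right-multiplied by a diagonal scale, then the world position is written into the last column. A minimal single-joint sketch:

import torch

def trs(rotation, position, scale):
    """Build a 4x4 transform applying scale, then rotation, then translation."""
    m = torch.eye(4)
    m[:3, :3] = rotation @ torch.diag(scale)  # R @ S
    m[:3, 3] = position                       # T
    return m

m = trs(torch.eye(3),
        torch.tensor([0.0, 1.0, 0.0]),
        torch.tensor([1.0, 2.0, 1.0]))
print(m @ torch.tensor([0.0, 1.0, 0.0, 1.0]))  # scaled to y=2, moved to y=3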
Example #5
    def remove_joints(self, joints_to_remove, dataset):
        """
        Remove the joints specified in 'joints_to_remove', both from the
        skeleton definition and from the dataset (which is modified in place).
        The rotations of removed joints are propagated along the kinematic chain.
        """
        valid_joints = []
        for joint in range(len(self._parents)):
            if joint not in joints_to_remove:
                valid_joints.append(joint)

        # Update all transformations in the dataset
        for count, rotations in enumerate(dataset):
            for joint in joints_to_remove:
                for child in self._children[joint]:
                    rotations[:, :, child] = qmul(rotations[:, :, joint],
                                                  rotations[:, :, child])
                rotations[:, :, joint] = torch.DoubleTensor([1, 0, 0, 0])  # Identity
            dataset[count] = rotations[:, :, valid_joints]

        index_offsets = np.zeros(len(self._parents), dtype=int)
        new_parents = []
        new_joints_right = []
        new_joints_left = []
        for i, parent in enumerate(self._parents):
            if i not in joints_to_remove:
                new_parents.append(parent - index_offsets[parent])
                if i in self._joints_left:
                    # Index of the joint just appended
                    new_joints_left.append(len(new_parents) - 1)
                elif i in self._joints_right:
                    new_joints_right.append(len(new_parents) - 1)
            else:
                index_offsets[i:] += 1
        self._parents = np.array(new_parents)
        self._joints_left = np.array(new_joints_left)
        self._joints_right = np.array(new_joints_right)

        self._offsets = self._offsets[valid_joints]
        self._compute_metadata()
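The parent remapping is the delicate part: `index_offsets[parent]` counts how many removed joints precede `parent`, so every surviving joint keeps pointing at the correct shifted index. A toy run of that loop alone, on a five-joint chain with joint 2 removed:

import numpy as np

parents = [-1, 0, 1, 2, 3]      # simple chain
joints_to_remove = {2}
index_offsets = np.zeros(len(parents), dtype=int)
new_parents = []
for i, parent in enumerate(parents):
    if i not in joints_to_remove:
        new_parents.append(parent - index_offsets[parent])
    else:
        index_offsets[i:] += 1
print(new_parents)  # [-1, 0, 1, 2]: old joint 3 reattaches to old joint 1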
Example #6
    def forward(self, x):
        batch_size = x.shape[0]
        x = x.view(batch_size, -1, 4)

        # The first four channels parameterize a per-joint rotation offset;
        # the remaining (shape) channels are currently unused.
        pos_pred = x[:, :, :4]
        # shape_pred = x[:, :, 4:]
        rot_offset = F.normalize(pos_pred, dim=-1)
        # scale_pred = torch.min(torch.max(torch.ones_like(shape_pred) * -0.5,
        # shape_pred),
        # torch.ones_like(shape_pred) * 0.5)

        # Position
        positions = self.positions.to(x.device).unsqueeze(0).repeat(
            batch_size, 1, 1)

        # Rotation
        rotations = self.base_rotations.to(x.device).repeat(
            batch_size, 1, 1)
        new_rotations = qmul(rotations[:, self.trainable_idxs].view(-1, 4),
                             rot_offset.view(-1, 4))
        new_rotations = new_rotations.view(batch_size, -1, 4)
        rotations[:, self.trainable_idxs] = new_rotations

        # Scale parameter
        scale_params = torch.ones_like(positions)
        # if self.predict_shape is True:
        # scale_params[:, self.trainable_idxs] += scale_pred

        # Forward kinematics
        local_transforms = self.skeleton.to_matrix(rotations, positions,
                                                   scale_params).cuda()
        coord_pred = self.skeleton.forward_kinematics2(rotations, positions,
                                                       scale_params).cuda()
        coord_pred = coord_pred.permute((1, 0, 2))

        return local_transforms, coord_pred, rot_offset
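The rotation head relies on a simple parameterization: any raw 4-vector becomes a valid unit quaternion after L2 normalization, which is then composed onto the base pose with `qmul`. That projection step in isolation:

import torch
import torch.nn.functional as F

raw = torch.randn(2, 4)              # unconstrained network output
q_offset = F.normalize(raw, dim=-1)  # project onto the unit 3-sphere
print(q_offset.norm(dim=-1))         # tensor([1., 1.]): valid unit quaternions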
Example #7
    def forward(self, x):
        """Forward pass through the layer.

        Converts the input to world space using known projection and view
        matrices. Once in world space, it is compared to the skeleton derived
        in world space. This comparison yields the model translation and
        rotation.

        Additionally, each local joint is compared to the predicted joints to
        find individual joint transformations.

        Args:
            x (B, n_p, 3): Tensor of the 3D joint predictions. The x, y
                coordinates are in normalized render coordinates (0, 1) and
                the z value is in world space.

        Returns:
            transforms (B, n_j, 4, 4): Tensor defining the transforms for
                each joint.
            bone_lengths (B, n_bones): Length of each bone implied by the
                input offsets.
        """
        # TODO: These matrices need to be passed into SF
        proj_matrix = torch.Tensor([[1.732051, 0.0, 0.0, 0.0],
                                    [0.0, 1.732051, 0.0, 0.0],
                                    [0.0, 0.0, -1.133333, -1.066667],
                                    [0.0, 0.0, -1.0, 0.0]]).cuda()
        view_matrix = torch.Tensor([[1.0, 0.0, 0.0, 0.0],
                                    [0.0, 1.0, 0.0, 0.0],
                                    [0.0, 0.0, 1.0, 0.0],
                                    [0.0, 0.0, 0.0, 1.0]]).cuda()
        proj_matrix_inv = torch.inverse(proj_matrix)

        # Convert all input to world space
        x_h = torch.cat(
            (x, torch.ones(x.shape[0], x.shape[1], 1, device=x.device)), 2)

        z = -x[:, :, 2]
        z_mult = torch.cat(
            (z.repeat(2, 1, 1).view(x.shape[0], 2, -1),
             torch.ones(x.shape[0], 2, self.num_kp, device=x.device)), 1)
        x_clip = x_h.transpose(1, 2) * z_mult
        x_view = proj_matrix_inv @ x_clip

        x_world_unnorm = (torch.inverse(view_matrix) @ x_view).transpose(1, 2)
        x_world_unnorm[:, :, 2] = -z

        x_world = x_world_unnorm[:, :, :3]

        # Compute offsets for input
        parent_idxs = [i for i in self.joint_parent_idxs if i > -1]
        x_parents = torch.cat((torch.zeros(x.shape[0], 1, 3, device=x.device),
                               x_world[:, parent_idxs].clone().detach()), 1)

        x_world_offsets = x_world - x_parents

        # Local positions of the model are scaled based on the input
        bone_idxs = [i for i, val in enumerate(self.kp_hand_map) if val > -1]
        bone_lengths = torch.norm(x_world_offsets[:, bone_idxs], dim=2)

        # Model Rotation Calculation
        pose_joint_idxs = [0, 1, 2]
        pose_joints = x_world[:, pose_joint_idxs].clone()
        joints_mc = pose_joints[:, 1:] - pose_joints[:, 0].unsqueeze(1)

        # Find the normal of the triangle defined in the skeleton
        p2 = torch.stack(
            (F.normalize(joints_mc[:, 0]), F.normalize(joints_mc[:, 1])),
            dim=1)
        q = quat.find_q_v(self.model_anchor.repeat(x.shape[0], 1, 1), p2)
        model_rotation = q

        # Set new root rotation
        root_rotation = quat.qmul(
            model_rotation,
            self.rotations[0].repeat(model_rotation.shape[0], 1))

        # DEBUG: Joint 1
        # f2r_p = quat.q_rot(quat.q_inv(root_rotation), x_world_offsets[:, 1])
        tipr_p = quat.q_rot(quat.q_inv(root_rotation), x_world_offsets[:, 1])

        # Calculate joint rotations
        tip_q_parents, tip_q, tip_p = self.tip(x_world_offsets[:, 2],
                                               root_rotation)

        positions = torch.stack((x_world[:, 0], tipr_p), dim=1)

        rotations = [root_rotation, tip_q]
        rotations = torch.stack(rotations, dim=1)

        local_transforms = self.skeleton.to_matrix(rotations, positions).cuda()

        return local_transforms, bone_lengths
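The unprojection at the top of this forward pass can be checked in isolation: multiply the homogeneous screen point's x and y by its depth to recover clip coordinates, invert the projection (the view matrix above is identity, so it is omitted), then restore the known world-space depth. A standalone sketch with the same hard-coded matrix and a made-up input point:

import torch

proj_matrix = torch.tensor([[1.732051, 0.0, 0.0, 0.0],
                            [0.0, 1.732051, 0.0, 0.0],
                            [0.0, 0.0, -1.133333, -1.066667],
                            [0.0, 0.0, -1.0, 0.0]])

p = torch.tensor([0.25, -0.1, 3.0, 1.0])     # x, y on screen, z in world space
z = -p[2]
x_clip = p * torch.tensor([z, z, 1.0, 1.0])  # undo the perspective divide
x_view = torch.inverse(proj_matrix) @ x_clip
x_view[2] = -z                               # restore the world-space depth
print(x_view[:3])                            # world position (identity view)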