Example #1
    def array_to_pose_data(self, skel, data, T_root_ref=None):
        assert len(data) == self._num_dofs + 6
        # First 6 entries: root position (3) followed by root orientation
        # as an axis-angle vector (3)
        T_root = conversions.Rp2T(conversions.A2R(data[3:6]), data[0:3])
        if T_root_ref is not None:
            T_root = np.dot(T_root_ref, T_root)
        pose_data = []
        idx = 6
        for i in range(skel.num_joints()):
            joint = skel.joints[i]
            if joint == skel.root_joint:
                pose_data.append(T_root)
            else:
                j = self._char_info.bvh_map_inv[joint.name]
                if j is None:
                    pose_data.append(constants.eye_T())
                else:
                    joint_type = self._joint_type[j]
                    if joint_type == self._pb_client.JOINT_FIXED:
                        pose_data.append(constants.eye_T())
                    elif joint_type == self._pb_client.JOINT_SPHERICAL:
                        # Each spherical joint consumes 3 dofs, stored as axis-angle
                        pose_data.append(
                            conversions.R2T(conversions.A2R(data[idx:idx + 3])))
                        idx += 3
                    else:
                        raise NotImplementedError()
        return pose_data
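
For reference, a minimal sketch of the flat dof layout this method assumes: root position, root orientation as axis-angle, then three dofs per spherical joint. scipy's Rotation stands in for the conversions helpers here (A2R maps axis-angle to a rotation matrix, Rp2T packs a rotation and position into a 4x4 transform); the values are illustrative.

import numpy as np
from scipy.spatial.transform import Rotation

# [root position (3) | root axis-angle (3) | spherical joint dofs (3 each)]
data = np.array([0.0, 0.0, 1.0,          # root position
                 0.0, 0.0, np.pi / 2,    # root orientation: 90 deg about z
                 0.1, 0.0, 0.0])         # one spherical joint

# Equivalent of conversions.Rp2T(conversions.A2R(data[3:6]), data[0:3])
T_root = np.eye(4)
T_root[:3, :3] = Rotation.from_rotvec(data[3:6]).as_matrix()
T_root[:3, 3] = data[0:3]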
Example #2
    def test_R2A(self):
        R = test_utils.get_random_R()
        A = conversions.R2A(np.array([R]))

        np.testing.assert_almost_equal(
            conversions.R2A(np.array([R]))[0], conversions.R2A(R)
        )
        np.testing.assert_almost_equal(
            conversions.A2R(np.array([A]))[0], conversions.A2R(A)
        )

        R_test = conversions.A2R(A[0])
        for r, r_test in zip(R.flatten(), R_test.flatten()):
            self.assertAlmostEqual(r, r_test)
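
The assertions above check that A2R and R2A are mutually inverse and accept batched input. A standalone sketch of the round trip, with scipy's Rotation standing in for the conversions module:

import numpy as np
from scipy.spatial.transform import Rotation

A = np.array([0.1, 0.2, 0.3])                 # axis-angle (rotation vector)
R = Rotation.from_rotvec(A).as_matrix()       # A2R equivalent
A_back = Rotation.from_matrix(R).as_rotvec()  # R2A equivalent
np.testing.assert_almost_equal(A_back, A)     # round trip recovers A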
Example #3
def append_and_blend(motion1, motion2, blend_length=0):
    assert isinstance(motion1, motion_class.Motion)
    assert isinstance(motion2, motion_class.Motion)
    assert motion1.skel.num_joints() == motion2.skel.num_joints()

    combined_motion = copy.deepcopy(motion1)
    combined_motion.name = f"{motion1.name}+{motion2.name}"

    if motion1.num_frames() == 0:
        for i in range(motion2.num_frames()):
            combined_motion.poses.append(motion2.poses[i])
            if hasattr(motion1, "velocities"):
                combined_motion.velocities.append(motion2.velocities[i])
        return combined_motion

    frame_target = motion2.time_to_frame(blend_length)
    frame_source = motion1.time_to_frame(motion1.length() - blend_length)

    # Translate and rotate motion2 to location of frame_source
    pose1 = motion1.get_pose_by_frame(frame_source)
    pose2 = motion2.get_pose_by_frame(0)

    R1, p1 = conversions.T2Rp(pose1.get_root_transform())
    R2, p2 = conversions.T2Rp(pose2.get_root_transform())

    # Translation to be applied
    dp = p1 - p2
    dp = dp - math.projectionOnVector(dp, motion1.skel.v_up_env)
    axis = motion1.skel.v_up_env

    # Rotation to be applied
    Q1 = conversions.R2Q(R1)
    Q2 = conversions.R2Q(R2)
    _, theta = quaternion.Q_closest(Q1, Q2, axis)
    dR = conversions.A2R(axis * theta)

    motion2 = translate(motion2, dp)
    motion2 = rotate(motion2, dR)

    t_total = frame_source / motion1.fps  # elapsed time (s) in motion1 at the blend start
    t_processed = 0.0
    poses_new = []
    for i in range(motion2.num_frames()):
        dt = 1 / motion2.fps
        t_total += dt
        t_processed += dt
        pose_target = motion2.get_pose_by_frame(i)
        # Blend pose for a moment
        if t_processed <= blend_length:
            alpha = t_processed / float(blend_length)
            pose_source = motion1.get_pose_by_time(t_total)
            pose_target = blend(pose_source, pose_target, alpha)
        poses_new.append(pose_target)

    del combined_motion.poses[frame_source + 1:]
    for i in range(len(poses_new)):
        combined_motion.add_one_frame(0, copy.deepcopy(poses_new[i].data))

    return combined_motion
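
Inside the loop, the blend weight alpha ramps linearly from roughly 0 to 1 over the blend window, crossfading the end of motion1 into motion2. A standalone sketch of that schedule (the fps and blend_length values are illustrative):

import numpy as np

fps, blend_length = 30.0, 0.5
dt = 1.0 / fps
# Same ramp as the loop above: alpha = t_processed / blend_length
t = dt * np.arange(1, int(blend_length * fps) + 1)
alphas = np.minimum(t / blend_length, 1.0)
print(alphas[0], alphas[-1])  # ~0.067 at the first blended frame, 1.0 at the last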
Example #4
def slerp(R1, R2, t):
    """
    Spherical linear interpolation (https://en.wikipedia.org/wiki/Slerp)
    between R1 and R2 with parameter t, 0 ≤ t ≤ 1
    """
    return np.dot(
        R1, conversions.A2R(t * conversions.R2A(np.dot(R1.transpose(), R2)))
    )
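
A quick sanity check of the formula, with scipy's Rotation standing in for the conversions helpers: halfway between the identity and a 90-degree rotation about z should be a 45-degree rotation about z.

import numpy as np
from scipy.spatial.transform import Rotation

def A2R(a):  # stand-in for conversions.A2R
    return Rotation.from_rotvec(a).as_matrix()

def R2A(R):  # stand-in for conversions.R2A
    return Rotation.from_matrix(R).as_rotvec()

R1 = np.eye(3)                             # identity
R2 = A2R(np.array([0.0, 0.0, np.pi / 2]))  # 90 deg about z
R_mid = np.dot(R1, A2R(0.5 * R2A(np.dot(R1.T, R2))))
print(R2A(R_mid))                          # ~[0, 0, pi/4]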
Example #5
def random_rotation(mu_theta, sigma_theta, lower_theta, upper_theta):
    """
    Generate a random rotation matrix whose angle is drawn from a truncated normal distribution
    """
    theta = truncnorm(
        mu=mu_theta, sigma=sigma_theta, lower=lower_theta, upper=upper_theta
    )
    return conversions.A2R(random_unit_vector() * theta)
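
truncnorm and random_unit_vector come from the surrounding module; plausible implementations under that assumption (the bodies below are sketches, not the source):

import numpy as np
from scipy.stats import truncnorm as _truncnorm

def truncnorm(mu, sigma, lower, upper):
    # Sample N(mu, sigma) truncated to [lower, upper]
    # (scipy expects the bounds standardized by sigma)
    a, b = (lower - mu) / sigma, (upper - mu) / sigma
    return _truncnorm.rvs(a, b, loc=mu, scale=sigma)

def random_unit_vector():
    # A normalized Gaussian sample is uniform on the unit sphere
    v = np.random.normal(size=3)
    return v / np.linalg.norm(v)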
Example #6
    def compute_target_pose(self, idx, action):
        agent = self._sim_agent[idx]
        char_info = agent._char_info
        ''' the current posture should be deepcopied because action will modify it '''
        if self._action_type == Env.ActionMode.Relative:
            ref_pose = copy.deepcopy(self.get_current_pose_from_motion(idx))
        else:
            ref_pose = copy.deepcopy(
                self._base_motion[idx].get_pose_by_frame(0))

        a_real = self._action_space[idx].norm_to_real(action)

        dof_cnt = 0
        for j in agent._joint_indices:
            joint_type = agent.get_joint_type(j)
            ''' Fixed joint will not be affected '''
            if joint_type == self._pb_client.JOINT_FIXED:
                continue
            ''' If the joint does not have a correspondence, use the reference posture itself '''
            if char_info.bvh_map[j] is None:
                continue
            if self._action_type == Env.ActionMode.Relative:
                T = ref_pose.get_transform(char_info.bvh_map[j], local=True)
            elif self._action_type == Env.ActionMode.Absolute:
                T = ref_pose.skel.get_joint(
                    char_info.bvh_map[j]).xform_from_parent_joint
            else:
                raise NotImplementedError
            R, p = conversions.T2Rp(T)
            if joint_type == self._pb_client.JOINT_SPHERICAL:
                dR = conversions.A2R(a_real[dof_cnt:dof_cnt + 3])
                dof_cnt += 3
            elif joint_type == self._pb_client.JOINT_REVOLUTE:
                axis = agent.get_joint_axis(j)
                angle = a_real[dof_cnt:dof_cnt + 1]
                dR = conversions.A2R(axis * angle)
                dof_cnt += 1
            else:
                raise NotImplementedError
            T_new = conversions.Rp2T(np.dot(R, dR), p)
            ref_pose.set_transform(char_info.bvh_map[j],
                                   T_new,
                                   do_ortho_norm=False,
                                   local=True)

        return ref_pose
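
norm_to_real rescales the policy's normalized action into real joint-space deltas before they are converted with A2R. A hypothetical sketch of such a mapping for a box-bounded action space (the class and bounds are illustrative, not the source's):

import numpy as np

class ActionSpace:
    # Hypothetical: maps normalized actions in [-1, 1] to [low, high]
    def __init__(self, low, high):
        self.low, self.high = np.asarray(low), np.asarray(high)

    def norm_to_real(self, action):
        action = np.clip(np.asarray(action), -1.0, 1.0)
        return self.low + 0.5 * (action + 1.0) * (self.high - self.low)

space = ActionSpace(low=-0.5 * np.ones(3), high=0.5 * np.ones(3))
print(space.norm_to_real([0.0, 1.0, -1.0]))  # -> [0.0, 0.5, -0.5]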
Example #7
def create_motion_from_amass_data(filename, bm, override_betas=None):
    bdata = np.load(filename)

    if override_betas is not None:
        betas = torch.Tensor(override_betas[:10][np.newaxis]).to("cpu")
    else:
        betas = torch.Tensor(bdata["betas"][:10][np.newaxis]).to("cpu")

    skel = create_skeleton_from_amass_bodymodel(
        bm,
        betas,
        len(joint_names),
        joint_names,
    )

    fps = float(bdata["mocap_framerate"])
    root_orient = bdata["poses"][:, :3]  # controls the global root orientation
    pose_body = bdata["poses"][:, 3:66]  # controls body joint angles
    trans = bdata["trans"][:, :3]  # controls the global root translation

    motion = motion_class.Motion(skel=skel, fps=fps)

    num_joints = skel.num_joints()
    parents = bm.kintree_table[0].long()[:num_joints]

    for frame in range(pose_body.shape[0]):
        pose_body_frame = pose_body[frame]
        root_orient_frame = root_orient[frame]
        root_trans_frame = trans[frame]
        pose_data = []
        for j in range(num_joints):
            if j == 0:
                T = conversions.Rp2T(conversions.A2R(root_orient_frame),
                                     root_trans_frame)
            else:
                T = conversions.R2T(
                    conversions.A2R(pose_body_frame[(j - 1) * 3:(j - 1) * 3 + 3]))
            pose_data.append(T)
        motion.add_one_frame(pose_data)

    return motion
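
The npz fields read above follow the standard AMASS/SMPL-H layout. A quick inspection sketch, assuming an AMASS sequence file on disk (the filename is a placeholder):

import numpy as np

bdata = np.load("clip.npz")              # placeholder AMASS sequence file
print(bdata["poses"].shape)              # (num_frames, 156): SMPL-H pose params
# poses[:, 0:3]   root orientation (axis-angle)
# poses[:, 3:66]  21 body joints x 3 axis-angle dofs
# poses[:, 66:]   hand/finger articulation (unused above)
print(bdata["trans"].shape)              # (num_frames, 3): root translation
print(float(bdata["mocap_framerate"]))   # capture rate in Hz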
Example #8
    def add_noise_to_pose_vel(self,
                              agent,
                              pose,
                              vel=None,
                              return_as_copied=True):
        '''
        Add a little bit of noise to the given pose and velocity
        '''

        ref_pose = copy.deepcopy(pose) if return_as_copied else pose
        if vel is not None:
            ref_vel = copy.deepcopy(vel) if return_as_copied else vel
        else:
            ref_vel = None
        dof_cnt = 0
        for j in agent._joint_indices:
            joint_type = agent.get_joint_type(j)
            ''' Ignore fixed joints '''
            if joint_type == self._pb_client.JOINT_FIXED:
                continue
            ''' Ignore if there is no corresponding joint '''
            if agent._char_info.bvh_map[j] is None:
                continue
            T = ref_pose.get_transform(agent._char_info.bvh_map[j], local=True)
            R, p = conversions.T2Rp(T)
            if joint_type == self._pb_client.JOINT_SPHERICAL:
                dR = math.random_rotation(
                    mu_theta=agent._char_info.noise_pose[j][0],
                    sigma_theta=agent._char_info.noise_pose[j][1],
                    lower_theta=agent._char_info.noise_pose[j][2],
                    upper_theta=agent._char_info.noise_pose[j][3])
                dof_cnt += 3
            elif joint_type == self._pb_client.JOINT_REVOLUTE:
                theta = math.truncnorm(mu=agent._char_info.noise_pose[j][0],
                                       sigma=agent._char_info.noise_pose[j][1],
                                       lower=agent._char_info.noise_pose[j][2],
                                       upper=agent._char_info.noise_pose[j][3])
                joint_axis = agent.get_joint_axis(j)
                dR = conversions.A2R(joint_axis * theta)
                dof_cnt += 1
            else:
                raise NotImplementedError
            T_new = conversions.Rp2T(np.dot(R, dR), p)
            ref_pose.set_transform(agent._char_info.bvh_map[j],
                                   T_new,
                                   do_ortho_norm=False,
                                   local=True)
            if vel is not None:
                dw = math.truncnorm(
                    mu=np.full(3, agent._char_info.noise_vel[j][0]),
                    sigma=np.full(3, agent._char_info.noise_vel[j][1]),
                    lower=np.full(3, agent._char_info.noise_vel[j][2]),
                    upper=np.full(3, agent._char_info.noise_vel[j][3]))
                ref_vel.data_local[j][:3] += dw
        return ref_pose, ref_vel
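
The noise tables indexed above hold (mu, sigma, lower, upper) parameters for the truncated normals, per joint. A hypothetical configuration fragment (joint names and magnitudes are illustrative):

# char_info.noise_pose[j]: angle noise in radians
noise_pose = {
    "Hips":     (0.0, 0.10, -0.30, 0.30),
    "LeftKnee": (0.0, 0.05, -0.15, 0.15),
}
# char_info.noise_vel[j]: angular-velocity noise in rad/s
noise_vel = {
    "Hips":     (0.0, 0.20, -0.50, 0.50),
    "LeftKnee": (0.0, 0.10, -0.30, 0.30),
}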