Ejemplo n.º 1
0
def append_and_blend(motion1, motion2, blend_length=0):
    """Append motion2 after motion1, cross-fading over `blend_length` seconds.

    motion2 is first translated (horizontally) and rotated about the up
    axis so its initial root pose lines up with motion1 at the splice
    frame; overlapping poses are then blended pose-by-pose.

    Args:
        motion1: Leading motion (motion_class.Motion).
        motion2: Trailing motion; must have the same joint count.
        blend_length: Overlap duration in seconds (0 = hard cut).

    Returns:
        A new combined Motion; the inputs are not modified in place.
    """
    assert isinstance(motion1, motion_class.Motion)
    assert isinstance(motion2, motion_class.Motion)
    assert motion1.skel.num_joints() == motion2.skel.num_joints()

    combined_motion = copy.deepcopy(motion1)
    combined_motion.name = f"{motion1.name}+{motion2.name}"

    # Degenerate case: nothing to blend against, just copy motion2 over.
    if motion1.num_frames() == 0:
        for i in range(motion2.num_frames()):
            combined_motion.poses.append(motion2.poses[i])
            if hasattr(motion1, "velocities"):
                combined_motion.velocities.append(motion2.velocities[i])
        return combined_motion

    # Frame in motion1 where the overlap window begins.
    frame_source = motion1.time_to_frame(motion1.length() - blend_length)

    # Translate and rotate motion2 to location of frame_source
    pose1 = motion1.get_pose_by_frame(frame_source)
    pose2 = motion2.get_pose_by_frame(0)

    R1, p1 = conversions.T2Rp(pose1.get_root_transform())
    R2, p2 = conversions.T2Rp(pose2.get_root_transform())

    # Translation to be applied (horizontal component only; keep height).
    dp = p1 - p2
    dp = dp - math.projectionOnVector(dp, motion1.skel.v_up_env)
    axis = motion1.skel.v_up_env

    # Rotation to be applied: closest rotation about the up axis.
    Q1 = conversions.R2Q(R1)
    Q2 = conversions.R2Q(R2)
    _, theta = quaternion.Q_closest(Q1, Q2, axis)
    dR = conversions.A2R(axis * theta)

    motion2 = translate(motion2, dp)
    motion2 = rotate(motion2, dR)

    # BUG FIX: elapsed time at frame_source is frame / fps, not
    # fps * frame — the old expression produced times far past the end
    # of motion1, so get_pose_by_time() sampled the wrong source pose.
    t_total = frame_source / motion1.fps
    t_processed = 0.0
    poses_new = []
    dt = 1 / motion2.fps  # hoisted: constant per frame
    for i in range(motion2.num_frames()):
        t_total += dt
        t_processed += dt
        pose_target = motion2.get_pose_by_frame(i)
        # Cross-fade while still inside the blend window.
        if t_processed <= blend_length:
            alpha = t_processed / float(blend_length)
            pose_source = motion1.get_pose_by_time(t_total)
            pose_target = blend(pose_source, pose_target, alpha)
        poses_new.append(pose_target)

    del combined_motion.poses[frame_source + 1:]
    for i in range(len(poses_new)):
        combined_motion.add_one_frame(0, copy.deepcopy(poses_new[i].data))

    return combined_motion
Ejemplo n.º 2
0
def blend(pose1, pose2, alpha=0.5):
    """Interpolate two poses joint-by-joint.

    Rotations are slerped and translations lerped with weight ``alpha``
    (0 -> pose1, 1 -> pose2). Returns a new pose; inputs are untouched.
    """
    assert 0.0 <= alpha <= 1.0
    blended = copy.deepcopy(pose1)
    for joint_idx in range(pose1.skel.num_joints()):
        Ra, pa = conversions.T2Rp(pose1.get_transform(joint_idx, local=True))
        Rb, pb = conversions.T2Rp(pose2.get_transform(joint_idx, local=True))
        R_mix = math.slerp(Ra, Rb, alpha)
        p_mix = math.lerp(pa, pb, alpha)
        blended.set_transform(joint_idx, conversions.Rp2T(R_mix, p_mix),
                              local=True)
    return blended
Ejemplo n.º 3
0
    def difference(self, node_A, node_B, frames_compare, num_comparison):
        """
        Calculate the difference between the two given nodes.

        Compares `num_comparison` evenly spaced frames over a window of
        `frames_compare` frames, accumulating pose, root/end-effector
        and (optionally) facing-trajectory dissimilarities, each
        averaged over the number of comparisons.
        """
        motion_idx_A = node_A["motion_idx"]
        frames_A = node_A["frame_start"]

        motion_idx_B = node_B["motion_idx"]
        frames_B = node_B["frame_start"]
        diff_pose = 0.0
        diff_root_ee = 0.0
        diff_trajectory = 0.0

        for k in range(0, frames_compare + 1,
                       (frames_compare + 1) // num_comparison):
            pose = self.motions[motion_idx_A].get_pose_by_frame(frames_A + k)
            vel = self.motions[motion_idx_A].get_velocity_by_frame(frames_A +
                                                                   k)
            pose_j = self.motions[motion_idx_B].get_pose_by_frame(frames_B + k)
            vel_j = self.motions[motion_idx_B].get_velocity_by_frame(frames_B +
                                                                     k)
            if k == 0:
                T_ref = pose.get_facing_transform()
                T_ref_j = pose_j.get_facing_transform()
            diff_pose += similarity.pose_similarity(pose, pose_j, vel, vel_j,
                                                    self.w_joint_pos,
                                                    self.w_joint_vel,
                                                    self.w_joints)
            diff_root_ee += similarity.root_ee_similarity(
                pose,
                pose_j,
                vel,
                vel_j,
                self.w_root_pos,
                self.w_root_vel,
                self.w_ee_pos,
                self.w_ee_vel,
                T_ref,
                T_ref_j,
            )
            if self.w_trajectory > 0.0:
                R, p = conversions.T2Rp(pose.get_facing_transform())
                R_j, p_j = conversions.T2Rp(pose_j.get_facing_transform())
                if k > 0:
                    # Facing-frame displacement since the previous sample,
                    # expressed in each motion's own previous facing frame.
                    d = np.dot(R_prev.transpose(), p - p_prev)
                    d_j = np.dot(R_j_prev.transpose(), p_j - p_j_prev)
                    d = d - d_j
                    diff_trajectory += np.dot(d, d)
                R_prev, p_prev = R, p
                R_j_prev, p_j_prev = R_j, p_j

        # BUG FIX: the averaging and the return statement were indented
        # inside the loop, so the function returned after the first
        # sample with the sums divided prematurely. Average once, after
        # all comparisons have been accumulated.
        diff_pose /= num_comparison
        diff_root_ee /= num_comparison
        diff_trajectory /= num_comparison
        return diff_pose + diff_root_ee + diff_trajectory
Ejemplo n.º 4
0
def calculate_metrics(pred_seqs, tgt_seqs):
    """Compute cumulative Euler-angle errors at standard horizons.

    Returns a dict mapping each horizon frame (6/12/18/24) to the sum
    of per-frame mean Euler errors over the SMPL major joints.
    """
    horizons = [6, 12, 18, 24]
    R_pred, _ = conversions.T2Rp(pred_seqs)
    R_tgt, _ = conversions.T2Rp(tgt_seqs)
    major = amass_dip.SMPL_MAJOR_JOINTS
    per_frame_error = np.mean(
        metrics.euler_diff(R_pred[:, :, major], R_tgt[:, :, major]),
        axis=0,
    )
    return {horizon: np.sum(per_frame_error[:horizon])
            for horizon in horizons}
Ejemplo n.º 5
0
 def interpolate(cls, pose1, pose2, alpha):
     """Return a new Pose interpolated between pose1 and pose2.

     Per-joint local transforms are combined with slerp (rotation) and
     lerp (translation) at weight ``alpha``.
     """
     skel = pose1.skel
     transforms = []
     for joint in skel.joints:
         Ra, pa = conversions.T2Rp(pose1.get_transform(joint, local=True))
         Rb, pb = conversions.T2Rp(pose2.get_transform(joint, local=True))
         R_mix = math.slerp(Ra, Rb, alpha)
         p_mix = math.lerp(pa, pb, alpha)
         transforms.append(conversions.Rp2T(R_mix, p_mix))
     return Pose(pose1.skel, transforms)
Ejemplo n.º 6
0
def pose_similarity(
    pose1,
    pose2,
    vel1=None,
    vel2=None,
    w_joint_pos=0.9,
    w_joint_vel=0.1,
    w_joints=None,
    apply_root_correction=True,
):
    """
    Measure joint angle (and optionally joint angular velocity)
    difference between two poses; root translation is not considered.
    If 'apply_root_correction' is True, pose2's root rotation is
    automatically rotated so that it is closest to the root rotation of
    pose1, where the skeleton's up vector ('v_up_env') defines the
    geodesic curve. Returns the weighted sum normalized by joint count.
    """
    assert pose1.skel.num_joints() == pose2.skel.num_joints()
    skel = pose1.skel
    if vel1 is not None:
        assert vel2 is not None
        assert vel1.skel.num_joints() == vel2.skel.num_joints()

    if w_joints is None:
        w_joints = np.ones(skel.num_joints())
    """ joint angle difference """
    diff_pos = 0.0
    if w_joint_pos > 0.0:
        root_idx = skel.get_index_joint(skel.root_joint)
        v_up_env = pose1.skel.v_up_env
        for j in range(skel.num_joints()):
            # Translations are unused here; only rotations are compared.
            R1, _ = conversions.T2Rp(pose1.get_transform(j, local=True))
            R2, _ = conversions.T2Rp(pose2.get_transform(j, local=True))
            if apply_root_correction and j == root_idx:
                Q1 = conversions.R2Q(R1)
                Q2 = conversions.R2Q(R2)
                Q2, _ = quaternion.Q_closest(Q1, Q2, v_up_env)
                R2 = conversions.Q2R(Q2)
            # TODO: Verify if logSO3 is same as R2A
            dR = conversions.R2A(np.dot(np.transpose(R1), R2))
            diff_pos += w_joints[j] * np.dot(dR, dR)
    """ joint angular velocity difference """
    diff_vel = 0.0
    if vel1 is not None and w_joint_vel > 0.0:
        skel = vel1.skel
        for j in range(skel.num_joints()):
            dw = vel2.get_angular(j, local=True) - vel1.get_angular(j,
                                                                    local=True)
            diff_vel += w_joints[j] * np.dot(dw, dw)
    return (w_joint_pos * diff_pos +
            w_joint_vel * diff_vel) / skel.num_joints()
Ejemplo n.º 7
0
 def test_motion(self):
     """Check world-space joint positions at a few known frames."""
     motion = bvh.load(file=TEST_SINUSOIDAL_FILE)
     expectations = [
         (0, 0, [-3, 6, 5]),            # frame 0, root joint
         (100, 0, [-3, 6, 5]),          # frame 100, root joint
         (100, "child2", [-8, 11, 5]),  # frame 100, "child2" joint
     ]
     for frame, joint, expected in expectations:
         T = motion.get_pose_by_frame(frame).get_transform(joint,
                                                           local=False)
         _, p = conversions.T2Rp(T)
         self.assertListEqual(list(p), expected)
Ejemplo n.º 8
0
def append_and_blend(motion1, motion2, blend_length=0):
    """Append motion2 after motion1, cross-fading over `blend_length` seconds.

    motion2 is translated so its first root position coincides with
    motion1's root at the splice frame; overlapping poses are then
    blended pose-by-pose.

    Args:
        motion1: Leading motion (motion_class.Motion).
        motion2: Trailing motion; must have the same joint count.
        blend_length: Overlap duration in seconds (0 = hard cut).

    Returns:
        A new combined Motion; the inputs are not modified in place.
    """
    assert isinstance(motion1, motion_class.Motion)
    assert isinstance(motion2, motion_class.Motion)
    assert motion1.skel.num_joints() == motion2.skel.num_joints()

    combined_motion = copy.deepcopy(motion1)
    combined_motion.name = f"{motion1.name}+{motion2.name}"

    # Degenerate case: nothing to blend against, just copy motion2 over.
    if motion1.num_frames() == 0:
        for i in range(motion2.num_frames()):
            combined_motion.poses.append(motion2.poses[i])
            if hasattr(motion1, "velocities"):
                combined_motion.velocities.append(motion2.velocities[i])
        return combined_motion

    # Frame in motion1 where the overlap window begins.
    frame_source = motion1.time_to_frame(motion1.length() - blend_length)

    # Translate motion2 so its first frame lines up with frame_source.
    pose1 = motion1.get_pose_by_frame(frame_source)
    pose2 = motion2.get_pose_by_frame(0)

    # Only the root positions are needed for the alignment here.
    _, p1 = conversions.T2Rp(pose1.get_root_transform())
    _, p2 = conversions.T2Rp(pose2.get_root_transform())

    # Translation to be applied
    dp = p1 - p2
    motion2 = translate(motion2, dp)

    # BUG FIX: elapsed time at frame_source is frame / fps, not
    # fps * frame — the old value made get_pose_by_time() sample far
    # beyond the end of motion1.
    t_total = frame_source / motion1.fps
    t_processed = 0.0
    poses_new = []
    dt = 1 / motion2.fps
    for i in range(motion2.num_frames()):
        t_total += dt
        t_processed += dt
        pose_target = motion2.get_pose_by_frame(i)
        # Cross-fade while still inside the blend window.
        if t_processed <= blend_length:
            alpha = t_processed / float(blend_length)
            pose_source = motion1.get_pose_by_time(t_total)
            pose_target = blend(pose_source, pose_target, alpha)
        poses_new.append(pose_target)

    del combined_motion.poses[frame_source + 1:]
    for i in range(len(poses_new)):
        combined_motion.add_one_frame(copy.deepcopy(poses_new[i].data))

    return combined_motion
Ejemplo n.º 9
0
    def test_R2T(self):
        """Batched R2T on one element must equal the unbatched call."""
        T = test_utils.get_random_T()
        R, _ = conversions.T2Rp(T)

        batched = conversions.R2T(np.array([R]))[0]
        single = conversions.R2T(R)
        np.testing.assert_almost_equal(batched, single)
Ejemplo n.º 10
0
    def test_p2T(self):
        """Batched p2T on one element must equal the unbatched call."""
        T = test_utils.get_random_T()
        _, p = conversions.T2Rp(T)

        batched = conversions.p2T(np.array([p]))[0]
        single = conversions.p2T(p)
        np.testing.assert_almost_equal(batched, single)
Ejemplo n.º 11
0
def _write_hierarchy(motion, file, joint, scale=1.0, rot_order="XYZ", tab=""):
    """Recursively write one joint of a BVH HIERARCHY section.

    Returns the depth-first list of joint names in the order written,
    which callers use to emit MOTION channel data in matching order.
    """

    def rot_order_to_str(order):
        # Only the XYZ and ZYX channel layouts are supported.
        if order in ("xyz", "XYZ"):
            return "Xrotation Yrotation Zrotation"
        if order in ("zyx", "ZYX"):
            return "Zrotation Yrotation Xrotation"
        raise NotImplementedError

    joint_order = [joint.name]
    is_root_joint = joint.parent_joint is None
    keyword = "ROOT" if is_root_joint else "JOINT"
    file.write(tab + "%s %s\n" % (keyword, joint.name))
    file.write(tab + "{\n")
    R, p = conversions.T2Rp(joint.xform_from_parent_joint)
    p *= scale
    file.write(tab + "\tOFFSET %f %f %f\n" % (p[0], p[1], p[2]))
    # Root carries translation + rotation channels; other joints only
    # rotation channels.
    if is_root_joint:
        file.write(tab + "\tCHANNELS 6 Xposition Yposition Zposition %s\n" %
                   rot_order_to_str(rot_order))
    else:
        file.write(tab + "\tCHANNELS 3 %s\n" % rot_order_to_str(rot_order))
    for child_joint in joint.child_joints:
        joint_order.extend(
            _write_hierarchy(motion, file, child_joint, scale, rot_order,
                             tab + "\t"))
    if not joint.child_joints:
        file.write(tab + "\tEnd Site\n")
        file.write(tab + "\t{\n")
        file.write(tab + "\t\tOFFSET %f %f %f\n" % (0.0, 0.0, 0.0))
        file.write(tab + "\t}\n")
    file.write(tab + "}\n")
    return joint_order
Ejemplo n.º 12
0
def render_joint(joint, option):
    """Draw a joint's position and orientation axes per render options.

    The kind of orientation drawing depends on the joint type: axis
    lines for revolute/universal joints, a full coordinate frame for
    multi-DOF joints, nothing for weld joints.
    """
    # BUG FIX: glPushAttrib expects an attribute *mask* (GL_LIGHTING_BIT),
    # not the GL_LIGHTING enable enum, and glPopAttrib takes no argument.
    glPushAttrib(GL_LIGHTING_BIT)
    if option.lighting:
        glEnable(GL_LIGHTING)
    else:
        glDisable(GL_LIGHTING)
    j_type = joint.type()
    # World transform of the joint: child body frame composed with the
    # joint-from-child-body offset.
    T0 = joint.child_bodynode.transform()
    T1 = joint.transform_from_child_body_node()
    T = T0.dot(T1)
    R, p = conversions.T2Rp(T)

    scale = option.scale

    if option.render_pos:
        gl_render.render_point(p,
                               color=option.color_pos,
                               scale=scale,
                               radius=0.1)

    if option.render_ori:
        if j_type == "WeldJoint":
            pass
        elif j_type == "RevoluteJoint":
            axis1 = joint.axis_in_world_frame()
            gl_render.render_line(p,
                                  p + scale * axis1,
                                  color=option.color_ori[0])
        elif j_type == "UniversalJoint":
            axis1 = joint.axis1_in_world_frame()
            axis2 = joint.axis2_in_world_frame()
            gl_render.render_line(p,
                                  p + scale * axis1,
                                  color=option.color_ori[0])
            gl_render.render_line(p,
                                  p + scale * axis2,
                                  color=option.color_ori[1])
        elif j_type in ("EulerJoint", "BallJoint", "TranslationalJoint",
                        "FreeJoint"):
            # All multi-DOF joints render the same coordinate frame.
            gl_render.render_transform(T,
                                       scale=scale,
                                       color_pos=option.color_pos,
                                       color_ori=option.color_ori)
        else:
            raise NotImplementedError("Not Implemented")
    glPopAttrib()
Ejemplo n.º 13
0
    def _state_body(self,
                    agent,
                    T_ref=None,
                    include_com=True,
                    include_p=True,
                    include_Q=True,
                    include_v=True,
                    include_w=True,
                    return_stacked=True):
        """Assemble a per-link body-state observation in a reference frame.

        Args:
            agent: Character whose root/link states are read.
            T_ref: Reference transform; defaults to the agent's facing
                transform at ground height.
            include_com: If True, also append the mass-weighted center
                of mass position and velocity (relative to the frame).
            include_p, include_Q, include_v, include_w: Toggle per-link
                position / orientation / linear velocity / angular
                velocity entries.
            return_stacked: If True, return one flat array; otherwise
                the list of individual per-quantity arrays.
        """
        if T_ref is None:
            T_ref = agent.get_facing_transform(self.get_ground_height())

        R_ref, p_ref = conversions.T2Rp(T_ref)
        R_ref_inv = R_ref.transpose()

        # Root state first, then link states in joint-index order.
        link_states = []
        link_states.append(agent.get_root_state())
        ps, Qs, vs, ws = agent.get_link_states()
        for j in agent._joint_indices:
            link_states.append((ps[j], Qs[j], vs[j], ws[j]))

        state = []
        for i, s in enumerate(link_states):
            p, Q, v, w = s[0], s[1], s[2], s[3]
            if include_p:
                p_rel = np.dot(R_ref_inv, p - p_ref)
                state.append(
                    p_rel)  # relative position w.r.t. the reference frame
            if include_Q:
                Q_rel = conversions.R2Q(np.dot(R_ref_inv, conversions.Q2R(Q)))
                Q_rel = quaternion.Q_op(Q_rel, op=["normalize", "halfspace"])
                state.append(
                    Q_rel)  # relative rotation w.r.t. the reference frame
            if include_v:
                v_rel = np.dot(R_ref_inv, v)
                state.append(
                    v_rel)  # relative linear vel w.r.t. the reference frame
            if include_w:
                w_rel = np.dot(R_ref_inv, w)
                state.append(
                    w_rel)  # relative angular vel w.r.t. the reference frame
            if include_com:
                # Accumulate mass-weighted sums; normalized by the total
                # mass after the loop.
                if i == 0:
                    p_com = agent._link_masses[i] * p
                    v_com = agent._link_masses[i] * v
                else:
                    p_com += agent._link_masses[i] * p
                    v_com += agent._link_masses[i] * v

        if include_com:
            p_com /= agent._link_total_mass
            v_com /= agent._link_total_mass
            state.append(np.dot(R_ref_inv, p_com - p_ref))
            state.append(np.dot(R_ref_inv, v_com))

        if return_stacked:
            return np.hstack(state)
        else:
            return state
Ejemplo n.º 14
0
 def compute(cls, pose1, pose2, dt):
     """Finite-difference joint velocities between two poses.

     Returns (local, global) arrays with one row per joint, each row a
     6-vector [angular, linear] velocity computed over timestep dt.
     """
     assert pose1.skel == pose2.skel
     assert dt > constants.EPSILON
     data_local = []
     data_global = []
     for joint in pose1.skel.joints:
         # Same finite-difference computation in local then global frame.
         for use_local, store in ((True, data_local), (False, data_global)):
             T1 = pose1.get_transform(joint, local=use_local)
             T2 = pose2.get_transform(joint, local=use_local)
             dR, dp = conversions.T2Rp(np.dot(math.invertT(T1), T2))
             store.append(np.hstack((conversions.R2A(dR) / dt, dp / dt)))
     return np.array(data_local), np.array(data_global)
Ejemplo n.º 15
0
    def set_pose(self, pose, vel=None):
        '''
        Drive the simulated character to the given kinematic pose (and,
        optionally, velocity). Velocity should be represented w.r.t.
        local frame.
        '''
        # Root joint
        T = pose.get_transform(self._char_info.bvh_map[self._char_info.ROOT],
                               local=False)
        Q, p = conversions.T2Qp(T)
        p *= self._ref_scale

        v, w = None, None
        if vel is not None:
            # Here we give a root orientation to get velocities represented in world frame.
            R = conversions.Q2R(Q)
            w = vel.get_angular(self._char_info.bvh_map[self._char_info.ROOT],
                                False, R)
            v = vel.get_linear(self._char_info.bvh_map[self._char_info.ROOT],
                               False, R)
            v *= self._ref_scale

        bu.set_base_pQvw(self._pb_client, self._body_id, p, Q, v, w)

        # Other joints
        indices = []
        state_pos = []
        state_vel = []
        for j in self._joint_indices:
            joint_type = self._joint_type[j]
            # When the target joint do not have dof, we simply ignore it
            if joint_type == self._pb_client.JOINT_FIXED:
                continue
            # When there is no matching between the given pose and the simulated character,
            # the character just tries to hold its initial pose
            if self._char_info.bvh_map[j] is None:
                state_pos.append(self._joint_pose_init[j])
                state_vel.append(self._joint_vel_init[j])
            else:
                T = pose.get_transform(self._char_info.bvh_map[j], local=True)
                if joint_type == self._pb_client.JOINT_SPHERICAL:
                    Q, p = conversions.T2Qp(T)
                    w = np.zeros(3) if vel is None else vel.get_angular(
                        self._char_info.bvh_map[j], local=True)
                    state_pos.append(Q)
                    state_vel.append(w)
                elif joint_type == self._pb_client.JOINT_REVOLUTE:
                    # Project the 3D rotation / angular velocity onto the
                    # joint's single axis.
                    joint_axis = self.get_joint_axis(j)
                    R, p = conversions.T2Rp(T)
                    w = np.zeros(3) if vel is None else vel.get_angular(
                        self._char_info.bvh_map[j], local=True)
                    state_pos.append([math.project_rotation_1D(R, joint_axis)])
                    state_vel.append(
                        [math.project_angular_vel_1D(w, joint_axis)])
                else:
                    raise NotImplementedError()
            indices.append(j)
        bu.set_joint_pv(self._pb_client, self._body_id, indices, state_pos,
                        state_vel)
    def add_noise_to_pose_vel(self,
                              agent,
                              pose,
                              vel=None,
                              return_as_copied=True):
        '''
        Add a little bit of noise to the given pose and velocity.

        Rotational noise is drawn per joint from the character's
        noise_pose parameters; angular-velocity noise (when vel is
        given) from noise_vel. Returns (pose, vel) — copies when
        return_as_copied is True, otherwise the mutated inputs
        (vel result is None when no velocity was supplied).
        '''
        ref_pose = copy.deepcopy(pose) if return_as_copied else pose
        # BUG FIX: ref_vel was only bound when vel was truthy, so the
        # final `return ref_pose, ref_vel` raised NameError for vel=None.
        if vel is not None:
            ref_vel = copy.deepcopy(vel) if return_as_copied else vel
        else:
            ref_vel = None
        dof_cnt = 0
        for j in agent._joint_indices:
            joint_type = agent.get_joint_type(j)
            ''' Ignore fixed joints '''
            if joint_type == self._pb_client.JOINT_FIXED:
                continue
            ''' Ignore if there is no corresponding joint '''
            if agent._char_info.bvh_map[j] is None:
                continue
            T = ref_pose.get_transform(agent._char_info.bvh_map[j], local=True)
            R, p = conversions.T2Rp(T)
            if joint_type == self._pb_client.JOINT_SPHERICAL:
                dR = math.random_rotation(
                    mu_theta=agent._char_info.noise_pose[j][0],
                    sigma_theta=agent._char_info.noise_pose[j][1],
                    lower_theta=agent._char_info.noise_pose[j][2],
                    upper_theta=agent._char_info.noise_pose[j][3])
                dof_cnt += 3
            elif joint_type == self._pb_client.JOINT_REVOLUTE:
                theta = math.truncnorm(mu=agent._char_info.noise_pose[j][0],
                                       sigma=agent._char_info.noise_pose[j][1],
                                       lower=agent._char_info.noise_pose[j][2],
                                       upper=agent._char_info.noise_pose[j][3])
                joint_axis = agent.get_joint_axis(j)
                dR = conversions.A2R(joint_axis * theta)
                dof_cnt += 1
            else:
                raise NotImplementedError
            # Apply the rotational perturbation in the joint's local frame.
            T_new = conversions.Rp2T(np.dot(R, dR), p)
            ref_pose.set_transform(agent._char_info.bvh_map[j],
                                   T_new,
                                   do_ortho_norm=False,
                                   local=True)
            if vel is not None:
                dw = math.truncnorm(
                    mu=np.full(3, agent._char_info.noise_vel[j][0]),
                    sigma=np.full(3, agent._char_info.noise_vel[j][1]),
                    lower=np.full(3, agent._char_info.noise_vel[j][2]),
                    upper=np.full(3, agent._char_info.noise_vel[j][3]))
                ref_vel.data_local[j][:3] += dw
        return ref_pose, ref_vel
Ejemplo n.º 17
0
    def test_Rp2T(self):
        """Batched Rp2T matches the unbatched call and round-trips T."""
        T = test_utils.get_random_T()
        R, p = conversions.T2Rp(T)

        np.testing.assert_almost_equal(
            conversions.Rp2T(np.array([R]), np.array([p]))[0],
            conversions.Rp2T(R, p),
        )

        # Reassembling (R, p) must reproduce the original transform.
        reassembled = conversions.Rp2T(R, p)
        for expected, actual in zip(T.flatten(), reassembled.flatten()):
            self.assertAlmostEqual(expected, actual)
Ejemplo n.º 18
0
def save(motion, filename, scale=1.0, rot_order="XYZ", verbose=False):
    """Save a motion as a BVH file.

    Writes the skeleton HIERARCHY (via _write_hierarchy) followed by the
    MOTION section, one line of channel data per frame in the joint
    order produced by the hierarchy writer.

    Args:
        motion: Motion to serialize.
        filename: Output path.
        scale: Uniform scale applied to all translations.
        rot_order: Euler channel order ("XYZ" or "ZYX").
        verbose: If True, print progress to stdout.
    """
    if verbose:
        print(" >  >  Save BVH file: %s" % filename)
    with open(filename, "w") as f:
        """ Write hierarchy """
        if verbose:
            print(" >  >  >  >  Write BVH hierarchy")
        f.write("HIERARCHY\n")
        joint_order = _write_hierarchy(
            motion, f, motion.skel.root_joint, scale, rot_order
        )
        """ Write data """
        if verbose:
            print(" >  >  >  >  Write BVH data")
        dt = 1.0 / motion.fps
        num_frames = motion.num_frames()
        f.write("MOTION\n")
        f.write("Frames: %d\n" % num_frames)
        f.write("Frame Time: %f\n" % dt)

        for i in range(num_frames):
            if verbose and i % motion.fps == 0:
                print(
                    "\r >  >  >  >  %d/%d processed (%d FPS)"
                    % (i + 1, num_frames, motion.fps),
                    end=" ",
                )
            pose = motion.get_pose_by_frame(i)

            for joint_name in joint_order:
                joint = motion.skel.get_joint(joint_name)
                R, p = conversions.T2Rp(pose.get_transform(joint, local=True))
                p *= scale
                R1, R2, R3 = conversions.R2E(R, order=rot_order, degrees=True)
                # Only the root line carries translation channels.
                if joint == motion.skel.root_joint:
                    f.write(
                        "%f %f %f %f %f %f " % (p[0], p[1], p[2], R1, R2, R3)
                    )
                else:
                    f.write("%f %f %f " % (R1, R2, R3))
            f.write("\n")
            if verbose and i == num_frames - 1:
                print(
                    "\r >  >  >  >  %d/%d processed (%d FPS)"
                    % (i + 1, num_frames, motion.fps)
                )
        # Removed: dead time bookkeeping (t/t_start were never read) and
        # a redundant f.close() — the `with` block closes the file.
Ejemplo n.º 19
0
def render_transform(
    T,
    scale=1.0,
    line_width=1.0,
    point_size=0.05,
    render_pos=True,
    render_ori=[True, True, True],
    color_pos=[0, 0, 0, 1],
    color_ori=[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
    use_arrow=False,
):
    """Draw a transform as a point plus up to three axis lines/arrows.

    NOTE(review): mutable default arguments kept for interface
    compatibility; callers must not mutate them.
    """
    glLineWidth(line_width)

    R, p = conversions.T2Rp(T)

    glPushMatrix()
    glTranslated(p[0], p[1], p[2])
    glScalef(scale, scale, scale)

    if render_pos:
        glColor(color_pos)
        glutSolidSphere(0.5 * point_size, 10, 10)

    if render_ori:
        origin = np.zeros(3)
        # One axis per column of R; skip axes disabled in render_ori.
        for axis_idx in range(3):
            if not render_ori[axis_idx]:
                continue
            tip = origin + R[:, axis_idx]
            if use_arrow:
                render_arrow(origin,
                             tip,
                             D=line_width * 0.02,
                             color=color_ori[axis_idx])
            else:
                render_line(origin, tip, color=color_ori[axis_idx])

    glPopMatrix()
    def state_imitation(self, sim_agent, kin_agent, poses, vels, include_abs,
                        include_rel):
        """Build the imitation observation for a set of target poses.

        For each (pose, vel) target, the kinematic agent is posed and
        its body state compared against the simulated agent's.

        Args:
            sim_agent: Simulated character.
            kin_agent: Kinematic (reference) character; its state is
                temporarily overwritten and restored before returning.
            poses, vels: Equal-length target pose/velocity sequences.
            include_abs: Append the kinematic state values themselves.
            include_rel: Append sim-vs-kin differences (vector deltas
                for 3D entries, quaternion differences for 4D entries).

        Returns:
            One stacked array of all state components.
        """
        assert len(poses) == len(vels)

        R_sim, p_sim = conversions.T2Rp(
            sim_agent.get_facing_transform(self.get_ground_height()))
        R_sim_inv = R_sim.transpose()
        state_sim = self._state_body(sim_agent, None, return_stacked=False)

        state = []
        state_kin_orig = kin_agent.save_states()
        for pose, vel in zip(poses, vels):
            kin_agent.set_pose(pose, vel)
            state_kin = self._state_body(kin_agent, None, return_stacked=False)
            # Add pos/vel values
            if include_abs:
                state.append(np.hstack(state_kin))
            # Add difference of pos/vel values
            if include_rel:
                for j in range(len(state_sim)):
                    if len(state_sim[j]) == 3:
                        state.append(state_sim[j] - state_kin[j])
                    elif len(state_sim[j]) == 4:
                        # 4-vectors are quaternions; use the quaternion
                        # difference instead of a plain subtraction.
                        state.append(
                            self._pb_client.getDifferenceQuaternion(
                                state_sim[j], state_kin[j]))
                    else:
                        raise NotImplementedError
            ''' Add facing frame differences '''
            R_kin, p_kin = conversions.T2Rp(
                kin_agent.get_facing_transform(self.get_ground_height()))
            state.append(np.dot(R_sim_inv, p_kin - p_sim))
            state.append(np.dot(R_sim_inv, kin_agent.get_facing_direction()))
        kin_agent.restore_states(state_kin_orig)

        return np.hstack(state)
Ejemplo n.º 21
0
def render_path(data,
                color=[0.0, 0.0, 0.0],
                scale=1.0,
                line_width=1.0,
                point_size=1.0):
    """Render a sequence of transforms as a polyline plus per-pose frames."""
    glColor(color)
    glLineWidth(line_width)
    # First connect the translation components with a line strip.
    glBegin(GL_LINE_STRIP)
    for T in data:
        _, position = conversions.T2Rp(T)
        glVertex3d(position[0], position[1], position[2])
    glEnd()

    # Then draw the full coordinate frame at every pose.
    for T in data:
        render_transform(T, scale, line_width, point_size)
Ejemplo n.º 22
0
def transform(motion, T, local=False):
    """
    Apply transform to all poses of a motion sequence. The operation is done
    in-place.

    Args:
        motion: Motion sequence to be transformed
        T: Transformation matrix of shape (4, 4) to be applied to poses of
            motion
        local: Optional; Set local=True if the transformations are to be
            applied locally, relative to parent of each joint.

    Returns:
        The same (mutated) motion object, for call chaining.
    """
    # T is constant over the whole sequence; decompose it once instead of
    # once per pose (the original recomputed T2Rp(T) inside the loop).
    R1, p1 = conversions.T2Rp(T)
    for pose_id in range(len(motion.poses)):
        R0, p0 = conversions.T2Rp(motion.poses[pose_id].get_root_transform())
        if local:
            # Compose on the right: the offset is expressed in the root's
            # own frame, so its translation is rotated by R0 first.
            R, p = np.dot(R0, R1), p0 + np.dot(R0, p1)
        else:
            # Compose on the left: world-frame rotation; the translation is
            # added directly (the original root position is not rotated).
            R, p = np.dot(R1, R0), p0 + p1
        motion.poses[pose_id].set_root_transform(
            conversions.Rp2T(R, p),
            local=False,
        )
    return motion
Ejemplo n.º 23
0
    def test_load_motion(self):
        """Check joint Euler angles of a loaded BVH against the raw file."""
        motion = bvh.load(file=TEST_SINUSOIDAL_FILE)

        # The last num_frames() lines of a BVH file hold the frame data.
        with open(TEST_SINUSOIDAL_FILE) as f:
            frame_lines = f.readlines()[-motion.num_frames():]

        for frame_num, line in enumerate(frame_lines):
            # The first 3 floats are the root translation; the rest are
            # per-joint Euler angles, 3 values per joint.
            values = [float(v) for v in line.strip().split()]
            angle_data = np.reshape(values[3:], (-1, 3))
            for T, true_E in zip(motion.poses[frame_num].data, angle_data):
                R, _ = conversions.T2Rp(T)
                E = conversions.R2E(R, order="XYZ", degrees=True)
                np.testing.assert_almost_equal(E, true_E)
Ejemplo n.º 24
0
    def compute_target_pose(self, idx, action):
        """Convert a normalized policy action into a per-joint target pose.

        Args:
            idx: Index of the simulated agent.
            action: Normalized action vector; denormalized through the
                agent's action space before use.

        Returns:
            A new pose object (deep copy of the reference pose) with the
            action's rotational offsets applied joint by joint.

        Raises:
            NotImplementedError: For unsupported action modes or joint types.
        """
        agent = self._sim_agent[idx]
        char_info = agent._char_info
        ''' the current posture should be deepcopied because action will modify it '''
        if self._action_type == Env.ActionMode.Relative:
            ref_pose = copy.deepcopy(self.get_current_pose_from_motion(idx))
        else:
            ref_pose = copy.deepcopy(
                self._base_motion[idx].get_pose_by_frame(0))

        # Map normalized action values into their physical ranges.
        a_real = self._action_space[idx].norm_to_real(action)

        dof_cnt = 0
        for j in agent._joint_indices:
            joint_type = agent.get_joint_type(j)
            ''' Fixed joint will not be affected '''
            if joint_type == self._pb_client.JOINT_FIXED:
                continue
            ''' If the joint do not have correspondance, use the reference posture itself'''
            # PEP 8: identity comparison for None (was `== None`).
            if char_info.bvh_map[j] is None:
                continue
            if self._action_type == Env.ActionMode.Relative:
                T = ref_pose.get_transform(char_info.bvh_map[j], local=True)
            elif self._action_type == Env.ActionMode.Absolute:
                T = ref_pose.skel.get_joint(
                    char_info.bvh_map[j]).xform_from_parent_joint
            else:
                raise NotImplementedError
            R, p = conversions.T2Rp(T)
            if joint_type == self._pb_client.JOINT_SPHERICAL:
                # Spherical joints consume 3 DOFs as an axis-angle offset.
                dR = conversions.A2R(a_real[dof_cnt:dof_cnt + 3])
                dof_cnt += 3
            elif joint_type == self._pb_client.JOINT_REVOLUTE:
                # Revolute joints consume 1 DOF: an angle about the joint axis.
                axis = agent.get_joint_axis(j)
                angle = a_real[dof_cnt:dof_cnt + 1]
                dR = conversions.A2R(axis * angle)
                dof_cnt += 1
            else:
                raise NotImplementedError
            # Apply the rotational offset to the reference transform.
            T_new = conversions.Rp2T(np.dot(R, dR), p)
            ref_pose.set_transform(char_info.bvh_map[j],
                                   T_new,
                                   do_ortho_norm=False,
                                   local=True)

        return ref_pose
Ejemplo n.º 25
0
 def get_facing_direction_position(self, ground_height=0.0):
     """Return the unit facing direction and root position, both projected
     onto the ground plane; position is lifted by ground_height if given."""
     R, p = conversions.T2Rp(self.get_root_transform())
     v_up = self._char_info.v_up_env
     d = np.dot(R, self._char_info.v_face)
     if np.allclose(d, v_up):
         # Facing vector coincides with the up axis: projection would be
         # zero, so warn and fall back to a random direction.
         msg = (
             '\n+++++++++++++++++WARNING+++++++++++++++++++\n'
             'The facing direction is ill-defined '
             '(i.e. parellel to the world up-vector).\n'
             'A random direction will be assigned for the direction\n'
             'Be careful if your system is sensitive to the facing direction\n'
             '+++++++++++++++++++++++++++++++++++++++++++\n'
         )
         warnings.warn(msg)
         d = math.random_unit_vector()
     # Remove the up-axis component from both direction and position.
     d = d - math.projectionOnVector(d, v_up)
     p = p - math.projectionOnVector(p, v_up)
     if ground_height != 0.0:
         p += ground_height * v_up
     return d / np.linalg.norm(d), p
Ejemplo n.º 26
0
    def test_save_motion(self):
        """Round-trip test: save a loaded BVH motion and verify the output.

        Checks two properties:
        1. every numeric entry on the saved file's frame lines matches the
           reference file;
        2. the in-memory motion's joint rotations still match the reference
           file's Euler angles after the save.
        """
        # Load file
        motion = bvh.load(file=TEST_SINUSOIDAL_FILE)

        with tempfile.NamedTemporaryFile() as fp:
            # Save loaded file
            bvh.save(motion, fp.name, rot_order="XYZ")
            # Reload saved file and test if it is same as reference file
            # Read pose data from all frames (in Euler angle)
            with open(TEST_SINUSOIDAL_FILE) as f:
                orig_file = f.readlines()
            with open(fp.name) as f:
                saved_file = f.readlines()
            # The last num_frames() lines of a BVH file hold the frame data.
            for orig_line, saved_line in zip(
                    orig_file[-motion.num_frames():],
                    saved_file[-motion.num_frames():],
            ):
                # Compare every numeric entry on the frame line (root
                # translation and joint Euler angles alike).
                orig_data, saved_data = [
                    np.array(list(map(float,
                                      line.strip().split())))
                    for line in [orig_line, saved_line]
                ]
                np.testing.assert_almost_equal(orig_data, saved_data)

            # Reload saved file and test if it has the same data as original
            # motion object
            with open(TEST_SINUSOIDAL_FILE) as f:
                file_content = f.readlines()
            for frame_num, line in enumerate(
                    file_content[-motion.num_frames():]):
                # Skip first 3 entries that store translation data, and read
                # the Euler angle data of joints
                angle_data = np.array(
                    list(map(float,
                             line.strip().split()))[3:]).reshape(-1, 3)
                for T, true_E in zip(motion.poses[frame_num].data, angle_data):
                    R, _ = conversions.T2Rp(T)
                    E = conversions.R2E(R, order="XYZ", degrees=True)
                    np.testing.assert_almost_equal(E, true_E)
Ejemplo n.º 27
0
    def set_pose_by_xform(self, xform):
        """Set the simulated character's pose from per-joint transforms.

        Args:
            xform: Sequence of 4x4 transforms ordered to match
                self._char_info.bvh_map_inv; entry 0 is the base (root).

        All joint velocities are zeroed; only positions/orientations are
        applied. Fixed joints and unmapped (None) joints are skipped.
        """
        assert len(xform) == len(self._char_info.bvh_map_inv)
        ''' Base '''
        Q, p = conversions.T2Qp(xform[0])
        p *= self._ref_scale  # reference-to-simulation scale conversion

        bu.set_base_pQvw(self._pb_client, self._body_id, p, Q, None, None)
        ''' Others '''
        indices = []
        state_pos = []
        state_vel = []
        # idx walks xform in lockstep with bvh_map_inv's iteration order.
        idx = -1
        for k, j in self._char_info.bvh_map_inv.items():
            idx += 1
            if idx == 0: continue  # base already handled above
            if j is None: continue  # no simulation counterpart for this joint
            joint_type = self._joint_type[j]
            if joint_type == self._pb_client.JOINT_FIXED:
                continue
            T = xform[idx]
            if joint_type == self._pb_client.JOINT_SPHERICAL:
                # Spherical joint: quaternion position, zero angular velocity.
                Q, p = conversions.T2Qp(T)
                w = np.zeros(3)
                state_pos.append(Q)
                state_vel.append(w)
            elif joint_type == self._pb_client.JOINT_REVOLUTE:
                # Revolute joint: project rotation/velocity onto the 1D axis.
                joint_axis = self.get_joint_axis(j)
                R, p = conversions.T2Rp(T)
                w = np.zeros(3)
                state_pos.append(math.project_rotation_1D(R, joint_axis))
                state_vel.append(math.project_angular_vel_1D(w, joint_axis))
            else:
                raise NotImplementedError()
            indices.append(j)

        bu.set_joint_pv(self._pb_client, self._body_id, indices, state_pos,
                        state_vel)
Ejemplo n.º 28
0
 def get_facing_direction_position(self):
     """Return the unit facing direction and the root position, both
     projected onto the ground plane (up-axis component removed)."""
     R, p = conversions.T2Rp(self.get_root_transform())
     v_up = self.skel.v_up_env
     facing = np.dot(R, self.skel.v_face)
     facing = facing - math.projectionOnVector(facing, v_up)
     position = p - math.projectionOnVector(p, v_up)
     return facing / np.linalg.norm(facing), position
    def get_task_error(self, idx, data_prev, data_next, action):
        """Compute per-term imitation errors between sim and kin characters.

        Args:
            idx: Agent index.
            data_prev: Unused here; kept for interface symmetry.
            data_next: Per-agent state dicts; only data_next[idx] is read.
            action: Unused here; kept for interface symmetry.

        Returns:
            Dict mapping each enabled reward name ('pose_pos', 'pose_vel',
            'ee', 'root', 'com') to a non-negative scalar error.
        """
        error = {}

        sim_agent = self._sim_agent[idx]
        char_info = sim_agent._char_info

        data = data_next[idx]

        # Unpack the simulated character's state and its facing frame.
        sim_root_p, sim_root_Q, sim_root_v, sim_root_w = data['sim_root_pQvw']
        sim_link_p, sim_link_Q, sim_link_v, sim_link_w = data['sim_link_pQvw']
        sim_joint_p, sim_joint_v = data['sim_joint_pv']
        sim_facing_frame = data['sim_facing_frame']
        R_sim_f, p_sim_f = conversions.T2Rp(sim_facing_frame)
        R_sim_f_inv = R_sim_f.transpose()  # rotation inverse == transpose
        sim_com, sim_com_vel = data['sim_com'], data['sim_com_vel']

        # Unpack the kinematic (reference) character's state likewise.
        kin_root_p, kin_root_Q, kin_root_v, kin_root_w = data['kin_root_pQvw']
        kin_link_p, kin_link_Q, kin_link_v, kin_link_w = data['kin_link_pQvw']
        kin_joint_p, kin_joint_v = data['kin_joint_pv']
        kin_facing_frame = data['kin_facing_frame']
        R_kin_f, p_kin_f = conversions.T2Rp(kin_facing_frame)
        R_kin_f_inv = R_kin_f.transpose()
        kin_com, kin_com_vel = data['kin_com'], data['kin_com_vel']

        indices = range(len(sim_joint_p))

        # Joint-position error: weighted squared axis-angle / scalar diffs.
        if 'pose_pos' in self._reward_names[idx]:
            error['pose_pos'] = 0.0
            for j in indices:
                joint_type = sim_agent.get_joint_type(j)
                if joint_type == self._pb_client.JOINT_FIXED:
                    continue
                elif joint_type == self._pb_client.JOINT_SPHERICAL:
                    # Angle of the relative quaternion between sim and kin.
                    dQ = self._pb_client.getDifferenceQuaternion(
                        sim_joint_p[j], kin_joint_p[j])
                    _, diff_pose_pos = self._pb_client.getAxisAngleFromQuaternion(
                        dQ)
                else:
                    diff_pose_pos = sim_joint_p[j] - kin_joint_p[j]
                error['pose_pos'] += char_info.joint_weight[j] * np.dot(
                    diff_pose_pos, diff_pose_pos)
            if len(indices) > 0:
                error['pose_pos'] /= len(indices)

        # Joint-velocity error: weighted squared velocity differences.
        if 'pose_vel' in self._reward_names[idx]:
            error['pose_vel'] = 0.0
            for j in indices:
                joint_type = sim_agent.get_joint_type(j)
                if joint_type == self._pb_client.JOINT_FIXED:
                    continue
                else:
                    diff_pose_vel = sim_joint_v[j] - kin_joint_v[j]
                error['pose_vel'] += char_info.joint_weight[j] * np.dot(
                    diff_pose_vel, diff_pose_vel)
            if len(indices) > 0:
                error['pose_vel'] /= len(indices)

        # End-effector error: positions compared in each facing frame.
        if 'ee' in self._reward_names[idx]:
            error['ee'] = 0.0

            for j in char_info.end_effector_indices:
                sim_ee_local = np.dot(R_sim_f_inv, sim_link_p[j] - p_sim_f)
                kin_ee_local = np.dot(R_kin_f_inv, kin_link_p[j] - p_kin_f)
                diff_pos = sim_ee_local - kin_ee_local
                error['ee'] += np.dot(diff_pos, diff_pos)

            if len(char_info.end_effector_indices) > 0:
                error['ee'] /= len(char_info.end_effector_indices)

        # Root error: weighted sum over position, orientation angle,
        # linear velocity, and angular velocity differences.
        if 'root' in self._reward_names[idx]:
            diff_root_p = sim_root_p - kin_root_p
            _, diff_root_Q = self._pb_client.getAxisAngleFromQuaternion(
                self._pb_client.getDifferenceQuaternion(
                    sim_root_Q, kin_root_Q))
            diff_root_v = sim_root_v - kin_root_v
            diff_root_w = sim_root_w - kin_root_w
            error['root'] = 1.0 * np.dot(diff_root_p, diff_root_p) + \
                            0.1 * np.dot(diff_root_Q, diff_root_Q) + \
                            0.01 * np.dot(diff_root_v, diff_root_v) + \
                            0.001 * np.dot(diff_root_w, diff_root_w)

        # Center-of-mass error: position in facing frame plus velocity.
        if 'com' in self._reward_names[idx]:
            diff_com = np.dot(R_sim_f_inv, sim_com - p_sim_f) - np.dot(
                R_kin_f_inv, kin_com - p_kin_f)
            diff_com_vel = sim_com_vel - kin_com_vel
            error['com'] = 1.0 * np.dot(diff_com, diff_com) + \
                           0.1 * np.dot(diff_com_vel, diff_com_vel)

        return error
Ejemplo n.º 30
0
def compare_and_connect_edge(
    node_id,
    nodes,
    motions,
    frames_compare,
    w_joints,
    w_joint_pos,
    w_joint_vel,
    w_root_pos,
    w_root_vel,
    w_ee_pos,
    w_ee_vel,
    w_trajectory,
    diff_threshold,
    num_comparison,
    verbose,
):
    """Find candidate graph edges from one motion-graph node to all nodes.

    Compares the end of `node_id`'s motion window against the start of every
    node's window at sampled frame offsets, combining pose, root/end-effector,
    and (optionally) facing-trajectory dissimilarities.

    Args:
        node_id: Index of the source node in `nodes`.
        nodes: List of node dicts with 'motion_idx', 'frame_start',
            'frame_end' keys.
        motions: List of motion objects indexed by 'motion_idx'.
        frames_compare: Length (in frames) of the comparison window.
        w_joints, w_joint_pos, w_joint_vel, w_root_pos, w_root_vel,
        w_ee_pos, w_ee_vel: Weights forwarded to the similarity functions.
        w_trajectory: If > 0, also accumulate facing-trajectory difference.
        diff_threshold: Keep only pairs with total difference <= threshold.
        num_comparison: Number of sampled comparison frames; averages are
            divided by this (NOTE(review): the actual number of loop
            iterations may differ slightly from num_comparison depending on
            the integer step — confirm this is intended).
        verbose: Unused here.

    Returns:
        List of (diff, node_id, j) tuples for nodes j within the threshold.
    """
    node = nodes[node_id]
    res = []
    num_nodes = len(nodes)

    motion_idx = node["motion_idx"]
    frame_end = node["frame_end"]

    for j in range(num_nodes):
        motion_idx_j = nodes[j]["motion_idx"]
        frame_start_j = nodes[j]["frame_start"]
        diff_pose = 0.0
        diff_root_ee = 0.0
        diff_trajectory = 0.0

        # Sample num_comparison frames evenly across the comparison window.
        for k in range(0, frames_compare + 1,
                       (frames_compare + 1) // num_comparison):
            pose = motions[motion_idx].get_pose_by_frame(frame_end + k)
            vel = motions[motion_idx].get_velocity_by_frame(frame_end + k)
            pose_j = motions[motion_idx_j].get_pose_by_frame(frame_start_j + k)
            vel_j = motions[motion_idx_j].get_velocity_by_frame(frame_start_j +
                                                                k)
            if k == 0:
                # Facing frames at the first sample anchor the comparison.
                T_ref = pose.get_facing_transform()
                T_ref_j = pose_j.get_facing_transform()
            diff_pose += similarity.pose_similarity(pose, pose_j, vel, vel_j,
                                                    w_joint_pos, w_joint_vel,
                                                    w_joints)
            diff_root_ee += similarity.root_ee_similarity(
                pose,
                pose_j,
                vel,
                vel_j,
                w_root_pos,
                w_root_vel,
                w_ee_pos,
                w_ee_vel,
                T_ref,
                T_ref_j,
            )
            if w_trajectory > 0.0:
                # Compare per-step facing-frame displacements; the k == 0
                # iteration only seeds the *_prev state used below.
                R, p = conversions.T2Rp(pose.get_facing_transform())
                R_j, p_j = conversions.T2Rp(pose_j.get_facing_transform())
                if k > 0:
                    d = np.dot(R_prev.transpose(), p - p_prev)
                    d_j = np.dot(R_j_prev.transpose(), p_j - p_j_prev)
                    d = d - d_j
                    diff_trajectory += np.dot(d, d)
                R_prev, p_prev = R, p
                R_j_prev, p_j_prev = R_j, p_j
        diff_pose /= num_comparison
        diff_root_ee /= num_comparison
        diff_trajectory /= num_comparison
        diff = diff_pose + diff_root_ee + diff_trajectory

        if diff <= diff_threshold:
            res.append((diff, node_id, j))
    return res