# Example #1
    def throw(self,
              pos_target,
              num=1,
              duration=2.0,
              shape=Shape.BOX,
              vel=8.0,
              r_out=2.0,
              r_in=0.2,
              mass=2.0,
              size=None,
              h_min=0.5):
        """Launch ``num`` obstacles toward a target position.

        Each obstacle spawns on a sphere of radius ``r_out`` around
        ``pos_target`` (its height along ``self.v_up_env`` clamped to at
        least ``h_min``) and is launched with speed ``vel`` toward a random
        point within radius ``r_in`` of the target.

        Parameters
        ----------
        pos_target : target position the obstacles are thrown at.
        num : number of obstacles to launch.
        duration : lifetime handed to each Obstacle.
        shape : obstacle geometry (Shape enum value).
        vel : launch speed (magnitude of the initial velocity).
        r_out : spawn radius around the target; must exceed ``r_in``.
        r_in : radius of the aiming zone around the target.
        mass : obstacle mass.
        size : obstacle extents; defaults to a cube with 0.2 edges.
        h_min : minimum spawn height along the world up-vector.
        """
        # Do not use a mutable ndarray as a default argument: the same
        # array object would be shared across all calls (and by every
        # Obstacle created), so any mutation would leak between calls.
        if size is None:
            size = 0.2 * np.ones(3)
        assert r_out > r_in
        for _ in range(num):
            d_out = math.random_unit_vector()
            d_in = math.random_unit_vector()

            # Spawn point on the outer sphere around the target.
            p_from = pos_target + r_out * d_out

            # Clamp the spawn height so the obstacle never starts below
            # h_min along the world up-vector.
            p_projected_h = math.projectionOnVector(p_from, self.v_up_env)
            h_clipped = max(np.linalg.norm(p_projected_h), h_min)
            p_from = (p_from - p_projected_h) + h_clipped * self.v_up_env

            # Aim at a random point inside the inner sphere and normalize.
            p_to = pos_target + r_in * d_in
            v_dir = p_to - p_from
            v_dir = v_dir / np.linalg.norm(v_dir)

            p = p_from
            # Random initial orientation: random axis, random angle.
            Q = conversions.A2Q(math.random_unit_vector() *
                                np.random.uniform(-np.pi, np.pi))
            v = vel * v_dir
            w = np.zeros(3)
            obs = Obstacle("", duration, shape, mass, size, p, Q, v, w)
            self.launch(obs)
# Example #2
def append_and_blend(motion1, motion2, blend_length=0):
    """Append ``motion2`` to ``motion1``, cross-fading over ``blend_length``.

    ``motion2`` is first translated (in the ground plane) and rotated (about
    the up-axis) so that its first frame lines up with the frame of
    ``motion1`` at time ``motion1.length() - blend_length``; the overlapping
    window is then blended pose by pose.

    Parameters
    ----------
    motion1, motion2 : motion_class.Motion
        Motions to join; must have the same number of joints.
    blend_length : float
        Overlap duration in seconds; 0 yields a plain concatenation.

    Returns
    -------
    motion_class.Motion
        A new motion (deep copy of ``motion1`` plus the aligned/blended
        frames of ``motion2``); the inputs are not modified.
    """
    assert isinstance(motion1, motion_class.Motion)
    assert isinstance(motion2, motion_class.Motion)
    assert motion1.skel.num_joints() == motion2.skel.num_joints()

    combined_motion = copy.deepcopy(motion1)
    combined_motion.name = f"{motion1.name}+{motion2.name}"

    # Degenerate case: nothing to blend against, just copy motion2's frames.
    if motion1.num_frames() == 0:
        for i in range(motion2.num_frames()):
            combined_motion.poses.append(motion2.poses[i])
            # NOTE(review): guard checks motion1 but indexes
            # motion2.velocities -- crashes if motion1 has velocities and
            # motion2 does not. Confirm both always carry velocities.
            if hasattr(motion1, "velocities"):
                combined_motion.velocities.append(motion2.velocities[i])
        return combined_motion

    # NOTE(review): frame_target is computed but never used below.
    frame_target = motion2.time_to_frame(blend_length)
    frame_source = motion1.time_to_frame(motion1.length() - blend_length)

    # Translate and rotate motion2 to location of frame_source
    pose1 = motion1.get_pose_by_frame(frame_source)
    pose2 = motion2.get_pose_by_frame(0)

    R1, p1 = conversions.T2Rp(pose1.get_root_transform())
    R2, p2 = conversions.T2Rp(pose2.get_root_transform())

    # Translation to be applied (up-component removed: align in the
    # ground plane only, keep motion2's own height)
    dp = p1 - p2
    dp = dp - math.projectionOnVector(dp, motion1.skel.v_up_env)
    axis = motion1.skel.v_up_env

    # Rotation to be applied: smallest rotation about the up-axis that
    # matches motion2's initial root orientation to motion1's
    Q1 = conversions.R2Q(R1)
    Q2 = conversions.R2Q(R2)
    _, theta = quaternion.Q_closest(Q1, Q2, axis)
    dR = conversions.A2R(axis * theta)

    motion2 = translate(motion2, dp)
    motion2 = rotate(motion2, dR)

    # NOTE(review): frames * fps has units of frames^2/sec; the start time
    # at frame_source should presumably be frame_source / motion1.fps --
    # confirm against get_pose_by_time's expected units.
    t_total = motion1.fps * frame_source
    t_processed = 0.0
    poses_new = []
    for i in range(motion2.num_frames()):
        dt = 1 / motion2.fps
        t_total += dt
        t_processed += dt
        pose_target = motion2.get_pose_by_frame(i)
        # Blend pose for a moment
        if t_processed <= blend_length:
            # alpha ramps 0 -> 1 over the blend window; safe because this
            # branch is unreachable when blend_length == 0 (dt > 0).
            alpha = t_processed / float(blend_length)
            pose_source = motion1.get_pose_by_time(t_total)
            pose_target = blend(pose_source, pose_target, alpha)
        poses_new.append(pose_target)

    # Drop motion1's tail past the splice point, then append the new frames.
    # NOTE(review): add_one_frame is called with 0 as its first argument for
    # every frame -- verify that argument (a time?) is actually ignored.
    del combined_motion.poses[frame_source + 1:]
    for i in range(len(poses_new)):
        combined_motion.add_one_frame(0, copy.deepcopy(poses_new[i].data))

    return combined_motion
# Example #3
 def get_facing_direction_position(self, ground_height=0.0):
     """Return the character's facing direction and position on the ground.

     Both the facing direction (root rotation applied to ``v_face``) and
     the root position are projected onto the ground plane by removing
     their component along ``self._char_info.v_up_env``.

     Parameters
     ----------
     ground_height : float
         If nonzero, the returned position is lifted by this amount along
         the up-vector.

     Returns
     -------
     (direction, position) : unit facing direction in the ground plane and
     the projected root position.
     """
     R, p = conversions.T2Rp(self.get_root_transform())
     d = np.dot(R, self._char_info.v_face)
     # The facing direction is ill-defined whenever it aligns with the world
     # up-vector. Test |d . up| ~= 1 so that BOTH the parallel and the
     # anti-parallel case are caught; the original np.allclose(d, up) missed
     # the anti-parallel case and produced a NaN direction after projection.
     # Assumes v_face and v_up_env are unit vectors -- TODO confirm.
     if np.allclose(abs(np.dot(d, self._char_info.v_up_env)), 1.0):
         msg = (
             '\n+++++++++++++++++WARNING+++++++++++++++++++\n'
             'The facing direction is ill-defined '
             '(i.e. parallel to the world up-vector).\n'
             'A random direction will be assigned for the direction\n'
             'Be careful if your system is sensitive to the facing direction\n'
             '+++++++++++++++++++++++++++++++++++++++++++\n')
         warnings.warn(msg)
         d = math.random_unit_vector()
     # Project onto the ground plane by removing the up-component.
     d = d - math.projectionOnVector(d, self._char_info.v_up_env)
     p = p - math.projectionOnVector(p, self._char_info.v_up_env)
     if ground_height != 0.0:
         p += ground_height * self._char_info.v_up_env
     return d / np.linalg.norm(d), p
# Example #4
 def get_facing_direction_position(self):
     """Return (unit facing direction, root position), both projected onto
     the ground plane by removing their component along the skeleton's
     world up-vector."""
     up = self.skel.v_up_env
     rot, pos = conversions.T2Rp(self.get_root_transform())
     facing = np.dot(rot, self.skel.v_face)
     facing = facing - math.projectionOnVector(facing, up)
     pos = pos - math.projectionOnVector(pos, up)
     return facing / np.linalg.norm(facing), pos
    def render(self, rm, ground_height=0.0):
        """Draw every simulated agent (plus optional shadow, contact,
        COM-velocity, facing-frame overlays) and any obstacles.

        Parameters
        ----------
        rm : render-manager object providing `gl`, `gl_render`,
            `bullet_render`, per-feature `flag` dict, and agent colors.
        ground_height : height of the ground plane; used to place the
            shadow and the facing-frame overlay.

        Side effects only (OpenGL state changes and draw calls); the order
        of the GL calls below is significant.
        """
        colors = rm.COLORS_FOR_AGENTS

        rm.gl.glEnable(rm.gl.GL_LIGHTING)
        rm.gl.glEnable(rm.gl.GL_BLEND)
        rm.gl.glBlendFunc(rm.gl.GL_SRC_ALPHA, rm.gl.GL_ONE_MINUS_SRC_ALPHA)

        for i in range(self._num_agent):
            sim_agent = self._agent[i]
            char_info = self._char_info[i]
            if rm.flag['sim_model']:
                rm.gl.glEnable(rm.gl.GL_DEPTH_TEST)
                if rm.flag['shadow']:
                    # Fake shadow: scale the model to zero along the world
                    # up-axis (d has its up-component removed) and draw it
                    # flat, slightly above the ground to avoid z-fighting.
                    rm.gl.glPushMatrix()
                    d = np.array([1, 1, 1])
                    d = d - math.projectionOnVector(d, char_info.v_up_env)
                    offset = (0.001 + ground_height) * char_info.v_up_env
                    rm.gl.glTranslatef(offset[0], offset[1], offset[2])
                    rm.gl.glScalef(d[0], d[1], d[2])
                    rm.bullet_render.render_model(self._pb_client,
                                                  sim_agent._body_id,
                                                  draw_link=True,
                                                  draw_link_info=False,
                                                  draw_joint=False,
                                                  draw_joint_geom=False,
                                                  ee_indices=None,
                                                  color=[0.5, 0.5, 0.5, 1.0],
                                                  lighting=False)
                    rm.gl.glPopMatrix()

                # Main character model.
                rm.bullet_render.render_model(
                    self._pb_client,
                    sim_agent._body_id,
                    draw_link=True,
                    draw_link_info=True,
                    draw_joint=rm.flag['joint'],
                    draw_joint_geom=True,
                    ee_indices=char_info.end_effector_indices,
                    color=colors[i])
                if rm.flag['collision'] and self._elapsed_time > 0.0:
                    # NOTE(review): glPushAttrib usually takes *_BIT masks
                    # (e.g. GL_LIGHTING_BIT); passing GL_LIGHTING etc. here
                    # looks suspicious -- confirm the intended attrib mask.
                    rm.gl.glPushAttrib(rm.gl.GL_LIGHTING | rm.gl.GL_DEPTH_TEST
                                       | rm.gl.GL_BLEND)
                    rm.gl.glEnable(rm.gl.GL_BLEND)
                    rm.bullet_render.render_contacts(self._pb_client,
                                                     sim_agent._body_id)
                    rm.gl.glPopAttrib()
                if rm.flag['com_vel']:
                    # NOTE(review): the get_root_state() result is fully
                    # overwritten on the next line; the call looks dead.
                    p, Q, v, w = sim_agent.get_root_state()
                    p, v = sim_agent.get_com_and_com_vel()
                    rm.gl_render.render_arrow(p,
                                              p + v,
                                              D=0.01,
                                              color=[0, 0, 0, 1])
                if rm.flag['facing_frame']:
                    rm.gl.glPushAttrib(rm.gl.GL_LIGHTING | rm.gl.GL_DEPTH_TEST
                                       | rm.gl.GL_BLEND)
                    rm.gl.glEnable(rm.gl.GL_BLEND)
                    rm.gl_render.render_transform(
                        sim_agent.get_facing_transform(ground_height),
                        scale=0.5,
                        use_arrow=True)
                    rm.gl.glPopAttrib()

        if rm.flag['obstacle']:
            self._obs_manager.render()
# Example #6
def root_ee_similarity(
    pose1,
    pose2,
    vel1=None,
    vel2=None,
    w_root_pos=1.0,
    w_root_vel=1.0,
    w_ee_pos=1.0,
    w_ee_vel=1.0,
    T_ref_1=None,
    T_ref_2=None,
    auto_weight=True,
    auto_weight_sigma=0.02,
):
    """
    Compute a weighted squared-difference between the roots and end
    effectors of two poses, each expressed in its own facing frame (or in
    the provided reference frames).

    Parameters
    ----------
    pose1, pose2 : poses to compare; must have the same number of end
        effectors.
    vel1, vel2 : matching velocity objects; required (asserted) whenever
        w_root_vel or w_ee_vel is positive.
    w_root_pos, w_root_vel, w_ee_pos, w_ee_vel : weights for the four
        difference terms; a term with weight <= 0 is skipped entirely.
    T_ref_1, T_ref_2 : optional reference transforms; when None the pose's
        own facing transform is used.
    auto_weight : if True, per-end-effector weights are computed from the
        height of each end effector (closer to the ground -> larger
        weight, falling off with auto_weight_sigma).
    auto_weight_sigma : falloff scale for auto_weight.

    Returns
    -------
    float : w_root_pos*dp_root^2 + w_root_vel*dv_root^2
            + w_ee_pos*sum(dp_ee^2) + w_ee_vel*sum(dv_ee^2)
    """

    assert pose1.skel.num_end_effectors() == pose2.skel.num_end_effectors()

    if w_root_vel > 0.0 or w_ee_vel > 0.0:
        assert vel1 is not None and vel2 is not None
        assert vel1.skel.num_end_effectors() == vel2.skel.num_end_effectors()

    skel = pose1.skel

    diff_root_pos = 0.0
    diff_root_vel = 0.0
    diff_ee_pos = 0.0
    diff_ee_vel = 0.0
    """
    Differencse will be computed w.r.t. its own facing frame
    if the reference frame is not given
    """

    if T_ref_1 is None:
        R_face_1, p_face_1 = conversions.T2Rp(pose1.get_facing_transform())
    else:
        R_face_1, p_face_1 = conversions.T2Rp(T_ref_1)
    if T_ref_2 is None:
        R_face_2, p_face_2 = conversions.T2Rp(pose2.get_facing_transform())
    else:
        R_face_2, p_face_2 = conversions.T2Rp(T_ref_2)

    # Rotation matrices: transpose == inverse.
    R_face_1_inv = R_face_1.transpose()
    R_face_2_inv = R_face_2.transpose()

    R_root_1, p_root_1 = conversions.T2Rp(pose1.get_root_transform())
    R_root_2, p_root_2 = conversions.T2Rp(pose2.get_root_transform())

    # Root position difference, each root expressed in its own facing frame.
    if w_root_pos > 0.0:
        p_root_1_local = np.dot(R_face_1_inv, p_root_1 - p_face_1)
        p_root_2_local = np.dot(R_face_2_inv, p_root_2 - p_face_2)

        diff_root_pos = p_root_2_local - p_root_1_local
        diff_root_pos = np.dot(diff_root_pos, diff_root_pos)
    """ Root Velocity Difference w.r.t. Facing Frame """

    if w_root_vel > 0.0:
        v_root_1 = vel1.get_linear(skel.root_joint, False, R_root_1)
        v_root_2 = vel2.get_linear(skel.root_joint, False, R_root_2)

        v_root_1_local = np.dot(R_face_1_inv, v_root_1)
        v_root_2_local = np.dot(R_face_2_inv, v_root_2)

        diff_root_vel = v_root_2_local - v_root_1_local
        diff_root_vel = np.dot(diff_root_vel, diff_root_vel)
    """ End Effector Position and Velocity Differences w.r.t. Facing Frame """

    num_ee = skel.num_end_effectors()

    if num_ee > 0:
        if w_ee_pos > 0.0 or w_ee_vel > 0.0:
            R1s, p1s = [], []
            R2s, p2s = [], []
            ee_weights = []
            for j in skel.end_effectors:
                # Global (world-space) transforms of each end effector.
                R1, p1 = conversions.T2Rp(pose1.get_transform(j, local=False))
                R2, p2 = conversions.T2Rp(pose2.get_transform(j, local=False))
                R1s.append(R1)
                R2s.append(R2)
                p1s.append(p1)
                p2s.append(p2)
                if auto_weight:
                    # Weight decays with the end effector's height above the
                    # ground. NOTE(review): uses math_ops.projectionOnVector
                    # while the rest of the codebase calls
                    # math.projectionOnVector -- verify the import.
                    h = math_ops.projectionOnVector(p1, skel.v_up_env)
                    ee_weights.append(
                        math.exp(-np.dot(h, h) / auto_weight_sigma))
                else:
                    ee_weights.append(1.0)
            # NOTE(review): ee_weights is normalized here but never applied
            # to the per-end-effector sums below -- likely unfinished.
            ee_weights_sum = np.sum(ee_weights)
            ee_weights = [w / ee_weights_sum for w in ee_weights]
            # print('--------------------')
            # for j in range(len(skel.end_effectors)):
            #     print(skel.end_effectors[j].name, ee_weights[j])
            # print('--------------------')
            if w_ee_pos > 0.0:
                for j in range(len(skel.end_effectors)):
                    p1_local = np.dot(R_face_1_inv, p1s[j] - p_face_1)
                    p2_local = np.dot(R_face_2_inv, p2s[j] - p_face_2)
                    dp = p2_local - p1_local
                    diff_ee_pos += np.dot(dp, dp)
            if w_ee_vel > 0.0:
                for j in range(len(skel.end_effectors)):
                    # NOTE(review): j is an int index here, whereas the root
                    # case passes a joint object to get_linear -- confirm
                    # get_linear accepts both.
                    v1 = vel1.get_linear(j, False, R1s[j])
                    v2 = vel2.get_linear(j, False, R2s[j])
                    v1_local = np.dot(R_face_1_inv, v1)
                    v2_local = np.dot(R_face_2_inv, v2)
                    dv = v2_local - v1_local
                    diff_ee_vel += np.dot(dv, dv)
            # diff_ee_pos /= float(num_ee)
            # diff_ee_vel /= float(num_ee)

    diff = (w_root_pos * diff_root_pos + w_root_vel * diff_root_vel +
            w_ee_pos * diff_ee_pos + w_ee_vel * diff_ee_vel)

    return diff
# Example #7
 def project_to_ground(self, v):
     """Return ``v`` with its component along the character's world
     up-vector removed (i.e. projected onto the ground plane)."""
     up = self._char_info.v_up_env
     return v - math.projectionOnVector(v, up)
# Example #8
 def get_root_height_from_ground(self, ground_height=0.0):
     """Return the height of the character's root above the ground.

     The root position is projected onto the world up-vector; the norm of
     that projection, minus ``ground_height``, is the height.
     """
     root_pos, _, _, _ = bu.get_base_pQvw(self._pb_client, self._body_id)
     up_component = math.projectionOnVector(root_pos,
                                            self._char_info.v_up_env)
     height = np.linalg.norm(up_component)
     return height - ground_height