    def TransformAngularVelocityToLocalFrame(self, angular_velocity,
                                             orientation):
        """Transforms an angular velocity from the world frame to the robot's frame.

        Args:
          angular_velocity: Angular velocity of the robot in the world frame.
          orientation: Orientation of the robot as an (x, y, z, w) quaternion.

        Returns:
          The angular velocity expressed in the robot's local frame, as a numpy
          array.
        """
        # Treat the angular velocity as a free vector and rotate it by the inverse
        # of the given orientation. The transform has zero translation, so only the
        # rotation part is applied.
        tr = dp.TinySpatialTransform()
        tr.translation = dp.Vector3(0, 0, 0)
        orn = dp.Quaternion(orientation[0], orientation[1], orientation[2],
                            orientation[3])
        tr.rotation = dp.Matrix3(orn)
        rel_vel = tr.apply_inverse(
            dp.Vector3(angular_velocity[0], angular_velocity[1],
                       angular_velocity[2]))
        return vec3_to_np(rel_vel)
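
# Hypothetical usage sketch (not part of the original snippet): `robot` stands for
# an instance of the surrounding class, and the quaternion is given in (x, y, z, w)
# order, matching the dp.Quaternion construction above. With the identity
# orientation the vector comes back unchanged, as a numpy array.
ang_vel_world = [0.1, 0.0, 0.2]
identity_orn = [0.0, 0.0, 0.0, 1.0]  # identity quaternion, (x, y, z, w)
ang_vel_local = robot.TransformAngularVelocityToLocalFrame(ang_vel_world,
                                                           identity_orn)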
Example #2
def main(argv):
    if len(argv) > 1:
        raise app.UsageError("Too many command-line arguments.")

    w = pydiffphys.TinyWorld()

    w.gravity = pydiffphys.Vector3(0, 0, -9.81)
    g = w.gravity
    print("g=", g.z)

    # A static plane (mass 0) and a dynamic unit-mass sphere of radius 5.
    plane = pydiffphys.TinyPlane()
    sphere = pydiffphys.TinySphere(5.0)
    d = w.get_collision_dispatcher()
    b1 = pydiffphys.TinyRigidBody(0.0, plane)
    b2 = pydiffphys.TinyRigidBody(1.0, sphere)

    print("b1.linear_velocity=", b1.linear_velocity)
    pos_a = pydiffphys.Vector3(0, 0, 0)
    orn_a = pydiffphys.Quaternion(0, 0, 0, 1)
    pos_b = pydiffphys.Vector3(0, 0, 50)
    orn_b = pydiffphys.Quaternion(0, 0, 0, 1)

    pose_a = pydiffphys.TinyPose(pos_a, orn_a)
    pose_b = pydiffphys.TinyPose(pos_b, orn_b)
    b1.world_pose = pose_a
    b2.world_pose = pose_b

    # Drop the sphere onto the plane at 240 Hz.
    dt = 1. / 240.
    steps = 1000
    num_iter = 50
    solver = pydiffphys.TinyConstraintSolver()
    bodies = [b1, b2]
    for step in range(steps):
        b2.apply_gravity(g)
        b2.apply_force_impulse(dt)
        b2.clear_forces()
        rb_contacts = w.compute_contacts_rigid_body(bodies, d)
        # Iteratively resolve contact constraints, then integrate all bodies.
        for _ in range(num_iter):
            for contact in rb_contacts:
                solver.resolve_collision(contact, dt)
        for body in bodies:
            body.integrate(dt)

        print("step=", step, ": b2.world_pose.position=",
              b2.world_pose.position, ", b2.linear_velocity=",
              b2.linear_velocity)
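
# Entry-point sketch (an assumption, not part of the original snippet): the
# app.UsageError call above suggests absl's app runner (`from absl import app`),
# and the example assumes the bindings are importable as `pydiffphys`.
if __name__ == "__main__":
    app.run(main)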
Example #3
def sync_visual_transforms(mb, b2vis, vis):
    """Copies the multibody's link world transforms to the meshcat visuals."""
    for key in b2vis:
        v = b2vis[key]
        link_world_trans = mb.get_world_transform(v.link_index)

        # Local transform of the visual relative to its link, from the URDF origin.
        vorn = dp.Quaternion(0.0, 0.0, 0.0, 1.0)
        vorn.set_euler_rpy(v.origin_rpy)
        trv = dp.TinySpatialTransform()
        trv.translation = v.origin_xyz
        trv.rotation = dp.Matrix3(vorn)

        # World transform of the visual, converted to a row-major 4x4 matrix.
        gfx_world_trans = link_world_trans * trv
        rot = gfx_world_trans.rotation
        mat = [[rot.get_row(0).x, rot.get_row(0).y, rot.get_row(0).z,
                gfx_world_trans.translation.x],
               [rot.get_row(1).x, rot.get_row(1).y, rot.get_row(1).z,
                gfx_world_trans.translation.y],
               [rot.get_row(2).x, rot.get_row(2).y, rot.get_row(2).z,
                gfx_world_trans.translation.z],
               [0., 0., 0., 1.]]

        vis[v.vis_name].set_transform(np.array(mat))
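
# Typical per-step usage, mirroring how the class further below drives rendering:
# recompute forward kinematics for the current state, then push the updated link
# transforms to the meshcat visuals. `mb`, `b2vis` and `vis` are assumed to come
# from UrdfToMultiBody2 / convert_visuals as in the surrounding examples.
dp.forward_kinematics(mb, mb.q, mb.qd)
sync_visual_transforms(mb, b2vis, vis)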
print("conversion to TinyUrdfStructures done!")

texture_path = os.path.join(pd.getDataPath(), "laikago/laikago_tex.jpg")

laikago_vis = meshcat_utils_dp.convert_visuals(med0.urdf_structs, texture_path,
                                               vis)
print("convert_visuals done")

urdf2mb = dp.UrdfToMultiBody2()
is_floating = True
laikago_mb = dp.TinyMultiBody(is_floating)

res = urdf2mb.convert2(med0.urdf_structs, world, laikago_mb)
laikago_mb.set_base_position(dp.Vector3(3., 2., 0.7))
laikago_mb.set_base_orientation(
    dp.Quaternion(0.8042817254804792, 0.08692563458095628,
                  -0.12155529396079404, 0.5751514153863713))
mb_solver = dp.TinyMultiBodyConstraintSolver()

q = dp.VectorX(2)
q[0] = -0.53167
q[1] = 0.30707
qd = dp.VectorX(2)
qd[0] = -1
qd[1] = 0

qd_new = laikago_mb.qd
# (optionally modify qd_new, e.g. qd_new[2] = 1, and write it back with
#  laikago_mb.qd = qd_new)

print("laikago_mb.q=", laikago_mb.q)
plane_urdf_data = urdf_parser.load_urdf("../../data/plane_implicit.urdf")
plane2vis = meshcat_utils_dp.convert_visuals(plane_urdf_data,
                                             "../../data/checker_purple.png",
                                             vis, "../../data/")
plane_mb = dp.TinyMultiBody(False)
plane2mb = dp.UrdfToMultiBody2()
res = plane2mb.convert2(plane_urdf_data, world, plane_mb)

urdf_data = urdf_parser.load_urdf("../../data/laikago/laikago_toes_zup.urdf")
print("robot_name=",urdf_data.robot_name)
b2vis = meshcat_utils_dp.convert_visuals(urdf_data, "../../data/laikago/laikago_tex.jpg", vis, "../../data/laikago/")
is_floating = True
mb = dp.TinyMultiBody(is_floating)
urdf2mb = dp.UrdfToMultiBody2()
res = urdf2mb.convert2(urdf_data, world, mb)
mb.set_base_position(dp.Vector3(0, 0, 0.6))
mb.set_base_orientation(
    dp.Quaternion(0.0, 0.0, 0.706825181105366, 0.7073882691671998))

knee_angle = -0.5
abduction_angle = 0.2

initial_poses = [abduction_angle, 0., knee_angle, abduction_angle, 0., knee_angle,
                 abduction_angle, 0., knee_angle, abduction_angle, 0., knee_angle]

qcopy = mb.q
print("mb.q=", qcopy)
# The first 7 entries of q are the floating-base pose (orientation quaternion and
# position); the 12 joint angles start at index 7.
for q_index in range(12):
    qcopy[q_index + 7] = initial_poses[q_index]
print("qcopy=", qcopy)
mb.set_q(qcopy)
print("2 mb.q=", mb.q)
    def joint_angles_from_link_position(self,
                                        robot,
                                        link_position,
                                        link_id,
                                        joint_ids,
                                        position_in_world_frame,
                                        base_translation=(0, 0, 0),
                                        base_rotation=(0, 0, 0, 1)):
        """Uses Inverse Kinematics to calculate joint angles.

    Args:
      robot: A robot instance.
      link_position: The (x, y, z) of the link in the body or the world frame,
        depending on whether the argument position_in_world_frame is true.
      link_id: The link id as returned from loadURDF.
      joint_ids: The positional index of the joints. This can be different from
        the joint unique ids.
      position_in_world_frame: Whether the input link_position is specified
        in the world frame or the robot's base frame.
      base_translation: Additional base translation.
      base_rotation: Additional base rotation.

    Returns:
      A list of joint angles.
    """

        if not position_in_world_frame:
            # Compose the base world transform with the extra base offset, then
            # transform the link-local position into the world frame.
            base_world_tr = self.mb.get_world_transform(-1)
            local_base_tr = dp.TinySpatialTransform()
            local_base_tr.translation = dp.Vector3(base_translation[0],
                                                   base_translation[1],
                                                   base_translation[2])
            local_base_tr.rotation = dp.Matrix3(
                dp.Quaternion(base_rotation[0], base_rotation[1],
                              base_rotation[2], base_rotation[3]))
            world_tr = base_world_tr * local_base_tr
            local_link_tr = dp.TinySpatialTransform()
            local_link_tr.rotation = dp.Matrix3(dp.Quaternion(0, 0, 0, 1))
            local_link_tr.translation = dp.Vector3(link_position[0],
                                                   link_position[1],
                                                   link_position[2])
            link_tr = world_tr * local_link_tr
            world_link_pos = [
                link_tr.translation[0], link_tr.translation[1],
                link_tr.translation[2]
            ]
        else:
            world_link_pos = link_position

        target_world_pos = dp.Vector3(world_link_pos[0], world_link_pos[1],
                                      world_link_pos[2])

        # Solve IK for the whole multibody, then drop the first 7 coordinates
        # (the floating-base pose) to keep only the joint angles.
        all_joint_angles = dp.inverse_kinematics(self.mb, link_id,
                                                 target_world_pos)
        vec = []
        for i in range(all_joint_angles.size() - 7):
            vec.append(all_joint_angles[i + 7])
        vec = np.array(vec)

        # Extract the relevant joint angles.
        joint_angles = [vec[i] for i in joint_ids]
        return joint_angles
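
    # Hypothetical usage sketch (not part of the original code): solve for one leg's
    # joint angles so that its toe reaches a target given in the base frame. The
    # link id and joint indices below are placeholders that depend on the URDF's
    # link ordering, and `robot_env` stands for an instance of this class. Note that
    # the `robot` argument is not used by this implementation.
    #
    #   toe_link_id = 3
    #   leg_joint_ids = [0, 1, 2]
    #   angles = robot_env.joint_angles_from_link_position(
    #       robot=None,
    #       link_position=(0.2, -0.1, -0.4),
    #       link_id=toe_link_id,
    #       joint_ids=leg_joint_ids,
    #       position_in_world_frame=False)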
    def __init__(self, simulation_time_step):
        self.MPC_BODY_MASS = MPC_BODY_MASS
        self.MPC_BODY_INERTIA = MPC_BODY_INERTIA
        self.MPC_BODY_HEIGHT = MPC_BODY_HEIGHT

        self.time_step = simulation_time_step

        self.num_legs = NUM_LEGS
        self.num_motors = NUM_MOTORS

        self.skip_sync = 0
        self.world = dp.TinyWorld()
        self.world.friction = 10.0

        self.mb_solver = dp.TinyMultiBodyConstraintSolver()
        self.vis = meshcat.Visualizer(zmq_url='tcp://127.0.0.1:6000')
        self.vis.delete()

        urdf_parser = dp.TinyUrdfParser()

        plane_urdf_data = urdf_parser.load_urdf(
            "../../../data/plane_implicit.urdf")
        plane2vis = meshcat_utils_dp.convert_visuals(
            plane_urdf_data, "../../../data/checker_purple.png", self.vis,
            "../../../data/")
        self.plane_mb = dp.TinyMultiBody(False)
        plane2mb = dp.UrdfToMultiBody2()
        res = plane2mb.convert2(plane_urdf_data, self.world, self.plane_mb)

        self.urdf_data = urdf_parser.load_urdf(
            "../../../data/laikago/laikago_toes_zup_joint_order.urdf")
        print("robot_name=", self.urdf_data.robot_name)
        self.b2vis = meshcat_utils_dp.convert_visuals(
            self.urdf_data, "../../../data/laikago/laikago_tex.jpg", self.vis,
            "../../../data/laikago/")
        is_floating = True
        self.mb = dp.TinyMultiBody(is_floating)
        urdf2mb = dp.UrdfToMultiBody2()
        self.res = urdf2mb.convert2(self.urdf_data, self.world, self.mb)
        self.mb.set_base_position(dp.Vector3(0, 0, 0.6))

        knee_angle = -0.5
        abduction_angle = 0.2

        self.initial_poses = [
            abduction_angle, 0., knee_angle, abduction_angle, 0., knee_angle,
            abduction_angle, 0., knee_angle, abduction_angle, 0., knee_angle
        ]

        qcopy = self.mb.q
        print("mb.q=", self.mb.q)
        print("qcopy=", qcopy)
        for q_index in range(12):
            qcopy[q_index + 7] = self.initial_poses[q_index]
        print("qcopy=", qcopy)
        self.mb.set_q(qcopy)
        print("2 mb.q=", self.mb.q)
        print("self.mb.links=", self.mb.links)
        print("TDS:")
        for i in range(len(self.mb.links)):
            print("i=", i)
            l = self.mb.links[i]
            #print("l.link_name=", l.link_name)
            print("l.joint_name=", l.joint_name)
        dp.forward_kinematics(self.mb, self.mb.q, self.mb.qd)

        self._BuildJointNameToIdDict()
        self._BuildUrdfIds()
        self._BuildMotorIdList()
        self.ResetPose()
        self._motor_enabled_list = [True] * self.num_motors
        self._step_counter = 0
        self._state_action_counter = 0
        self._motor_offset = np.array([0.] * 12)
        self._motor_direction = np.array([1.] * 12)
        self.ReceiveObservation()
        self._kp = self.GetMotorPositionGains()
        self._kd = self.GetMotorVelocityGains()
        self._motor_model = LaikagoMotorModel(
            kp=self._kp, kd=self._kd, motor_control_mode=MOTOR_CONTROL_HYBRID)

        self.ReceiveObservation()
        self.mb.set_base_position(
            dp.Vector3(START_POS[0], START_POS[1], START_POS[2]))
        self.mb.set_base_orientation(
            dp.Quaternion(START_ORN[0], START_ORN[1], START_ORN[2],
                          START_ORN[3]))

        dp.forward_kinematics(self.mb, self.mb.q, self.mb.qd)
        meshcat_utils_dp.sync_visual_transforms(self.mb, self.b2vis, self.vis)

        self._SettleDownForReset(reset_time=1.0)