def TransformAngularVelocityToLocalFrame(self, angular_velocity, orientation):
  """Transform the angular velocity from the world frame to the robot's frame.

  Args:
    angular_velocity: Angular velocity of the robot in the world frame.
    orientation: Orientation of the robot represented as a quaternion.

  Returns:
    The angular velocity expressed in the frame given by the orientation.
  """
  # Treat the angular velocity as a position vector and rotate it into the
  # local frame by applying the inverse of the given orientation.
  tr = dp.TinySpatialTransform()
  tr.translation = dp.Vector3(0., 0., 0.)
  orn = dp.Quaternion(orientation[0], orientation[1], orientation[2],
                      orientation[3])
  tr.rotation = dp.Matrix3(orn)
  rel_vel = tr.apply_inverse(
      dp.Vector3(angular_velocity[0], angular_velocity[1],
                 angular_velocity[2]))
  return vec3_to_np(rel_vel)
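# A minimal sanity check for the transform above using plain numpy quaternion
# math instead of the dp bindings. The helper below is hypothetical, not part
# of the library: rotating the world-frame vector by the conjugate of the
# orientation quaternion should match apply_inverse with a zero translation.
import numpy as np

def rotate_by_quaternion_conjugate(v, q):
  """Rotate vector v by the conjugate of the unit quaternion q = (x, y, z, w)."""
  x, y, z, w = q
  u = np.array([-x, -y, -z])  # conjugate negates the vector part
  # Standard quaternion rotation identity: v' = v + 2*u x (u x v + w*v).
  return v + 2.0 * np.cross(u, np.cross(u, v) + w * v)

# Example: the identity orientation leaves the angular velocity unchanged.
w_world = np.array([0.1, -0.2, 0.3])
assert np.allclose(rotate_by_quaternion_conjugate(w_world, (0., 0., 0., 1.)),
                   w_world)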
def main(argv):
  if len(argv) > 1:
    raise app.UsageError("Too many command-line arguments.")

  w = pydiffphys.TinyWorld()
  w.gravity = pydiffphys.Vector3(0, 0, -9.81)
  g = w.gravity
  print("g=", g.z)

  plane = pydiffphys.TinyPlane()
  sphere = pydiffphys.TinySphere(5.0)
  d = w.get_collision_dispatcher()

  # A static plane (mass 0) and a dynamic sphere of unit mass.
  b1 = pydiffphys.TinyRigidBody(0.0, plane)
  b2 = pydiffphys.TinyRigidBody(1.0, sphere)
  print("b1.linear_velocity=", b1.linear_velocity)

  pos_a = pydiffphys.Vector3(0, 0, 0)
  orn_a = pydiffphys.Quaternion(0, 0, 0, 1)
  pos_b = pydiffphys.Vector3(0, 0, 50)
  orn_b = pydiffphys.Quaternion(0, 0, 0, 1)
  b1.world_pose = pydiffphys.TinyPose(pos_a, orn_a)
  b2.world_pose = pydiffphys.TinyPose(pos_b, orn_b)

  dt = 1. / 240.
  num_solver_iterations = 50
  bodies = [b1, b2]
  solver = pydiffphys.TinyConstraintSolver()
  for step in range(1000):
    b2.apply_gravity(g)
    b2.apply_force_impulse(dt)
    b2.clear_forces()
    rb_contacts = w.compute_contacts_rigid_body(bodies, d)
    # Sequential-impulse style solver: sweep over all contacts several times.
    for _ in range(num_solver_iterations):
      for contact in rb_contacts:
        solver.resolve_collision(contact, dt)
    for body in bodies:
      body.integrate(dt)
    print("step=", step, ": b2.world_pose.position=", b2.world_pose.position,
          ", b2.linear_velocity=", b2.linear_velocity)
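# A hedged analytic check for the free-fall phase (before the sphere reaches
# the plane). Assuming the integrator is semi-implicit Euler -- an assumption,
# the snippet above does not state it -- after n steps:
#   v_n = -g * n * dt
#   z_n = z_0 - g * dt**2 * n * (n + 1) / 2
g_mag = 9.81
dt = 1. / 240.
z0 = 50.0
n = 240  # one simulated second
v_n = -g_mag * n * dt
z_n = z0 - g_mag * dt * dt * n * (n + 1) / 2.0
print("expected v=", v_n, "expected z=", z_n)  # roughly -9.81 m/s and 45.07 m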
def create_multi_body(mass, collision_shapes, is_floating, world):
  """Wraps the collision shapes in a single-link URDF and converts it to a TinyMultiBody."""
  urdf = pd.TinyUrdfStructures()
  base_link = pd.TinyUrdfLink()
  base_link.link_name = "plane_link"
  inertial = pd.TinyUrdfInertial()
  inertial.mass = mass
  inertial.inertia_xxyyzz = pd.Vector3(mass, mass, mass)
  base_link.urdf_inertial = inertial
  base_link.urdf_collision_shapes = collision_shapes
  urdf.base_links = [base_link]
  mb = pd.TinyMultiBody(is_floating)
  urdf2mb = pd.UrdfToMultiBody2()
  res = urdf2mb.convert2(urdf, world, mb)
  return mb
def create_multi_body(link_name, mass, collision_shapes, visual_shapes,
                      is_floating, mc, world):
  """Like the helper above, but also registers visual shapes with the meshcat visualizer mc."""
  urdf = pd.TinyUrdfStructures()
  base_link = pd.TinyUrdfLink()
  base_link.link_name = link_name
  inertial = pd.TinyUrdfInertial()
  inertial.mass = mass
  inertial.inertia_xxyyzz = pd.Vector3(mass, mass, mass)
  base_link.urdf_inertial = inertial
  base_link.urdf_collision_shapes = collision_shapes
  base_link.urdf_visual_shapes = visual_shapes
  urdf.base_links = [base_link]
  mb = pd.TinyMultiBody(is_floating)
  urdf2mb = pd.UrdfToMultiBody2()
  res = urdf2mb.convert2(urdf, world, mb)
  # Convert the visual shapes for the meshcat visualizer (no texture).
  b2vis = meshcat_utils_dp.convert_visuals(urdf, None, mc)
  return mb, b2vis
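# A hedged usage sketch for the helper above. "vis" and "world" are assumed to
# be a meshcat.Visualizer and a pd.TinyWorld created elsewhere, and
# pd.TinyUrdfVisual is assumed by analogy with pd.TinyUrdfCollision:
collision = pd.TinyUrdfCollision()
collision.geometry.geom_type = pd.SPHERE_TYPE
collision.geometry.sphere.radius = 0.5
visual = pd.TinyUrdfVisual()
visual.geometry.geom_type = pd.SPHERE_TYPE
visual.geometry.sphere.radius = 0.5
sphere_mb, sphere_vis = create_multi_body("sphere_link", 1.0, [collision],
                                          [visual], True, vis, world)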
def ApplyAction(self, motor_commands, motor_control_mode):
  """Apply the motor commands using the motor model.

  Args:
    motor_commands: np.array. Can be motor angles, torques, or hybrid
      commands, depending on the control mode.
    motor_control_mode: A MotorControlMode enum.
  """
  dp.forward_kinematics(self.mb, self.mb.q, self.mb.qd)
  dp.forward_dynamics(self.mb, dp.Vector3(0., 0., -10.))
  # Skip the 7 floating-base coordinates in q (quaternion + position) and the
  # 6 floating-base coordinates in qd (angular + linear velocity).
  mb_q = vecx_to_np(self.mb.q)[7:]
  mb_qd = vecx_to_np(self.mb.qd)[6:]
  actual_torque, observed_torque = self._motor_model.convert_to_torque(
      motor_commands, mb_q, mb_qd, mb_qd,
      motor_control_mode=motor_control_mode)
  for i in range(len(actual_torque)):
    self.mb.tau[i] = actual_torque[i]
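# A minimal sketch of what a position-mode motor model computes internally,
# assuming a plain PD law. The actual LaikagoMotorModel may clip torques and
# handle hybrid commands; this illustrates the idea, it is not its implementation.
import numpy as np

def pd_torque(q_des, q, qd, kp, kd):
  """PD torque: drive joints toward q_des while damping joint velocity."""
  return -kp * (q - q_des) - kd * qd

# Illustrative gains; the real gains come from GetMotorPositionGains() above.
tau = pd_torque(q_des=np.zeros(12), q=np.full(12, 0.1),
                qd=np.zeros(12), kp=np.full(12, 220.), kd=np.full(12, 2.))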
# Now convert the URDF structures to diffphys URDF structures.
med0.convert_urdf_structures()
print("conversion to TinyUrdfStructures done!")

texture_path = os.path.join(pd.getDataPath(), "laikago/laikago_tex.jpg")
laikago_vis = meshcat_utils_dp.convert_visuals(med0.urdf_structs, texture_path,
                                               vis)
print("convert_visuals done")

urdf2mb = dp.UrdfToMultiBody2()
is_floating = True
laikago_mb = dp.TinyMultiBody(is_floating)
res = urdf2mb.convert2(med0.urdf_structs, world, laikago_mb)
laikago_mb.set_base_position(dp.Vector3(3., 2., 0.7))
laikago_mb.set_base_orientation(
    dp.Quaternion(0.8042817254804792, 0.08692563458095628,
                  -0.12155529396079404, 0.5751514153863713))

mb_solver = dp.TinyMultiBodyConstraintSolver()

q = dp.VectorX(2)
q[0] = -0.53167
q[1] = 0.30707
qd = dp.VectorX(2)
qd[0] = -1
qd[1] = 0
qd_new = laikago_mb.qd
def convert_vec(self, vec):
  return dp.Vector3(vec[0], vec[1], vec[2])
urdf_parser = dp.TinyUrdfParser()

plane_urdf_data = urdf_parser.load_urdf("../../data/plane_implicit.urdf")
plane2vis = meshcat_utils_dp.convert_visuals(plane_urdf_data,
                                             "../../data/checker_purple.png",
                                             vis, "../../data/")
plane_mb = dp.TinyMultiBody(False)
plane2mb = dp.UrdfToMultiBody2()
res = plane2mb.convert2(plane_urdf_data, world, plane_mb)

urdf_data = urdf_parser.load_urdf("../../data/laikago/laikago_toes_zup.urdf")
print("robot_name=", urdf_data.robot_name)
b2vis = meshcat_utils_dp.convert_visuals(urdf_data,
                                         "../../data/laikago/laikago_tex.jpg",
                                         vis, "../../data/laikago/")
is_floating = True
mb = dp.TinyMultiBody(is_floating)
urdf2mb = dp.UrdfToMultiBody2()
res = urdf2mb.convert2(urdf_data, world, mb)
mb.set_base_position(dp.Vector3(0, 0, 0.6))
mb.set_base_orientation(dp.Quaternion(0.0, 0.0, 0.706825181105366,
                                      0.7073882691671998))

knee_angle = -0.5
abduction_angle = 0.2
# Same (abduction, hip, knee) triple for each of the four legs.
initial_poses = [abduction_angle, 0., knee_angle] * 4

qcopy = mb.q
print("mb.q=", mb.q)
print("qcopy=", qcopy)
# The 12 joint angles start at index 7, after the floating-base coordinates.
for q_index in range(12):
  qcopy[q_index + 7] = initial_poses[q_index]
print("qcopy=", qcopy)
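# The base orientation set above is (almost) a 90-degree yaw: for a rotation
# of angle t about z, the quaternion is (0, 0, sin(t/2), cos(t/2)). A quick
# self-contained check:
import math
t = 2.0 * math.atan2(0.706825181105366, 0.7073882691671998)
print(math.degrees(t))  # ~89.95 degrees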
world = pd.TinyWorld()

collision_shape = pd.TinyUrdfCollision()
collision_shape.geometry.geom_type = pd.PLANE_TYPE
collision_shape.origin_xyz = pd.Vector3(0., 0., 2.)
plane_mb = create_multi_body(0.0, [collision_shape], False, world)

collision_shape = pd.TinyUrdfCollision()
collision_shape.geometry.geom_type = pd.SPHERE_TYPE
collision_shape.geometry.sphere.radius = 2.0
sphere_mb = create_multi_body(2.0, [collision_shape], True, world)
sphere_mb.set_base_position(pd.Vector3(0., 0., 25.))

dt = 1. / 240.
mb_solver = pd.TinyMultiBodyConstraintSolver()
while 1:
  pd.forward_dynamics(sphere_mb, pd.Vector3(0., 0., -10.))
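# The loop body above is truncated in the source. A plausible continuation,
# modeled on the rigid-body loop in main() earlier -- the multibody binding
# names below (compute_contacts_multi_body, integrate_euler) are assumptions,
# not confirmed by this file:
#
#   multi_bodies = [plane_mb, sphere_mb]
#   contacts = world.compute_contacts_multi_body(multi_bodies, dispatcher)
#   for contact in contacts:
#     mb_solver.resolve_collision(contact, dt)
#   pd.integrate_euler(sphere_mb, sphere_mb.q, sphere_mb.qd, sphere_mb.qdd, dt)
#
# Sanity check: the plane sits at z = 2 and the sphere radius is 2, so the
# sphere's center should settle near z = 4.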
# Optional meshcat visualization (disabled in this example):
# vis = meshcat.Visualizer(zmq_url='tcp://127.0.0.1:6000')
# vis.delete()
# plane2vis = meshcat_utils_dp.convert_visuals(
#     plane_urdf_data, "../../data/checker_purple.png", vis, "../../data/")

urdf_parser = dp.TinyUrdfParser()
plane_urdf_data = urdf_parser.load_urdf("../../data/bunny.urdf")
plane_mb = dp.TinyMultiBody(False)
plane2mb = dp.UrdfToMultiBody2()
res = plane2mb.convert2(plane_urdf_data, world, plane_mb)

colliders = plane_urdf_data.base_links[0].urdf_collision_shapes
print("num_colliders=", len(colliders))

# One ray crossing the model from left to right, slightly above the origin.
ray_from = [dp.Vector3(-2, .1, 0)]
ray_to = [dp.Vector3(2, .1, 0)]
caster = dp.TinyRaycast()

colliders = []
collision_shape = dp.TinyUrdfCollision()
collision_shape.geometry.geom_type = dp.SPHERE_TYPE
collision_shape.geometry.sphere.radius = 1
colliders.append(collision_shape)

collision_shape = dp.TinyUrdfCollision()
collision_shape.geometry.geom_type = dp.SPHERE_TYPE
collision_shape.geometry.sphere.radius = 1.5
colliders.append(collision_shape)
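# What the raycast computes for a sphere collider, as a self-contained
# reference implementation (illustrative only; dp.TinyRaycast's own API is
# not shown in this snippet): the hit fraction t in [0, 1] along
# ray_from -> ray_to.
import numpy as np

def ray_sphere_fraction(p0, p1, center, radius):
  """Smallest t in [0, 1] where p0 + t*(p1 - p0) hits the sphere, else None."""
  d = p1 - p0
  m = p0 - center
  a = d.dot(d)
  b = 2.0 * m.dot(d)
  c = m.dot(m) - radius * radius
  disc = b * b - 4.0 * a * c
  if disc < 0.0:
    return None  # ray misses the sphere
  t = (-b - np.sqrt(disc)) / (2.0 * a)
  return t if 0.0 <= t <= 1.0 else None

# The ray above against a unit sphere at the origin enters near x = -0.995.
t = ray_sphere_fraction(np.array([-2., .1, 0.]), np.array([2., .1, 0.]),
                        np.zeros(3), 1.0)
print(t)  # ~0.2513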
def joint_angles_from_link_position(self,
                                    robot,
                                    link_position,
                                    link_id,
                                    joint_ids,
                                    position_in_world_frame,
                                    base_translation=(0, 0, 0),
                                    base_rotation=(0, 0, 0, 1)):
  """Uses inverse kinematics to calculate joint angles.

  Args:
    robot: A robot instance.
    link_position: The (x, y, z) of the link in the body or the world frame,
      depending on whether the argument position_in_world_frame is true.
    link_id: The link id as returned from loadURDF.
    joint_ids: The positional index of the joints. This can be different from
      the joint unique ids.
    position_in_world_frame: Whether the input link_position is specified in
      the world frame or the robot's base frame.
    base_translation: Additional base translation.
    base_rotation: Additional base rotation.

  Returns:
    A list of joint angles.
  """
  if not position_in_world_frame:
    # Transform the link position from the base frame into the world frame.
    base_world_tr = self.mb.get_world_transform(-1)
    local_base_tr = dp.TinySpatialTransform()
    local_base_tr.translation = dp.Vector3(base_translation[0],
                                           base_translation[1],
                                           base_translation[2])
    local_base_tr.rotation = dp.Matrix3(
        dp.Quaternion(base_rotation[0], base_rotation[1], base_rotation[2],
                      base_rotation[3]))
    world_tr = base_world_tr * local_base_tr

    local_link_tr = dp.TinySpatialTransform()
    local_link_tr.rotation = dp.Matrix3(dp.Quaternion(0, 0, 0, 1))
    local_link_tr.translation = dp.Vector3(link_position[0], link_position[1],
                                           link_position[2])
    link_tr = world_tr * local_link_tr
    world_link_pos = [
        link_tr.translation[0], link_tr.translation[1], link_tr.translation[2]
    ]
  else:
    world_link_pos = link_position

  target_world_pos = dp.Vector3(world_link_pos[0], world_link_pos[1],
                                world_link_pos[2])
  all_joint_angles = dp.inverse_kinematics(self.mb, link_id, target_world_pos)

  # Skip the 7 floating-base coordinates; the joint angles start at index 7.
  vec = np.array(
      [all_joint_angles[i + 7] for i in range(all_joint_angles.size() - 7)])

  # Extract the relevant joint angles.
  joint_angles = [vec[i] for i in joint_ids]
  return joint_angles
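# A hedged usage sketch for the IK helper above. The link id and joint ids are
# illustrative placeholders, not values taken from this file:
leg_joint_angles = robot.joint_angles_from_link_position(
    robot=robot,
    link_position=(0.17, 0.13, -0.35),  # desired toe position in the base frame
    link_id=3,                          # hypothetical toe link id
    joint_ids=(0, 1, 2),                # positional indices of that leg's joints
    position_in_world_frame=False)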
def __init__(self, simulation_time_step):
  self.MPC_BODY_MASS = MPC_BODY_MASS
  self.MPC_BODY_INERTIA = MPC_BODY_INERTIA
  self.MPC_BODY_HEIGHT = MPC_BODY_HEIGHT
  self.time_step = simulation_time_step
  self.num_legs = NUM_LEGS
  self.num_motors = NUM_MOTORS
  self.skip_sync = 0

  self.world = dp.TinyWorld()
  self.world.friction = 10.0
  self.mb_solver = dp.TinyMultiBodyConstraintSolver()

  self.vis = meshcat.Visualizer(zmq_url='tcp://127.0.0.1:6000')
  self.vis.delete()

  urdf_parser = dp.TinyUrdfParser()
  plane_urdf_data = urdf_parser.load_urdf("../../../data/plane_implicit.urdf")
  plane2vis = meshcat_utils_dp.convert_visuals(plane_urdf_data,
                                               "../../../data/checker_purple.png",
                                               self.vis, "../../../data/")
  self.plane_mb = dp.TinyMultiBody(False)
  plane2mb = dp.UrdfToMultiBody2()
  res = plane2mb.convert2(plane_urdf_data, self.world, self.plane_mb)

  self.urdf_data = urdf_parser.load_urdf(
      "../../../data/laikago/laikago_toes_zup_joint_order.urdf")
  print("robot_name=", self.urdf_data.robot_name)
  self.b2vis = meshcat_utils_dp.convert_visuals(
      self.urdf_data, "../../../data/laikago/laikago_tex.jpg", self.vis,
      "../../../data/laikago/")
  is_floating = True
  self.mb = dp.TinyMultiBody(is_floating)
  urdf2mb = dp.UrdfToMultiBody2()
  self.res = urdf2mb.convert2(self.urdf_data, self.world, self.mb)
  self.mb.set_base_position(dp.Vector3(0, 0, 0.6))

  knee_angle = -0.5
  abduction_angle = 0.2
  self.initial_poses = [abduction_angle, 0., knee_angle] * 4
  # The 12 joint angles start at index 7, after the floating-base coordinates.
  qcopy = self.mb.q
  for q_index in range(12):
    qcopy[q_index + 7] = self.initial_poses[q_index]
  self.mb.set_q(qcopy)

  for link in self.mb.links:
    print("joint_name=", link.joint_name)
  dp.forward_kinematics(self.mb, self.mb.q, self.mb.qd)

  self._BuildJointNameToIdDict()
  self._BuildUrdfIds()
  self._BuildMotorIdList()
  self.ResetPose()
  self._motor_enabled_list = [True] * self.num_motors
  self._step_counter = 0
  self._state_action_counter = 0
  self._motor_offset = np.array([0.] * 12)
  self._motor_direction = np.array([1.] * 12)
  self.ReceiveObservation()
  self._kp = self.GetMotorPositionGains()
  self._kd = self.GetMotorVelocityGains()
  self._motor_model = LaikagoMotorModel(
      kp=self._kp, kd=self._kd, motor_control_mode=MOTOR_CONTROL_HYBRID)
  self.ReceiveObservation()

  self.mb.set_base_position(
      dp.Vector3(START_POS[0], START_POS[1], START_POS[2]))
  self.mb.set_base_orientation(
      dp.Quaternion(START_ORN[0], START_ORN[1], START_ORN[2], START_ORN[3]))
  dp.forward_kinematics(self.mb, self.mb.q, self.mb.qd)
  meshcat_utils_dp.sync_visual_transforms(self.mb, self.b2vis, self.vis)
  self._SettleDownForReset(reset_time=1.0)
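# Hedged usage: the class name is not visible in this excerpt, so "SimRobot"
# below is a placeholder for whatever the surrounding file calls it.
robot = SimRobot(simulation_time_step=1. / 240.)
robot.ReceiveObservation()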