def insert6DofGlobalMarker(self, T_W_M):
    int_position_marker = InteractiveMarker()
    int_position_marker.header.frame_id = 'world'
    int_position_marker.name = 'obj_pose_marker'
    int_position_marker.scale = 0.1
    int_position_marker.pose = pm.toMsg(T_W_M)

    int_position_marker.controls.append(
        self.createInteractiveMarkerControl6DOF(InteractiveMarkerControl.ROTATE_AXIS, 'x'))
    int_position_marker.controls.append(
        self.createInteractiveMarkerControl6DOF(InteractiveMarkerControl.ROTATE_AXIS, 'y'))
    int_position_marker.controls.append(
        self.createInteractiveMarkerControl6DOF(InteractiveMarkerControl.ROTATE_AXIS, 'z'))
    int_position_marker.controls.append(
        self.createInteractiveMarkerControl6DOF(InteractiveMarkerControl.MOVE_AXIS, 'x'))
    int_position_marker.controls.append(
        self.createInteractiveMarkerControl6DOF(InteractiveMarkerControl.MOVE_AXIS, 'y'))
    int_position_marker.controls.append(
        self.createInteractiveMarkerControl6DOF(InteractiveMarkerControl.MOVE_AXIS, 'z'))
    self.mk_server.insert(int_position_marker, self.processFeedback)

    int_button_marker = InteractiveMarker()
    int_button_marker.header.frame_id = 'world'
    int_button_marker.name = 'obj_button_marker'
    int_button_marker.scale = 0.2
    int_button_marker.pose = pm.toMsg(PyKDL.Frame())
    box = self.createButtonMarkerControl(Point(0.05, 0.05, 0.05), Point(0.0, 0.0, 0.15))
    box.interaction_mode = InteractiveMarkerControl.BUTTON
    box.name = 'button'
    int_button_marker.controls.append(box)
    self.mk_server.insert(int_button_marker, self.processFeedback)

    self.mk_server.applyChanges()

def step(self):
    if self.T_ is None:
        self.init_tf()
        return
    if self.recv_:
        self.recv_ = False
        pose = self.odom_.pose.pose
        T0 = pm.toMatrix(pm.fromMsg(pose))  # android_odom --> android

        # send odom -> base_link transform
        T = tf.transformations.concatenate_matrices(self.T_, T0, self.Ti_)
        frame = pm.fromMatrix(T)
        if self.pub_tf_:
            txn, qxn = pm.toTf(frame)
            self.tfb_.sendTransform(txn, qxn, self.odom_.header.stamp, 'odom', 'base_link')

        # send as msg
        # TODO : does not deal with twist/covariance information
        msg = pm.toMsg(frame)
        self.cvt_msg_.pose.pose = msg
        self.cvt_msg_.header.frame_id = 'map'  # experimental
        self.cvt_msg_.header.stamp = self.odom_.header.stamp
        self.pub_.publish(self.cvt_msg_)

def pose_callback(self, pose):
    # This code is based on:
    # https://github.com/ros-planning/navigation/blob/jade-devel/amcl/src/amcl_node.cpp
    try:
        self.tf_listener.waitForTransform(
            'map',   # from here
            'odom',  # to here
            pose.header.stamp,
            rospy.Duration(1.0))
        frame = posemath.fromMsg(pose.pose).Inverse()
        pose.pose = posemath.toMsg(frame)
        pose.header.frame_id = 'base_link'
        odom_pose = self.tf_listener.transformPose('odom', pose)
        frame = posemath.fromMsg(odom_pose.pose).Inverse()
        odom_pose.pose = posemath.toMsg(frame)

        self.transform_position[0] = odom_pose.pose.position.x
        self.transform_position[1] = odom_pose.pose.position.y
        self.transform_quaternion[0] = odom_pose.pose.orientation.x
        self.transform_quaternion[1] = odom_pose.pose.orientation.y
        self.transform_quaternion[2] = odom_pose.pose.orientation.z
        self.transform_quaternion[3] = odom_pose.pose.orientation.w
    except tf.Exception as e:
        print e
        print "(May not be a big deal.)"

def compute_frames(self, req):
    # read nominal poses, and set as initial positions
    self.crc.set_intial_frames(posemath.fromMsg(self.w_P_c),
                               posemath.fromMsg(self.ee_P_m))

    # do several iterations of estimation
    n_comp = 6
    residue_max = []
    residue_mod = []
    for i in range(n_comp):
        print '\ncurrent position'
        print self.crc.w_T_c.p
        residue = self.crc.compute_frames()
        r2 = residue.transpose() * residue
        residue_mod.append(num.sqrt(r2[0, 0]))
        residue_max.append(num.max(num.abs(residue)))
    print '\nresidue_mod'
    print residue_mod
    print '\nresidue_max'
    print residue_max

    # put the result back in the parameters
    print '\nee_T_m'
    print self.crc.ee_T_m
    print '\nw_T_c'
    print self.crc.w_T_c
    self.ee_P_m = posemath.toMsg(self.crc.ee_T_m)
    self.w_P_c = posemath.toMsg(self.crc.w_T_c)
    return EmptyResponse()

def irp6p_multi_trajectory2():
    irpos = IRPOS("IRpOS", "Irp6p", 6)

    irpos.move_to_motor_position([0.4, -1.5418065817051163, 0.0, 1.57, 1.57, -2.0], 10.0)
    irpos.move_to_motor_position([10.0, 10.0, 0.0, 10.57, 10.57, -20.0], 2.0)

    irpos.move_to_joint_position([0.4, -1.5418065817051163, 0.0, 1.5, 1.57, -2.0], 3.0)
    irpos.move_to_joint_position([0.0, -1.5418065817051163, 0.0, 1.5, 1.57, -2.0], 3.0)

    rot = PyKDL.Frame(PyKDL.Rotation.EulerZYZ(0.0, 1.4, 3.14),
                      PyKDL.Vector(0.705438961242, -0.1208864692291, 1.18029263241))
    rot2 = PyKDL.Frame(PyKDL.Rotation.EulerZYZ(0.3, 1.4, 3.14),
                       PyKDL.Vector(0.705438961242, -0.1208864692291, 1.181029263241))

    irpos.move_to_cartesian_pose(3.0, Pose(Point(0.705438961242, -0.1208864692291, 1.181029263241),
                                           Quaternion(0.675351045979, 0.0892025112399, 0.698321120995, 0.219753244928)))
    irpos.move_to_cartesian_pose(3.0, pm.toMsg(rot))
    irpos.move_to_cartesian_pose(3.0, pm.toMsg(rot2))

    toolParams = Pose(Point(0.0, 0.0, 0.0), Quaternion(0.0, 0.0, 0.0, 1.0))
    irpos.set_tool_geometry_params(toolParams)

    rot = PyKDL.Frame(PyKDL.Rotation.EulerZYZ(0.0, 1.4, 3.14),
                      PyKDL.Vector(0.705438961242, -0.1208864692291, 1.181029263241))

    irpos.move_to_cartesian_pose(3.0, Pose(Point(0.705438961242, -0.1208864692291, 1.181029263241),
                                           Quaternion(0.675351045979, 0.0892025112399, 0.698321120995, 0.219753244928)))
    irpos.move_to_cartesian_pose(3.0, pm.toMsg(rot))
    irpos.move_to_cartesian_pose(3.0, Pose(Point(0.705438961242, -0.1208864692291, 1.181029263241),
                                           Quaternion(0.63691, 0.096783, 0.75634, -0.11369)))

    toolParams = Pose(Point(0.0, 0.0, 0.25), Quaternion(0.0, 0.0, 0.0, 1.0))
    irpos.set_tool_geometry_params(toolParams)

    print "Irp6p 'multi_trajectory2' test completed"

def test_current_pose(self):
    """ Test the current pose method

        1. create and publish tf
        2. get current pose with base and ref
        3. move robot to ref
        4. get current pose with ref and base
        5. analyse positions
    """
    rospy.loginfo("test_current_pose")

    self.robot.move(Ptp(goal=[0, 0, 0, 0, 0, 0]))

    # get and transform test data
    ref_frame = self.test_data.get_pose("Blend_1_Mid", PLANNING_GROUP_NAME)
    ref_frame_tf = pm.toTf(pm.fromMsg(ref_frame))
    rospy.sleep(rospy.Duration.from_sec(0.5))

    # init
    tcp_ref = [[0, 0, 0], [0, 0, 0, 1]]
    tcp_base = [[0, 0, 0], [0, 0, 0, 1]]
    time_tf = rospy.Time.now()
    base_frame = self.robot._robot_commander.get_planning_frame()

    try:
        self.tf.sendTransform(ref_frame_tf[0], ref_frame_tf[1], time_tf, "ref_frame", base_frame)
        self.tf_listener.waitForTransform(base_frame, "ref_frame", time_tf, rospy.Duration(2, 0))
        rospy.sleep(rospy.Duration.from_sec(0.1))
        tcp_ref = self.tf_listener.lookupTransform("ref_frame", "prbt_tcp", time_tf)
        tcp_base = self.tf_listener.lookupTransform(base_frame, "prbt_tcp", time_tf)
    except Exception as e:
        print e
        rospy.logerr("Failed to setup transforms for test!")

    tcp_ref_msg = pm.toMsg(pm.fromTf(tcp_ref))
    tcp_base_msg = pm.toMsg(pm.fromTf(tcp_base))

    # read current pose, move robot and do it again
    start_bf = self.robot.get_current_pose()
    start_rf = self.robot.get_current_pose(base="ref_frame")
    self.robot.move(Ptp(goal=ref_frame))

    # resend tf (otherwise transformPose could read an old transform); keeps the timestamps close together
    time_tf = rospy.Time.now()
    self.tf.sendTransform(ref_frame_tf[0], ref_frame_tf[1], time_tf, "ref_frame", base_frame)
    self.tf_listener.waitForTransform(base_frame, "ref_frame", time_tf, rospy.Duration(1, 0))

    ref_frame_bf = self.robot.get_current_pose()
    ref_frame_rf = self.robot.get_current_pose(base="ref_frame")

    self._analyze_pose(tcp_base_msg, start_bf)
    self._analyze_pose(tcp_ref_msg, start_rf)
    self._analyze_pose(ref_frame, ref_frame_bf)
    self._analyze_pose(Pose(orientation=Quaternion(0, 0, 0, 1)), ref_frame_rf)

def TFThread(self):
    while not rospy.is_shutdown():
        small_frame_list = []
        big_frame_list = []
        self_frame = self.robot_id + "/map"
        for tf_frame, tf_msg in self.tf_between_robots_dict.items():
            if tf_frame[:len(self_frame)] == self_frame:
                big_frame_list.append(tf_msg.child_frame_id)
            elif tf_frame[-len(self_frame):] == self_frame:
                small_frame_list.append(tf_msg.header.frame_id)

        small_frame_list.sort()
        for i in range(1, len(small_frame_list)):
            tf1 = self.tf_between_robots_dict[small_frame_list[i - 1] + ' to ' + self_frame]
            tf2 = self.tf_between_robots_dict[small_frame_list[i] + ' to ' + self_frame]
            pose1 = posemath.fromMsg(trans2pose(tf1.transform))
            pose2 = posemath.fromMsg(trans2pose(tf2.transform))
            tf_msg = TransformStamped()
            tf_msg.header.stamp = rospy.Time.now()
            tf_msg.header.frame_id = tf1.header.frame_id
            tf_msg.child_frame_id = tf2.header.frame_id
            tf_msg.transform = pose2trans(posemath.toMsg(pose1 * pose2.Inverse()))
            print("publish tf: " + tf_msg.header.frame_id + " to " + tf_msg.child_frame_id)
            self.br.sendTransformMessage(tf_msg)
        if len(small_frame_list) > 0:
            print("publish tf: " + small_frame_list[-1] + ' to ' + self_frame)
            self.br.sendTransformMessage(
                self.tf_between_robots_dict[small_frame_list[-1] + ' to ' + self_frame])

        big_frame_list.sort()
        if len(big_frame_list) > 0:
            print("publish tf: " + self_frame + ' to ' + big_frame_list[0])
            self.br.sendTransformMessage(
                self.tf_between_robots_dict[self_frame + ' to ' + big_frame_list[0]])
        for i in range(1, len(big_frame_list)):
            tf1 = self.tf_between_robots_dict[self_frame + ' to ' + big_frame_list[i - 1]]
            tf2 = self.tf_between_robots_dict[self_frame + ' to ' + big_frame_list[i]]
            pose1 = posemath.fromMsg(trans2pose(tf1.transform))
            pose2 = posemath.fromMsg(trans2pose(tf2.transform))
            tf_msg = TransformStamped()
            tf_msg.header.stamp = rospy.Time.now()
            tf_msg.header.frame_id = tf1.child_frame_id
            tf_msg.child_frame_id = tf2.child_frame_id
            tf_msg.transform = pose2trans(posemath.toMsg(pose1.Inverse() * pose2))
            print("publish tf: " + tf_msg.header.frame_id + " to " + tf_msg.child_frame_id)
            self.br.sendTransformMessage(tf_msg)

        time.sleep(1)

def _getCollisionObject(name, urdf, pose, operation):
    '''
    This function takes a urdf and a TF pose and updates the collision
    geometry associated with the current planning scene.
    '''
    co = CollisionObject()
    co.id = name
    co.operation = operation

    if len(urdf.links) > 1:
        rospy.logwarn('collision object parser does not currently support kinematic chains')

    for l in urdf.links:
        # Compute the link pose based on the origin
        if l.origin is None:
            link_pose = pose
        else:
            link_pose = pose * kdl.Frame(kdl.Rotation.RPY(*l.origin.rpy),
                                         kdl.Vector(*l.origin.xyz))

        for c in l.collisions:
            # Only update the geometry if we actually need to add the
            # object to the collision scene.
            # Check the type of each collision tag.
            if isinstance(c.geometry, urdf_parser_py.urdf.Box):
                primitive = True
                if co.operation == CollisionObject.ADD:
                    element = SolidPrimitive()
                    element.type = SolidPrimitive.BOX
                    element.dimensions = list(c.geometry.size)
                    co.primitives.append(element)
            elif isinstance(c.geometry, urdf_parser_py.urdf.Mesh):
                primitive = False
                if co.operation == CollisionObject.ADD:
                    scale = (1, 1, 1)
                    if c.geometry.scale is not None:
                        scale = c.geometry.scale
                    element = _loadMesh(c.geometry, scale)
                    co.meshes.append(element)
            else:
                raise NotImplementedError(
                    "we do not currently support geometry of type %s" % (str(type(c.geometry))))

            # Compose the collision origin with the link pose; use a separate
            # name so the object pose argument is not clobbered.
            c_pose = kdl.Frame(kdl.Rotation.RPY(*c.origin.rpy), kdl.Vector(*c.origin.xyz))
            c_pose = link_pose * c_pose
            if primitive:
                co.primitive_poses.append(pm.toMsg(c_pose))
            else:
                # was a mesh
                co.mesh_poses.append(pm.toMsg(c_pose))

    return co

def set_pose_frame(self, frame1, frame2):
    """
    :param frame1: first PyKDL.Frame goal
    :param frame2: second PyKDL.Frame goal
    """
    msg1 = posemath.toMsg(frame1)
    msg2 = posemath.toMsg(frame2)
    self.__set_position_goal_cartesian_publish_and_wait(msg1)
    self.__set_position_goal_cartesian_publish_and_wait(msg2)
    return True

def oplus(cur_estimate, step):
    result = deepcopy(cur_estimate)

    # loop over cameras
    for camera, res, camera_index in zip(cur_estimate.cameras, result.cameras,
                                         [r * pose_width for r in range(len(cur_estimate.cameras))]):
        res.pose = posemath.toMsg(pose_oplus(posemath.fromMsg(camera.pose),
                                             step[camera_index:camera_index + pose_width]))
    # loop over targets
    for i, target in enumerate(cur_estimate.targets):
        target_index = (len(cur_estimate.cameras) + i) * pose_width
        result.targets[i] = posemath.toMsg(pose_oplus(posemath.fromMsg(target),
                                                      step[target_index:target_index + pose_width]))
    return result

def compute_net_transform(self, base_pose):
    base_to_odom = self.tfl.lookupTransform("/base_footprint", "/odom_combined", rospy.Time())
    bto = pm.fromTf(base_to_odom)
    net_t = base_pose * bto
    print "Setting "
    print pm.toMsg(net_t)
    self.broad.transform = pm.toTf(net_t)

def _dataToPose(self, data):
    '''
    Get visualization information as a vector of poses for whatever object
    we are currently manipulating.
    '''
    msg = PoseArray()
    for traj in data:
        for world in traj:
            if world['orange'] is not None:
                msg.poses.append(pm.toMsg(world['orange']))
                msg.poses.append(pm.toMsg(world['box']))
                msg.poses.append(pm.toMsg(world['trash']))
                msg.poses.append(pm.toMsg(world['squeeze_area']))
    return msg

def omni_callback(joint_state):
    global update_pub, last_button_state
    sendTf(marker_tf, '/marker', fixed_frame)
    sendTf(zero_tf, '/base', fixed_frame)
    sendTf(marker_ref, '/marker_ref', fixed_frame)
    sendTf(stylus_ref, '/stylus_ref', fixed_frame)

    try:
        update = InteractionCursorUpdate()
        update.pose.header = std_msgs.msg.Header()
        update.pose.header.stamp = rospy.Time.now()
        update.pose.header.frame_id = 'marker_ref'

        if button_clicked and last_button_state == update.GRAB:
            update.button_state = update.KEEP_ALIVE
        elif button_clicked and last_button_state == update.KEEP_ALIVE:
            update.button_state = update.KEEP_ALIVE
        elif button_clicked:
            update.button_state = update.GRAB
        elif last_button_state == update.KEEP_ALIVE:
            update.button_state = update.RELEASE
        else:
            update.button_state = update.NONE
        updateRefs()
        update.key_event = 0

        control_tf = ((0, 0, 0), tf.transformations.quaternion_from_euler(0, 0, control_offset))
        if button_clicked:
            # Get pose corresponding to transform between stylus reference and current position.
            stylus_tf = listener.lookupTransform('/stylus_ref', '/stylus', rospy.Time(0))
            stylus_matrix = pm.toMatrix(pm.fromTf(stylus_tf))
            control_matrix = pm.toMatrix(pm.fromTf(control_tf))
            p = pm.toMsg(pm.fromMatrix(numpy.dot(control_matrix, stylus_matrix)))
        else:
            p = pm.toMsg(pm.fromTf(control_tf))

        # Simply scale this up a bit to increase the workspace.
        workspace = 4
        p.position.x = p.position.x * workspace
        p.position.y = p.position.y * workspace
        p.position.z = p.position.z * workspace

        update.pose.pose = p
        last_button_state = update.button_state

        # Publish feedback.
        update_pub.publish(update)
    except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
        rospy.logerr("Couldn't look up transform. These things happen...")

def moveEffector(self, prefix, T_B_Td, t, max_wrench, start_time=0.01, stamp=None, path_tol=None):
    behaviour = self.getControllerBehaviour()
    if behaviour != self.BEHAVOIUR_CART_IMP and behaviour != self.BEHAVOIUR_CART_IMP_FT:
        print "moveEffector " + prefix + ": wrong behaviour " + self.getBehaviourName(behaviour)
        return False

    self.joint_traj_active = False
    wrist_pose = pm.toMsg(T_B_Td)
    # self.br.sendTransform([wrist_pose.position.x, wrist_pose.position.y, wrist_pose.position.z],
    #     [wrist_pose.orientation.x, wrist_pose.orientation.y, wrist_pose.orientation.z, wrist_pose.orientation.w],
    #     rospy.Time.now(), "dest", "torso_base")

    action_trajectory_goal = CartesianTrajectoryGoal()
    if stamp is not None:
        action_trajectory_goal.trajectory.header.stamp = stamp
    else:
        action_trajectory_goal.trajectory.header.stamp = rospy.Time.now() + rospy.Duration(start_time)
    action_trajectory_goal.trajectory.points.append(CartesianTrajectoryPoint(
        rospy.Duration(t), wrist_pose, Twist()))
    action_trajectory_goal.wrench_constraint = self.wrenchKDLtoROS(max_wrench)
    if path_tol is not None:
        action_trajectory_goal.path_tolerance.position = geometry_msgs.msg.Vector3(
            path_tol.vel.x(), path_tol.vel.y(), path_tol.vel.z())
        action_trajectory_goal.path_tolerance.rotation = geometry_msgs.msg.Vector3(
            path_tol.rot.x(), path_tol.rot.y(), path_tol.rot.z())
    self.current_max_wrench = self.wrenchKDLtoROS(max_wrench)
    self.action_cart_traj_client[prefix].send_goal(
        action_trajectory_goal, feedback_cb=self.action_right_cart_traj_feedback_cb)
    return True

def getPlanWaypoints(self, waypoints_in_kdl_frame, q, obj=None):
    cartesian_path_req = GetCartesianPathRequest()
    cartesian_path_req.header.frame_id = self.base_link
    cartesian_path_req.start_state = RobotState()
    cartesian_path_req.start_state.joint_state.name = self.joint_names
    if type(q) is list:
        cartesian_path_req.start_state.joint_state.position = q
    else:
        cartesian_path_req.start_state.joint_state.position = q.tolist()
    cartesian_path_req.group_name = self.group
    cartesian_path_req.link_name = self.joint_names[-1]
    cartesian_path_req.avoid_collisions = False
    cartesian_path_req.max_step = 50
    cartesian_path_req.jump_threshold = 0
    # cartesian_path_req.path_constraints = Constraints()

    if obj is not None:
        self.updateAllowedCollisions(obj, True)

    cartesian_path_req.waypoints = list()
    for T in waypoints_in_kdl_frame:
        cartesian_path_req.waypoints.append(pm.toMsg(T))

    res = self.cartesian_path_plan.call(cartesian_path_req)

    if obj is not None:
        self.updateAllowedCollisions(obj, False)

    return (res.error_code.val, res)

def execute(self, wait=True, plan_only=False):
    """ Plan and execute a cartesian path through the specified waypoints """
    waypoints = [pm.toMsg(f) for f in self._frames]
    (plan, frac) = self._arm.compute_cartesian_path(waypoints,
                                                    eef_step=0.005,
                                                    jump_threshold=5.0)
    if frac < 1.0:
        return False
    if plan_only:
        return True

    # Drop points that share a time_from_start with the previous point,
    # so the trajectory is strictly monotonic in time.
    t = rospy.Duration()
    points = []
    for p in plan.joint_trajectory.points:
        if p.time_from_start != t:
            points.append(p)
            t = p.time_from_start
    plan.joint_trajectory.points = points

    if not self._arm.execute(plan, wait):
        current = self._arm.get_current_joint_values()
        desired = plan.joint_trajectory.points[-1].positions
        if not np.allclose(current, desired, atol=0.02):
            return False
    return True

def callback(m):
    global pub
    global stop
    global data
    global tfListener
    global numberOfFootstepsInList

    if ready:
        if not stop:
            print('Preparing the footstep message')

            # Wait for transforms
            tfListener.waitForTransform("/pelvis", "/leftCOP_Frame", rospy.Time(), rospy.Duration(4.0))
            tfListener.waitForTransform("/pelvis", "/rightCOP_Frame", rospy.Time(), rospy.Duration(4.0))

            # Get the current robot pose
            pos1, rot1 = tfListener.lookupTransform("/pelvis", "/leftCOP_Frame", rospy.Time())
            pos2, rot2 = tfListener.lookupTransform("/pelvis", "/rightCOP_Frame", rospy.Time())
            pos = (np.array(pos1) + np.array(pos2)) * 0.5
            rot = pm.transformations.quaternion_slerp(rot1, rot2, 0.5)
            midFeet = pm.Frame(pm.Rotation.Quaternion(rot[0], rot[1], rot[2], rot[3]),
                               pm.Vector(pos[0], pos[1], pos[2]))
            pose = pm.fromMsg(m.pose.pose) * midFeet

            # Get the footstep trajectory from YAML
            message = message_converter.convert_dictionary_to_ros_message(
                'controller_msgs/FootstepDataListMessage', data)

            # Update the footstep frames
            numberOfFootstepsInList = len(message.footstep_data_list)
            for step in message.footstep_data_list:
                pstep = pose * pm.Frame(
                    pm.Rotation.Quaternion(step.orientation.x, step.orientation.y,
                                           step.orientation.z, step.orientation.w),
                    pm.Vector(step.location.x, step.location.y, step.location.z))
                msg = pm.toMsg(pstep)
                step.location = msg.position

                # Normalize the orientation
                quat_original = np.array([msg.orientation.x, msg.orientation.y,
                                          msg.orientation.z, msg.orientation.w])
                quat_normalized = quat_original / np.linalg.norm(quat_original)
                (msg.orientation.x, msg.orientation.y,
                 msg.orientation.z, msg.orientation.w) = quat_normalized

                # Update the step orientation
                step.orientation = msg.orientation

            print('Relocated footsteps to a local frame')
            print('Publishing...')
            pub.publish(message)
            stop = True
            print('Waiting for execution...')

def irp6p_multi_trajectory():
    irpos = IRPOS("IRpOS", "Irp6p", 6)

    motor_trajectory = [
        JointTrajectoryPoint([0.4, -1.5418065817051163, 0.0, 1.57, 1.57, -2.0],
                             [0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [], [], rospy.Duration(10.0)),
        JointTrajectoryPoint([10.0, 10.0, 0.0, 10.57, 10.57, -20.0],
                             [0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [], [], rospy.Duration(12.0))]
    irpos.move_along_motor_trajectory(motor_trajectory)

    joint_trajectory = [
        JointTrajectoryPoint([0.4, -1.5418065817051163, 0.0, 1.5, 1.57, -2.0],
                             [0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [], [], rospy.Duration(3.0)),
        JointTrajectoryPoint([0.0, -1.5418065817051163, 0.0, 1.5, 1.57, -2.0],
                             [0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [], [], rospy.Duration(6.0))]
    irpos.move_along_joint_trajectory(joint_trajectory)

    rot = PyKDL.Frame(PyKDL.Rotation.EulerZYZ(0.0, 1.4, 3.14),
                      PyKDL.Vector(0.705438961242, -0.1208864692291, 1.18029263241))
    rot2 = PyKDL.Frame(PyKDL.Rotation.EulerZYZ(0.3, 1.4, 3.14),
                       PyKDL.Vector(0.705438961242, -0.1208864692291, 1.181029263241))
    cartesianTrajectory = [
        CartesianTrajectoryPoint(rospy.Duration(3.0),
                                 Pose(Point(0.705438961242, -0.1208864692291, 1.181029263241),
                                      Quaternion(0.675351045979, 0.0892025112399, 0.698321120995, 0.219753244928)),
                                 Twist()),
        CartesianTrajectoryPoint(rospy.Duration(6.0), pm.toMsg(rot), Twist()),
        CartesianTrajectoryPoint(rospy.Duration(9.0), pm.toMsg(rot2), Twist())]
    irpos.move_along_cartesian_trajectory(cartesianTrajectory)

    toolParams = Pose(Point(0.0, 0.0, 0.0), Quaternion(0.0, 0.0, 0.0, 1.0))
    irpos.set_tool_geometry_params(toolParams)

    rot = PyKDL.Frame(PyKDL.Rotation.EulerZYZ(0.0, 1.4, 3.14),
                      PyKDL.Vector(0.705438961242, -0.1208864692291, 1.181029263241))
    cartesianTrajectory = [
        CartesianTrajectoryPoint(rospy.Duration(3.0),
                                 Pose(Point(0.705438961242, -0.1208864692291, 1.181029263241),
                                      Quaternion(0.675351045979, 0.0892025112399, 0.698321120995, 0.219753244928)),
                                 Twist()),
        CartesianTrajectoryPoint(rospy.Duration(6.0), pm.toMsg(rot), Twist()),
        CartesianTrajectoryPoint(rospy.Duration(9.0),
                                 Pose(Point(0.705438961242, -0.1208864692291, 1.181029263241),
                                      Quaternion(0.63691, 0.096783, 0.75634, -0.11369)),
                                 Twist())]
    irpos.move_along_cartesian_trajectory(cartesianTrajectory)

    toolParams = Pose(Point(0.0, 0.0, 0.25), Quaternion(0.0, 0.0, 0.0, 1.0))
    irpos.set_tool_geometry_params(toolParams)

    print "Irp6p 'multi_trajectory' test completed"

def gotimage(self, imgmsg):
    if imgmsg.encoding == "bayer_bggr8":
        imgmsg.encoding = "8UC1"
    img = CvBridge().imgmsg_to_cv(imgmsg)
    # cv.ShowImage("DataMatrix", img)
    # cv.WaitKey(6)

    self.track(img)

    # monocular case
    print self.cams
    if 'l' in self.cams:
        for (code, corners) in self.tracking.items():
            model = cv.fromarray(numpy.array(
                [[0, 0, 0], [.1, 0, 0], [.1, .1, 0], [0, .1, 0]], numpy.float32))
            rot = cv.CreateMat(3, 1, cv.CV_32FC1)
            trans = cv.CreateMat(3, 1, cv.CV_32FC1)
            cv.FindExtrinsicCameraParams2(model,
                                          cv.fromarray(numpy.array(corners, numpy.float32)),
                                          self.cams['l'].intrinsicMatrix(),
                                          self.cams['l'].distortionCoeffs(),
                                          rot, trans)

            ts = geometry_msgs.msg.TransformStamped()
            ts.header.frame_id = imgmsg.header.frame_id
            ts.header.stamp = imgmsg.header.stamp
            ts.child_frame_id = code
            posemsg = pm.toMsg(pm.fromCameraParams(cv, rot, trans))
            ts.transform.translation = posemsg.position
            ts.transform.rotation = posemsg.orientation

            tfm = tf.msg.tfMessage([ts])
            self.pub_tf.publish(tfm)

def Transarray2G2O(g2ofname, trans_array):
    index = 1
    prev_pose = Pose()
    prev_pose.orientation.w = 1
    rel_pose = Pose()
    with open(g2ofname, "w") as g2ofile:
        for posetrans in trans_array.transformArray:
            pose = trans2pose(posetrans.transform)
            if index == 1:
                pass
            else:
                rel_pose = posemath.toMsg(posemath.fromMsg(prev_pose).Inverse() *
                                          posemath.fromMsg(pose))
                # tmp = posemath.toMsg(posemath.fromMsg(prev_pose) * posemath.fromMsg(rel_pose))
            prev_pose = pose
            print >> g2ofile, "EDGE_SE3:QUAT {id1} {id2} {tx} {ty} {tz} {rx} {ry} {rz} {rw} {COVAR_STR}".format(
                id1=posetrans.child_frame_id,
                id2=posetrans.header.frame_id,
                tx=rel_pose.position.x,
                ty=rel_pose.position.y,
                tz=rel_pose.position.z,
                rx=rel_pose.orientation.x,
                ry=rel_pose.orientation.y,
                rz=rel_pose.orientation.z,
                rw=rel_pose.orientation.w,
                COVAR_STR=COVAR_STR)
            # print >> g2ofile, "VERTEX_SE3:QUAT {pointID} {tx} {ty} {tz} {rx} {ry} {rz} {rw}".format(
            #     pointID=posetrans.header.frame_id,
            #     tx=pose.position.x, ty=pose.position.y, tz=pose.position.z,
            #     rx=pose.orientation.x, ry=pose.orientation.y,
            #     rz=pose.orientation.z, rw=pose.orientation.w)
            index += 1

def move_rel_to_cartesian_pose_with_contact(self, time_from_start, rel_pose, wrench_constraint):
    print self.BCOLOR + "[IRPOS] Move relative to cartesian trajectory with contact" + self.ENDC

    self.conmanSwitch([self.robot_name + 'mPoseInt', self.robot_name + 'mForceTransformation'], [], True)
    time.sleep(0.05)

    actual_pose = self.get_cartesian_pose()

    # Transform poses to frames.
    actualFrame = pm.fromMsg(actual_pose)
    relativeFrame = pm.fromMsg(rel_pose)
    desiredFrame = actualFrame * relativeFrame
    pose = pm.toMsg(desiredFrame)

    cartesianGoal = CartesianTrajectoryGoal()
    cartesianGoal.wrench_constraint = wrench_constraint
    cartesianGoal.trajectory.points.append(
        CartesianTrajectoryPoint(rospy.Duration(time_from_start), pose, Twist()))
    cartesianGoal.trajectory.header.stamp = rospy.get_rostime() + rospy.Duration(0.1)

    self.pose_client.send_goal(cartesianGoal)
    self.pose_client.wait_for_result()

    result = self.pose_client.get_result()
    code = self.cartesian_error_code_to_string(result.error_code)
    print self.BCOLOR + "[IRPOS] Result: " + str(code) + self.ENDC

    self.conmanSwitch([], [self.robot_name + 'mPoseInt', self.robot_name + 'mForceTransformation'], True)

def set_pose_frame(self, frame):
    """
    :param frame: PyKDL.Frame
    """
    msg = posemath.toMsg(frame)
    return self.__set_position_goal_cartesian_publish_and_wait(msg)

def get_new_waypoint(self, obj):
    try:
        # TODO: make the snap configurable
        # (trans, rot) = self.listener.lookupTransform(obj, self.endpoint, rospy.Time(0))
        (eg_trans, eg_rot) = self.listener.lookupTransform(self.gripper_center, self.endpoint, rospy.Time(0))
        (og_trans, og_rot) = self.listener.lookupTransform(obj, self.gripper_center, rospy.Time(0))

        rospy.logwarn("gripper obj:" + str((og_trans, og_rot)))
        rospy.logwarn("endpoint gripper:" + str((eg_trans, eg_rot)))

        xyz, rpy = [], []
        for dim in og_trans:
            # TODO: test to see if we should re-enable this
            if abs(dim) < 0.0001:
                xyz.append(0.)
            else:
                xyz.append(dim)
        # Snap each rotation component to the nearest multiple of pi/8
        Rog = kdl.Rotation.Quaternion(*og_rot)
        for dim in Rog.GetRPY():
            rpy.append(np.round(dim / np.pi * 8.) * np.pi / 8.)

        Rog_corrected = kdl.Rotation.RPY(*rpy)
        Vog_corrected = kdl.Vector(*xyz)
        Tog_corrected = kdl.Frame(Rog_corrected, Vog_corrected)

        rospy.logwarn(str(Tog_corrected) + ", " + str(Rog_corrected.GetRPY()))

        Teg = pm.fromTf((eg_trans, eg_rot))
        return pm.toMsg(Tog_corrected * Teg)
    except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
        rospy.logerr('Failed to lookup transform from %s to %s' % (obj, self.endpoint))
    return None

def transformPose(self, pose, targetFrame):
    with self._lock:
        tTargetFramePoseFrame = None
        for trial in range(10):
            self.checkPreemption()
            try:
                tTargetFramePoseFrame = self._tfListener.lookupTransform(
                    targetFrame, pose.header.frame_id, rospy.Time(0))
                break
            except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
                rospy.sleep(0.5)
        if tTargetFramePoseFrame is None:
            raise ValueError('Could not transform pose from ' +
                             pose.header.frame_id + ' to ' + targetFrame)

        # Convert poses to KDL
        fTargetFramePoseFrame = posemath.fromTf(tTargetFramePoseFrame)
        fPoseFrameChild = posemath.fromMsg(pose.pose)

        # Compute the transformation
        fTargetFrameChild = fTargetFramePoseFrame * fPoseFrameChild

        # Convert back to a ROS message
        poseStampedTargetFrameChild = PoseStamped()
        poseStampedTargetFrameChild.pose = posemath.toMsg(fTargetFrameChild)
        poseStampedTargetFrameChild.header.stamp = rospy.Time.now()
        poseStampedTargetFrameChild.header.frame_id = targetFrame
        return poseStampedTargetFrameChild

def set_pose(self, pos, rot, unit='rad', wait_callback=True):
    """
    :param pos: position array [x, y, z]
    :param rot: rotation array [Z, Y, X Euler angles]
    :param unit: 'rad' or 'deg'
    :param wait_callback: True or False
    """
    if unit == 'deg':
        rot = U.deg_to_rad(rot)

    # limit the range of motion
    limit_angle = 89 * np.pi / 180
    if rot[1] > limit_angle:
        rot[1] = limit_angle
    elif rot[1] < -limit_angle:
        rot[1] = -limit_angle
    if rot[2] > limit_angle:
        rot[2] = limit_angle
    elif rot[2] < -limit_angle:
        rot[2] = -limit_angle

    # set in position cartesian mode
    frame = self.NumpyArraytoPyKDLFrame(pos, rot)
    msg = posemath.toMsg(frame)
    print msg

    # go to that position by goal
    if wait_callback:
        return self.__set_position_goal_cartesian_publish_and_wait(msg)
    else:
        self.goal_reached = False
        self.__set_position_goal_cartesian_pub.publish(msg)
        return True

def project_pose(self, pose):
    frame = pm.fromMsg(pose)
    [roll, pitch, yaw] = frame.M.GetRPY()
    # Keep only the yaw component and drop the pose onto the ground plane
    frame.M = PyKDL.Rotation.RPY(0, 0, yaw)
    projected = pm.toMsg(frame)
    projected.position.z = 0
    return projected

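# A minimal standalone sketch of the projection above (an assumption: only
# PyKDL and tf_conversions are needed); it collapses a tilted 6-DOF pose to a
# planar (x, y, yaw) pose the same way project_pose does, without the
# enclosing class.
import PyKDL
from tf_conversions import posemath as pm

tilted = pm.toMsg(PyKDL.Frame(PyKDL.Rotation.RPY(0.3, 0.2, 1.0),
                              PyKDL.Vector(1.0, 2.0, 0.5)))
frame = pm.fromMsg(tilted)
_, _, yaw = frame.M.GetRPY()
frame.M = PyKDL.Rotation.RPY(0, 0, yaw)
flat = pm.toMsg(frame)
flat.position.z = 0
# flat now sits on the ground plane with only the yaw component (1.0 rad) kept
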
def insert6DofGlobalMarker(self):
    int_position_marker = InteractiveMarker()
    int_position_marker.header.frame_id = 'world'
    int_position_marker.name = 'jar_marker'
    int_position_marker.scale = 0.2
    int_position_marker.pose = pm.toMsg(self.T_W_J)

    int_position_marker.controls.append(
        self.createInteractiveMarkerControl6DOF(InteractiveMarkerControl.ROTATE_AXIS, 'x'))
    int_position_marker.controls.append(
        self.createInteractiveMarkerControl6DOF(InteractiveMarkerControl.ROTATE_AXIS, 'y'))
    int_position_marker.controls.append(
        self.createInteractiveMarkerControl6DOF(InteractiveMarkerControl.ROTATE_AXIS, 'z'))
    int_position_marker.controls.append(
        self.createInteractiveMarkerControl6DOF(InteractiveMarkerControl.MOVE_AXIS, 'x'))
    int_position_marker.controls.append(
        self.createInteractiveMarkerControl6DOF(InteractiveMarkerControl.MOVE_AXIS, 'y'))
    int_position_marker.controls.append(
        self.createInteractiveMarkerControl6DOF(InteractiveMarkerControl.MOVE_AXIS, 'z'))

    box = self.createAxisMarkerControl(Point(0.15, 0.015, 0.015), Point(0.0, 0.0, 0.0))
    box.interaction_mode = InteractiveMarkerControl.BUTTON
    box.name = 'button'
    int_position_marker.controls.append(box)

    self.server.insert(int_position_marker, self.processFeedback)
    self.server.applyChanges()

def transformSE3(msg):
    global pose
    curPos = pm.Frame(
        pm.Rotation.Quaternion(msg.orientation.x, msg.orientation.y,
                               msg.orientation.z, msg.orientation.w),
        pm.Vector(msg.position.x, msg.position.y, msg.position.z))
    curVel = pm.Twist(
        pm.Vector(msg.linear_velocity.x, msg.linear_velocity.y, msg.linear_velocity.z),
        pm.Vector(msg.angular_velocity.x, msg.angular_velocity.y, msg.angular_velocity.z))
    pos = pose * curPos
    vel = pose * curVel

    tmp = pm.toMsg(pos)
    msg.position = tmp.position
    msg.orientation = tmp.orientation

    # Normalize the orientation
    quat_original = np.array([msg.orientation.x, msg.orientation.y,
                              msg.orientation.z, msg.orientation.w])
    quat_normalized = quat_original / np.linalg.norm(quat_original)

    # Update the orientation
    (msg.orientation.x, msg.orientation.y,
     msg.orientation.z, msg.orientation.w) = quat_normalized

    msg.linear_velocity = Vector3(vel.vel.x(), vel.vel.y(), vel.vel.z())
    msg.angular_velocity = Vector3(vel.rot.x(), vel.rot.y(), vel.rot.z())

def get_waypoints(self, frame_type, predicates, transforms, names):
    self.and_srv.wait_for_service()

    type_predicate = PredicateStatement()
    type_predicate.predicate = frame_type
    type_predicate.params = ['*', '', '']

    res = self.and_srv([type_predicate] + predicates)
    print "Found matches: " + str(res.matching)
    # print res

    if (not res.found) or len(res.matching) < 1:
        return None

    poses = []
    for tform in transforms:
        poses.append(pm.fromMsg(tform))
    # print poses

    new_poses = []
    new_names = []
    for match in res.matching:
        try:
            (trans, rot) = self.listener.lookupTransform(self.world, match, rospy.Time(0))
            for (pose, name) in zip(poses, names):
                # resp.waypoints.poses.append(pm.toMsg(pose * pm.fromTf((trans, rot))))
                new_poses.append(pm.toMsg(pm.fromTf((trans, rot)) * pose))
                new_names.append(match + "/" + name)
        except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
            rospy.logwarn('Could not find transform from %s to %s!' % (self.world, match))

    return (new_poses, new_names)

def move_rel_to_cartesian_pose(self, time_from_start, rel_pose):
    print self.BCOLOR + "[IRPOS] Move relative to cartesian trajectory" + self.ENDC

    self.conmanSwitch([self.robot_name + 'mPoseInt'], [], True)

    actual_pose = self.get_cartesian_pose()

    # Transform poses to frames.
    actualFrame = pm.fromMsg(actual_pose)
    relativeFrame = pm.fromMsg(rel_pose)
    desiredFrame = actualFrame * relativeFrame
    pose = pm.toMsg(desiredFrame)

    cartesianGoal = CartesianTrajectoryGoal()
    cartesianGoal.trajectory.points.append(
        CartesianTrajectoryPoint(rospy.Duration(time_from_start), pose, Twist()))
    cartesianGoal.trajectory.header.stamp = rospy.get_rostime() + rospy.Duration(0.1)
    cartesianGoal.path_tolerance = CartesianTolerance(
        Vector3(self.CART_POS_TOLERANCE, self.CART_POS_TOLERANCE, self.CART_POS_TOLERANCE),
        Vector3(self.CART_ROT_TOLERANCE, self.CART_ROT_TOLERANCE, self.CART_ROT_TOLERANCE))
    cartesianGoal.goal_tolerance = CartesianTolerance(
        Vector3(self.CART_POS_TOLERANCE, self.CART_POS_TOLERANCE, self.CART_POS_TOLERANCE),
        Vector3(self.CART_ROT_TOLERANCE, self.CART_ROT_TOLERANCE, self.CART_ROT_TOLERANCE))

    self.pose_client.send_goal(cartesianGoal)
    self.pose_client.wait_for_result()

    result = self.pose_client.get_result()
    code = self.cartesian_error_code_to_string(result.error_code)
    print self.BCOLOR + "[IRPOS] Result: " + str(code) + self.ENDC

    self.conmanSwitch([], [self.robot_name + 'mPoseInt'], True)
    return result

def get_new_waypoint(self, obj):
    try:
        (trans, rot) = self.listener.lookupTransform(obj, self.endpoint, rospy.Time(0))
        return pm.toMsg(pm.fromTf((trans, rot)))
    except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
        rospy.logerr('Failed to lookup transform from %s to %s' % (obj, self.endpoint))
    return None

def LoopPosearrayInitpose(transformstamped, posestampedarray_src, posestampedarray_target):
    id_src = checkHeaderID(transformstamped.header.frame_id, posestampedarray_src)
    id_target = checkHeaderID(transformstamped.child_frame_id, posestampedarray_target)
    if not (id_src > 0 and id_target > 0):
        print("========================================checkHeaderID===========================")
        print("transformstamped.header.frame_id = {} : posestampedarray_src = {}"
              .format(transformstamped.header.frame_id, posestampedarray_src))
        print("transformstamped.child_frame_id = {} : posestampedarray_target = {}"
              .format(transformstamped.child_frame_id, posestampedarray_target))
        # assert(False)
        return None

    trans_r1_f1_findex = posemath.fromMsg(posestampedarray_src.poseArray[id_src].pose)
    trans_r1_r2_findex_findex = posemath.fromMsg(trans2pose(transformstamped.transform)).Inverse()
    trans_r1_r2_f1_findex = trans_r1_f1_findex * trans_r1_r2_findex_findex
    trans_r2_findex_f1 = posemath.fromMsg(posestampedarray_target.poseArray[id_target].pose).Inverse()
    trans_r1_r2_f1_f1 = trans_r1_r2_f1_findex * trans_r2_findex_f1
    return posemath.toMsg(trans_r1_r2_f1_f1)

def calc_relative_pose(self, x, y, z, roll=0, pitch=0, yaw=0, in_tip_frame=True):
    _pose = self.endpoint_pose()

    # set the target offset
    trans = PyKDL.Vector(x, y, z)
    rot = PyKDL.Rotation.RPY(roll, pitch, yaw)
    f2 = PyKDL.Frame(rot, trans)

    if in_tip_frame:
        # in the endpoint's cartesian system
        _pose = posemath.toMsg(posemath.fromMsg(_pose) * f2)
    else:
        # in the base's cartesian system
        _pose = posemath.toMsg(f2 * posemath.fromMsg(_pose))
    return _pose

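# A short sketch (plain PyKDL, no robot required) of the two composition
# orders used in calc_relative_pose; 'endpoint' is a stand-in for the frame
# returned by self.endpoint_pose().
import PyKDL
from tf_conversions import posemath

endpoint = PyKDL.Frame(PyKDL.Rotation.RPY(0, 0, 1.57), PyKDL.Vector(0.5, 0.0, 0.3))
offset = PyKDL.Frame(PyKDL.Rotation.Identity(), PyKDL.Vector(0.1, 0.0, 0.0))

tip_relative = endpoint * offset   # steps 0.1 m along the tool's own x-axis
base_relative = offset * endpoint  # steps 0.1 m along the base frame's x-axis
print posemath.toMsg(tip_relative).position   # ~ (0.5, 0.1, 0.3)
print posemath.toMsg(base_relative).position  # ~ (0.6, 0.0, 0.3)
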
def transform_wrist_frame(T_tool, ft, tool_x_offset=0.0):
    '''
    :param T_tool: desired *_gripper_tool_frame pose w.r.t. some reference frame.
                   Can be of type kdl.Frame or geometry_msgs.Pose.
    :param ft: bool. True if the arm has a force-torque sensor.
    :param tool_x_offset: (double) offset in tool length.
    :return: T_wrist. geometry_msgs.Pose type. Wrist pose w.r.t. the same reference frame as T_tool.
    '''
    if ft:
        T_wrist_tool = kdl.Frame(kdl.Rotation.Identity(),
                                 kdl.Vector(0.216 + tool_x_offset, 0.0, 0.0))
    else:
        T_wrist_tool = kdl.Frame(kdl.Rotation.Identity(),
                                 kdl.Vector(0.180 + tool_x_offset, 0.0, 0.0))

    if type(T_tool) is Pose:
        T_tool_ = posemath.fromMsg(T_tool)
    elif type(T_tool) is tuple and len(T_tool) == 2:
        T_tool_ = posemath.fromTf(T_tool)
    else:
        T_tool_ = T_tool

    T_wrist_ = T_tool_ * T_wrist_tool.Inverse()
    T_wrist = posemath.toMsg(T_wrist_)
    return T_wrist

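# A hypothetical usage sketch for transform_wrist_frame as defined above,
# assuming the 0.180 m wrist-to-tool offset of the non-FT arm: compute where
# the wrist must go so that the gripper tool frame lands at a goal pose.
import PyKDL as kdl

tool_goal = kdl.Frame(kdl.Rotation.Identity(), kdl.Vector(0.6, 0.0, 0.8))
wrist_pose = transform_wrist_frame(tool_goal, ft=False)
# wrist_pose.position.x is 0.42: the wrist sits 0.18 m behind the tool goal
# along the (identity-rotation) x-axis.
print wrist_pose
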
def callback_state(self, data):
    # If the state is not being reset, proceed; otherwise skip the callback
    if self.reset.isSet():
        if self.real_robot:
            # Convert the Pose from relative-to-map to relative-to-world frame
            f_r_in_map = posemath.fromMsg(data)
            f_r_in_world = self.world_to_map * f_r_in_map
            data = posemath.toMsg(f_r_in_world)

        x = data.position.x
        y = data.position.y
        orientation = PyKDL.Rotation.Quaternion(data.orientation.x,
                                                data.orientation.y,
                                                data.orientation.z,
                                                data.orientation.w)
        euler_orientation = orientation.GetRPY()
        yaw = euler_orientation[2]

        # Append the Pose to the Path
        stamped_mir_pose = PoseStamped()
        stamped_mir_pose.pose = data
        stamped_mir_pose.header.stamp = rospy.Time.now()
        stamped_mir_pose.header.frame_id = self.path_frame
        self.mir_path.poses.append(stamped_mir_pose)
        self.mir_exec_path.publish(self.mir_path)

        # Update the internal Pose variable
        self.mir_pose = copy.deepcopy([x, y, yaw])
    else:
        pass

def get_ik_simple(service, goal, seed, joint_names, tip_frame, tool=None):
    validate_quat(goal.pose.orientation)

    # Transform the goal to account for the tool offset from the tip frame
    if tool:
        tool_in_tip = tfl.transformPose(tip_frame, tool)
        tool_in_tip_t = pm.fromMsg(tool_in_tip.pose)
        goal_t = pm.fromMsg(goal.pose)
        # goal.pose = pm.toMsg(tool_in_tip_t.Inverse() * goal_t)
        # goal = PoseStamped(header=goal.header, pose=pm.toMsg(tool_in_tip_t.Inverse() * goal_t))
        goal = PoseStamped(header=goal.header, pose=pm.toMsg(goal_t * tool_in_tip_t.Inverse()))

    req = PositionIKRequest()
    req.ik_link_name = tip_frame
    req.pose_stamped = goal
    req.ik_seed_state.joint_state.name = joint_names
    req.ik_seed_state.joint_state.position = seed

    for i in range(10):
        resp = service(req, rospy.Duration(10.0))
        if resp.error_code.val != 1:
            rospy.logerr("Error in IK: %d" % resp.error_code.val)
        else:
            return list(resp.solution.joint_state.position)
    return []

def insert6DofGlobalMarker(self):
    T_B_T = self.velma.getTf("B", "T" + self.prefix)

    int_position_marker = InteractiveMarker()
    int_position_marker.header.frame_id = 'torso_base'
    int_position_marker.name = self.prefix + '_arm_position_marker'
    int_position_marker.scale = 0.2
    int_position_marker.pose = pm.toMsg(T_B_T)

    int_position_marker.controls.append(
        self.createInteractiveMarkerControl6DOF(InteractiveMarkerControl.ROTATE_AXIS, 'x'))
    int_position_marker.controls.append(
        self.createInteractiveMarkerControl6DOF(InteractiveMarkerControl.ROTATE_AXIS, 'y'))
    int_position_marker.controls.append(
        self.createInteractiveMarkerControl6DOF(InteractiveMarkerControl.ROTATE_AXIS, 'z'))
    int_position_marker.controls.append(
        self.createInteractiveMarkerControl6DOF(InteractiveMarkerControl.MOVE_AXIS, 'x'))
    int_position_marker.controls.append(
        self.createInteractiveMarkerControl6DOF(InteractiveMarkerControl.MOVE_AXIS, 'y'))
    int_position_marker.controls.append(
        self.createInteractiveMarkerControl6DOF(InteractiveMarkerControl.MOVE_AXIS, 'z'))

    box = self.createAxisMarkerControl(Point(0.15, 0.015, 0.015), Point(0.0, 0.0, 0.0))
    box.interaction_mode = InteractiveMarkerControl.BUTTON
    box.name = 'button'
    int_position_marker.controls.append(box)

    self.server.insert(int_position_marker, self.processFeedback)
    self.server.applyChanges()

def virtualObjectLeftHand():
    print "Creating a virtual object attached to gripper..."

    # for more details refer to ROS docs for moveit_msgs/AttachedCollisionObject
    object1 = AttachedCollisionObject()
    object1.link_name = "left_HandGripLink"
    object1.object.header.frame_id = "left_HandGripLink"
    object1.object.id = "object1"

    object1_prim = SolidPrimitive()
    object1_prim.type = SolidPrimitive.CYLINDER
    object1_prim.dimensions = [None, None]    # set the initial size of the list to 2
    object1_prim.dimensions[SolidPrimitive.CYLINDER_HEIGHT] = 0.23
    object1_prim.dimensions[SolidPrimitive.CYLINDER_RADIUS] = 0.06
    object1_pose = pm.toMsg(PyKDL.Frame(PyKDL.Rotation.RotY(math.pi/2)))

    object1.object.primitives.append(object1_prim)
    object1.object.primitive_poses.append(object1_pose)
    object1.object.operation = CollisionObject.ADD
    object1.touch_links = ['left_HandPalmLink',
                           'left_HandFingerOneKnuckleOneLink',
                           'left_HandFingerOneKnuckleTwoLink',
                           'left_HandFingerOneKnuckleThreeLink',
                           'left_HandFingerTwoKnuckleOneLink',
                           'left_HandFingerTwoKnuckleTwoLink',
                           'left_HandFingerTwoKnuckleThreeLink',
                           'left_HandFingerThreeKnuckleTwoLink',
                           'left_HandFingerThreeKnuckleThreeLink']
    return object1

def execute_cb(goal):
    rospy.loginfo("Action server received goal")
    preempt_timeout = rospy.Duration(5.0)

    for i in range(1, 5):
        rospy.loginfo('Detecting plug in gripper from position %i' % i)

        # move to the joint space position
        rospy.loginfo("Move in joint space...")
        if joint_space_client.send_goal_and_wait(
                get_action_goal('pr2_plugs_configuration/detect_plug_in_gripper%i' % i),
                rospy.Duration(20.0), preempt_timeout) != GoalStatus.SUCCEEDED:
            rospy.logerr('Move retract in joint space failed')
            server.set_aborted()
            return

        # call vision plug detection
        rospy.loginfo("Detecting plug...")
        detect_plug_goal = VisionPlugDetectionGoal()
        detect_plug_goal.camera_name = "/r_forearm_cam"
        # detect_plug_goal.prior.pose = toMsg(PyKDL.Frame(PyKDL.Rotation.RPY(pi/2, 0.0, -pi/9), PyKDL.Vector(-.03, 0, 0)).Inverse())
        detect_plug_goal.prior.pose = toMsg(PyKDL.Frame(PyKDL.Rotation.RPY(-pi/2, pi/9, 0.0),
                                                        PyKDL.Vector(0.03, 0, -0.01)))
        detect_plug_goal.prior.header.stamp = rospy.Time.now()
        detect_plug_goal.prior.header.frame_id = "r_gripper_tool_frame"
        detect_plug_goal.origin_on_right = True
        if detect_plug_client.send_goal_and_wait(detect_plug_goal, rospy.Duration(5.0),
                                                 preempt_timeout) == GoalStatus.SUCCEEDED:
            server.set_succeeded(DetectPlugInGripperResult(detect_plug_client.get_result().plug_pose))
            rospy.loginfo("Action server goal finished")
            return

    # Failure
    rospy.logerr("Failed to detect plug in gripper")
    server.set_aborted(DetectPlugInGripperResult(detect_plug_goal.prior))

def __frame_to_tfxPose(self, frame):
    """Convert a PyKDL.Frame object to a tfx pose.

    The PyKDL.Frame is first converted to a ROS posemath message and then
    reconverted to a tfx.canonical.CanonicalTransform object.

    :returns: tfx pose
    """
    rosMsg = posemath.toMsg(frame)
    return tfx.pose(rosMsg)

def publish_tf_to_tree(tf_msg, var_frame_id, var_child_frame_id):
    global br, listener
    print("start publish tf")

    listener.waitForTransform(var_frame_id, tf_msg.header.frame_id,
                              rospy.Time(), rospy.Duration(4.0))
    listener.waitForTransform(tf_msg.child_frame_id, var_child_frame_id,
                              rospy.Time(), rospy.Duration(4.0))
    frame_tf_msg = listener._buffer.lookup_transform(
        strip_leading_slash(var_frame_id),
        strip_leading_slash(tf_msg.header.frame_id), rospy.Time(0))
    child_frame_tf_msg = listener._buffer.lookup_transform(
        strip_leading_slash(tf_msg.child_frame_id),
        strip_leading_slash(var_child_frame_id), rospy.Time(0))
    print("frame_tf_msg:" + str(frame_tf_msg))
    print("child_frame_tf_msg:" + str(child_frame_tf_msg))

    frame_tf = posemath.fromMsg(trackutils.trans2pose(frame_tf_msg.transform))
    print("frame_tf:" + str(frame_tf))
    child_frame_tf = posemath.fromMsg(trackutils.trans2pose(child_frame_tf_msg.transform))
    print("child_frame_tf:" + str(child_frame_tf))
    tf_kdl = posemath.fromMsg(trackutils.trans2pose(tf_msg.transform))
    print("tf_kdl:" + str(tf_kdl))

    tf_msg.transform = trackutils.pose2trans(
        posemath.toMsg(frame_tf * tf_kdl * child_frame_tf))
    tf_msg.header.frame_id = var_frame_id
    tf_msg.child_frame_id = var_child_frame_id
    print("publish:" + str(tf_msg))
    br.sendTransformMessage(tf_msg)

def maintain_tf_tree(odom_to_scene_before=None, odom_to_scene_after=None):
    global br, listener, self_ID

    base_to_scene = TransformStamped()
    base_to_scene.transform = trackutils.pose2trans(
        posemath.toMsg(kdl.Frame(kdl.Rotation.Quaternion(x=0.5, y=-0.5, z=0.5, w=-0.5))))
    base_to_scene.header.stamp = rospy.Time.now()
    base_to_scene.header.frame_id = "base_link{}".format(self_ID)
    base_to_scene.child_frame_id = "scene{}".format(self_ID)
    br.sendTransformMessage(base_to_scene)

    if odom_to_scene_before is None or odom_to_scene_after is None:
        map_to_odom = TransformStamped()
        map_to_odom.header.stamp = rospy.Time.now()
        map_to_odom.transform.rotation.w = 1.
        map_to_odom.header.frame_id = "/map{}".format(self_ID)
        map_to_odom.child_frame_id = "/odom{}".format(self_ID)
        print(map_to_odom)
        br.sendTransformMessage(map_to_odom)  # publish the map -> odom TF tree
    elif odom_to_scene_before == odom_to_scene_after:
        listener.waitForTransform("map{}".format(self_ID), "odom{}".format(self_ID),
                                  rospy.Time(), rospy.Duration(4.0))
        map_to_odom = listener._buffer.lookup_transform(
            "map{}".format(self_ID), "odom{}".format(self_ID), rospy.Time(0))
        map_to_odom.header.stamp = rospy.Time.now()
        br.sendTransformMessage(map_to_odom)  # publish the map -> odom TF tree
    else:
        listener.waitForTransform("map{}".format(self_ID), "odom{}".format(self_ID),
                                  rospy.Time(), rospy.Duration(4.0))
        map_to_odom = listener._buffer.lookup_transform(
            "map{}".format(self_ID), "odom{}".format(self_ID), rospy.Time(0))
        map_to_odom.header.stamp = rospy.Time.now()
        map_to_odom_kdl = posemath.fromMsg(trackutils.trans2pose(map_to_odom.transform))
        init_pose = kdl.Frame(kdl.Rotation.Quaternion(x=0.5, y=-0.5, z=0.5, w=-0.5))
        odom_to_scene_after_kdl = init_pose * posemath.fromMsg(odom_to_scene_after)
        odom_to_scene_before_kdl = init_pose * posemath.fromMsg(odom_to_scene_before)
        map_to_odom_kdl = map_to_odom_kdl * odom_to_scene_before_kdl * odom_to_scene_after_kdl.Inverse()
        map_to_odom.transform = trackutils.pose2trans(posemath.toMsg(map_to_odom_kdl))
        br.sendTransformMessage(map_to_odom)  # publish the map -> odom TF tree

def get_desired_cartesian_position(self):
    """Get the :ref:`desired cartesian position <currentvdesired>` of the robot
    in terms of cartesian space.

    :returns: the desired position of the robot in cartesian space
    :rtype: tfx.canonical.CanonicalTransform, built from a `PyKDL.Frame <http://docs.ros.org/diamondback/api/kdl/html/python/geometric_primitives.html>`_"""
    frame = self.__position_cartesian_desired
    tfx_pose = tfx.pose(posemath.toMsg(frame))
    return tfx_pose

def forward_kinematics_cb(self, req):
    '''
    Run forward kinematics on a joint space pose and return the result.
    '''
    q = req.joint_state.position
    kdl_pose = pm.fromMatrix(self.kdl_kin.forward(q))
    pose_msg = pm.toMsg(kdl_pose)
    response = ForwardKinematicsResponse(pose=pose_msg, ack="SUCCESS")
    return response

def get_world_pose(self):
    self.__call__()
    self.listener.waitForTransform("/world", self.object_name, rospy.Time(0), rospy.Duration(10))
    return pm.toMsg(pm.fromTf(
        self.listener.lookupTransform("/world", self.object_name, rospy.Time(0))))

def moveTool(tool_frame, t):
    global action_tool_client
    tool_pose = pm.toMsg(tool_frame)
    action_tool_goal = CartesianTrajectoryGoal()
    action_tool_goal.trajectory.header.stamp = rospy.Time.now()
    action_tool_goal.trajectory.points.append(CartesianTrajectoryPoint(
        rospy.Duration(t), tool_pose, Twist()))
    action_tool_client.send_goal(action_tool_goal)

def handle_rgbd(self, qq):
    if self.triggermode and not self.find:
        return
    (imgmsg, caminfo, pointcloud) = qq
    if not pointcloud or pointcloud._type != 'sensor_msgs/PointCloud2':
        return
    print "rgbd"

    img = CvBridge().imgmsg_to_cv(imgmsg, "mono8")
    pcm = image_geometry.PinholeCameraModel()
    pcm.fromCameraInfo(caminfo)

    self.track(CvBridge().imgmsg_to_cv(imgmsg, "rgb8"))
    for (code, corners) in self.tracking.items():
        # Average the corners to get the marker center in pixel coordinates
        cx = 0.0
        cy = 0.0
        count = 0.0
        for (x, y) in corners:
            cx += x
            cy += y
            count += 1.0
        cx /= count
        cy /= count

        model = cv.fromarray(numpy.array(
            [[0, 0, 0], [.1, 0, 0], [.1, .1, 0], [0, .1, 0]], numpy.float32))
        rot = cv.CreateMat(3, 1, cv.CV_32FC1)
        trans = cv.CreateMat(3, 1, cv.CV_32FC1)
        cv.FindExtrinsicCameraParams2(model,
                                      cv.fromarray(numpy.array(corners, numpy.float32)),
                                      pcm.intrinsicMatrix(), pcm.distortionCoeffs(),
                                      rot, trans)
        # ts = geometry_msgs.msg.TransformStamped()
        # ts.header.frame_id = imgmsg.header.frame_id
        # ts.header.stamp = imgmsg.header.stamp
        # ts.child_frame_id = code
        # rot = cv.fromarray(rot)
        # trans = cv.fromarray(trans)
        posemsg = pm.toMsg(self.fromCameraParams(cv, rot, trans))

        # Take the marker center's 3D position from the point cloud
        points = read_points(pointcloud, None, False, [(int(cx), int(cy))])
        # pointcloud.data(cy*pointcloud.row_step + cx*pointcloud.point_step)
        for point in points:
            posemsg.position = point
        # ts.transform.rotation = posemsg.orientation
        # print pcm.fullProjectionMatrix()
        # posemsg.position = points[0]
        print "Pose: ", posemsg.position
        self.broadcast(imgmsg.header, code, posemsg)

def getposecb(self, a, b):
    print "Setting Pose based on world model"

    if (rospy.Time.now().to_sec() - b.pose.header.stamp.to_sec()) > 4.0:
        print "time now = ", rospy.Time.now().to_sec(), "time from pose = ", b.pose.header.stamp.to_sec()
        print "Time stamp of detection too old: diff=", (rospy.Time.now().to_sec() - b.pose.header.stamp.to_sec())
        if self.detection_counter <= 4:
            self.detection_counter += 1
            return "preempted"
        else:
            self.detection_counter = 0
            return "aborted"

    self.config_over_object.header.stamp = rospy.Time.now()
    self.config_over_object.pose.position.x = b.pose.pose.position.x
    self.config_over_object.pose.position.y = b.pose.pose.position.y
    # ao = PyKDL.Rotation.Quaternion(self.config_over_object.pose.orientation.x, self.config_over_object.pose.orientation.y, self.config_over_object.pose.orientation.z, self.config_over_object.pose.orientation.w)
    # bo = PyKDL.Rotation.Quaternion(b.pose.pose.orientation.x, b.pose.pose.orientation.y, b.pose.pose.orientation.z, b.pose.pose.orientation.w)
    # c = ao  # * bo.Inverse()
    m_manip = pm.toMatrix(pm.fromMsg(self.config_over_object.pose))
    m_obj = pm.toMatrix(pm.fromMsg(b.pose.pose))
    import numpy
    from tf.transformations import *
    m_ori = pm.toMatrix(pm.fromTf(((0, 0, 0), quaternion_from_euler(0, 0, math.radians(-20.0)))))
    box_pose = pm.toMsg(pm.fromMatrix(numpy.dot(m_manip, m_obj)))
    m_test = numpy.dot(m_manip, m_obj)
    newrot = pm.toMsg(pm.fromMatrix(m_test))
    print newrot
    print self.config_over_object.pose
    # print c.GetQuaternion()
    self.config_over_object.pose.orientation = box_pose.orientation
    # self.config_over_object.pose.orientation.y = c.GetQuaternion()[1]
    # self.config_over_object.pose.orientation.z = c.GetQuaternion()[2]
    # self.config_over_object.pose.orientation.w = c.GetQuaternion()[3]
    # self.config_over_object.pose.position.z = 0.30  # 0.430

    self.config_object.header.stamp = rospy.Time.now()
    self.config_object.pose.position.x = b.pose.pose.position.x
    self.config_object.pose.position.y = b.pose.pose.position.y
    # self.config_object.pose.position.z = 0.2  # 0.095
    self.config_object.pose.orientation = box_pose.orientation

def call_planner(self, stage):
    self.status.planning_status = stage
    print(stage)

    if stage == ObjectStatus.PREGRASP:
        target_pose = self.status.pose_stamped.pose
        pregrasp_distance = .1

        # The marker pose is defined in the torso coordinate system
        target_tran = pm.toMatrix(pm.fromMsg(target_pose))
        hand_tran = target_tran.copy()

        # Align the hand perpendicular to the cylinder in its canonical pose
        hand_tran[:3, 1] = target_tran[:3, 2]
        hand_tran[:3, 2] = target_tran[:3, 0]
        hand_tran[:3, 0] = target_tran[:3, 1]
        pregrasp_tran = hand_tran.copy()

        # The approach direction of this gripper is -y for some reason
        pregrasp_tran[:3, 3] = pregrasp_tran[:3, 3] + pregrasp_tran[:3, 1] * pregrasp_distance
        hand_tran_pose = pm.toMsg(pm.fromMatrix(hand_tran))
        pregrasp_pose = pm.toMsg(pm.fromMatrix(pregrasp_tran))

        req = PosePlanningSrv._request_class()
        req.TargetPoses = [pregrasp_pose, hand_tran_pose]
        req.ManipulatorID = int(self.status.hands == self.status.RIGHT)
        res = None
        try:
            print(req)
            res = self.pregrasp_planner_client.call(req)
            self.status.operating_status = self.status.PLANNING
            # print res
            self.last_plan = res.PlannedTrajectory
        except:
            res = None
            rospy.loginfo("Planner failed to pregrasp")
            self.status.operating_status = self.status.ERROR

def __move_cartesian_direct(self, end_frame):
    """Move the arm to the end position, bypassing the trajectory generator.

    :param end_frame: the ending `PyKDL.Frame <http://docs.ros.org/diamondback/api/kdl/html/python/geometric_primitives.html>`_
    :returns: True if the move command was published successfully
    :rtype: Bool"""
    # set in position cartesian mode
    end_position = posemath.toMsg(end_frame)

    # go to that position directly
    self.__set_position_cartesian_pub.publish(end_position)
    return True

def publish_state(self, event):
    for joint, frame in self.frames.items():
        msg = PoseStamped()
        # Populate the msg
        msg.pose = posemath.toMsg(frame)
        msg.header.frame_id = self.frame_id
        msg.header.stamp = rospy.Time.now()
        # Publish the msg
        self.pose_pub[joint].publish(msg)

    # Draw the skeleton
    self.draw_robot()

def plug_in_contact(ud):
    """Returns True if the plug is in contact with something."""
    pose_base_gripper = fromMsg(TFUtil.wait_and_lookup('base_link', 'r_gripper_tool_frame').pose)
    pose_outlet_plug = fromMsg(ud.base_to_outlet).Inverse() * pose_base_gripper * fromMsg(ud.gripper_to_plug)
    ud.outlet_to_plug_contact = toMsg(pose_outlet_plug)

    # check if the difference between the desired and measured outlet-plug
    # offset along the x-axis is more than 1 cm
    if math.fabs(pose_outlet_plug.p[0] - ud.approach_offset) > 0.01:
        return True
    return False

def test_roundtrip(self):
    c = Frame()

    d = pm.fromMsg(pm.toMsg(c))
    self.assertEqual(repr(c), repr(d))

    d = pm.fromMatrix(pm.toMatrix(c))
    self.assertEqual(repr(c), repr(d))

    d = pm.fromTf(pm.toTf(c))
    self.assertEqual(repr(c), repr(d))

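# A standalone sketch of the conversion pairs exercised by the test above
# (assumes tf_conversions and PyKDL are available): a geometry_msgs/Pose, a
# 4x4 numpy matrix, and a tf-style ((x, y, z), (qx, qy, qz, qw)) tuple all
# round-trip through posemath.
import PyKDL
from tf_conversions import posemath as pm

f = PyKDL.Frame(PyKDL.Rotation.RPY(0.1, 0.2, 0.3), PyKDL.Vector(1, 2, 3))
for g in (pm.fromMsg(pm.toMsg(f)),
          pm.fromMatrix(pm.toMatrix(f)),
          pm.fromTf(pm.toTf(f))):
    # the translation survives each round trip to within float precision
    assert (g.p - f.p).Norm() < 1e-6
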
def handle_stereo(self, qq):
    (lmsg, lcmsg, rmsg, rcmsg) = qq
    limg = CvBridge().imgmsg_to_cv(lmsg, "mono8")
    rimg = CvBridge().imgmsg_to_cv(rmsg, "mono8")

    if 0:
        cv.SaveImage("/tmp/l%06d.png" % self.i, limg)
        cv.SaveImage("/tmp/r%06d.png" % self.i, rimg)
        self.i += 1

    scm = image_geometry.StereoCameraModel()
    scm.fromCameraInfo(lcmsg, rcmsg)

    bm = cv.CreateStereoBMState()
    if "wide" in rospy.resolve_name("stereo"):
        bm.numberOfDisparities = 160

    if 0:
        disparity = cv.CreateMat(limg.rows, limg.cols, cv.CV_16SC1)
        started = time.time()
        cv.FindStereoCorrespondenceBM(limg, rimg, disparity, bm)
        print time.time() - started
        ok = cv.CreateMat(limg.rows, limg.cols, cv.CV_8UC1)
        cv.CmpS(disparity, 0, ok, cv.CV_CMP_GT)
        cv.ShowImage("limg", limg)
        cv.ShowImage("disp", ok)
        cv.WaitKey(6)

    self.track(CvBridge().imgmsg_to_cv(lmsg, "rgb8"))
    if len(self.tracking) == 0:
        print "No markers found"

    for code, corners in self.tracking.items():
        corners3d = []
        for (x, y) in corners:
            limr = cv.GetSubRect(limg, (0, y - bm.SADWindowSize / 2, limg.cols, bm.SADWindowSize + 1))
            rimr = cv.GetSubRect(rimg, (0, y - bm.SADWindowSize / 2, rimg.cols, bm.SADWindowSize + 1))
            tiny_disparity = cv.CreateMat(limr.rows, limg.cols, cv.CV_16SC1)
            cv.FindStereoCorrespondenceBM(limr, rimr, tiny_disparity, bm)
            if tiny_disparity[7, x] < 0:
                return
            corners3d.append(scm.projectPixelTo3d((x, y), tiny_disparity[7, x] / 16.0))
            if 0:
                cv.ShowImage("d", disparity)

        (a, b, c, d) = [numpy.array(pt) for pt in corners3d]

        def normal(s, t):
            return (t - s) / numpy.linalg.norm(t - s)

        # Build a frame from two edges of the marker square
        x = PyKDL.Vector(*normal(a, b))
        y = PyKDL.Vector(*normal(a, d))
        f = PyKDL.Frame(PyKDL.Rotation(x, y, x * y), PyKDL.Vector(*a))
        msg = pm.toMsg(f)
        # print "%10f %10f %10f" % (msg.position.x, msg.position.y, msg.position.z)
        print code, msg.position.x, msg.position.y, msg.position.z
        self.broadcast(lmsg.header, code, msg)

def __setstate__(self, state):
    self.model_name = state['model_name']
    self.object_name = state['object_name']
    buff = StringIO.StringIO()
    buff.writelines(state['point_cloud_data'])
    buff.seek(0)
    self.point_cloud_data = sensor_msgs.msg.PointCloud2()
    self.point_cloud_data.deserialize(buff.buf)
    self.pose = pm.toMsg(pm.fromMatrix(state['pose']))
    self.bc = ModelRecManager.tf_broadcaster
    self.listener = ModelRecManager.tf_listener

def joy_cb(self, msg):
    # Convenience
    side = self.side
    b = msg.buttons
    lb = self.last_buttons

    if (rospy.Time.now() - self.last_hand_cmd) < rospy.Duration(0.1):
        return

    # Update the enabled fingers
    self.move_f[0] = self.move_f[0] ^ (b[self.B1[side]] and not lb[self.B1[side]])
    self.move_f[1] = self.move_f[1] ^ (b[self.B2[side]] and not lb[self.B2[side]])
    self.move_f[2] = self.move_f[2] ^ (b[self.B3[side]] and not lb[self.B3[side]])
    self.move_spread = self.move_spread ^ (b[self.B4[side]] and not lb[self.B4[side]])
    self.move_all = self.move_all ^ (b[self.B_CENTER[side]] and not lb[self.B_CENTER[side]])

    # Check if the deadman is engaged
    if msg.axes[self.DEADMAN[side]] < 0.01:
        if self.deadman_engaged:
            self.hand_cmd.mode = [oro_barrett_msgs.msg.BHandCmd.MODE_VELOCITY] * 4
            self.hand_cmd.cmd = [0.0, 0.0, 0.0, 0.0]
            self.hand_pub.publish(self.hand_cmd)
            self.last_hand_cmd = rospy.Time.now()
            self.deadman_engaged = False
            self.deadman_max = 0.0
    else:
        self.handle_hand_cmd(msg)
        self.handle_cart_cmd(msg)

    self.last_buttons = msg.buttons
    self.last_axes = msg.axes

    # Broadcast the command if it's defined
    if self.cmd_frame:
        # Broadcast the command frame
        tform = toTf(self.cmd_frame)
        self.broadcaster.sendTransform(tform[0], tform[1], rospy.Time.now(),
                                       self.cmd_frame_id, 'world')

        # Broadcast the telemanip command
        telemanip_cmd = TelemanipCommand()
        telemanip_cmd.header.frame_id = 'world'
        telemanip_cmd.header.stamp = rospy.Time.now()
        telemanip_cmd.posetwist.pose = toMsg(self.cmd_frame)
        telemanip_cmd.resync_pose = msg.buttons[self.THUMB_CLICK[side]] == 1
        telemanip_cmd.deadman_engaged = self.deadman_engaged
        if msg.axes[self.THUMB_Y[side]] > 0.5:
            telemanip_cmd.grasp_opening = 0.0
        elif msg.axes[self.THUMB_Y[side]] < -0.5:
            telemanip_cmd.grasp_opening = 1.0
        else:
            telemanip_cmd.grasp_opening = 0.5
        telemanip_cmd.estop = False  # TODO: add master estop control
        self.telemanip_cmd_pub.publish(telemanip_cmd)