def pose_callback(self, pose):
    # This code is based on:
    # https://github.com/ros-planning/navigation/blob/jade-devel/amcl/src/amcl_node.cpp
    try:
        self.tf_listener.waitForTransform(
            'map',   # from here
            'odom',  # to here
            pose.header.stamp,
            rospy.Duration(1.0))
        frame = posemath.fromMsg(pose.pose).Inverse()
        pose.pose = posemath.toMsg(frame)
        pose.header.frame_id = 'base_link'
        odom_pose = self.tf_listener.transformPose('odom', pose)
        frame = posemath.fromMsg(odom_pose.pose).Inverse()
        odom_pose.pose = posemath.toMsg(frame)
        self.transform_position[0] = odom_pose.pose.position.x
        self.transform_position[1] = odom_pose.pose.position.y
        self.transform_quaternion[0] = odom_pose.pose.orientation.x
        self.transform_quaternion[1] = odom_pose.pose.orientation.y
        self.transform_quaternion[2] = odom_pose.pose.orientation.z
        self.transform_quaternion[3] = odom_pose.pose.orientation.w
    except tf.Exception as e:
        print e
        print "(May not be a big deal.)"
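# --- Hedged sketch (not from the repo above): the double Inverse() in
# pose_callback computes the map->odom correction that AMCL maintains,
# T_map_odom = T_map_base * T_odom_base^-1. Standalone check of that frame
# algebra, assuming PyKDL is available; all numbers are made up.
import PyKDL

T_map_base = PyKDL.Frame(PyKDL.Rotation.RPY(0, 0, 0.3), PyKDL.Vector(1.0, 2.0, 0))
T_odom_base = PyKDL.Frame(PyKDL.Rotation.RPY(0, 0, 0.1), PyKDL.Vector(0.9, 2.2, 0))
T_map_odom = (T_odom_base * T_map_base.Inverse()).Inverse()
# Composing the correction with the odometry pose recovers the localized pose:
assert ((T_map_odom * T_odom_base).p - T_map_base.p).Norm() < 1e-9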
def arTagCallback(self, msg):
    markers = msg.markers
    main_tag_flag = False
    for i in xrange(len(markers)):
        if markers[i].id > self.max_idx:
            continue
        if markers[i].id == self.main_tag_id:
            main_tag_flag = True
            self.main_tag_frame = posemath.fromMsg(markers[i].pose.pose) * self.z_neg90_frame
            if self.main_tag_frame.p.Norm() > 2.0:
                return
    if main_tag_flag == False:
        return

    for i in xrange(len(markers)):
        if markers[i].id > self.max_idx:
            continue
        if markers[i].id != self.main_tag_id:
            tag_id = markers[i].id
            tag_frame = posemath.fromMsg(markers[i].pose.pose) * self.z_neg90_frame
            # position filtering
            if (self.main_tag_frame.p - tag_frame.p).Norm() > self.pos_thres:
                return
            frame_diff = self.main_tag_frame.Inverse() * tag_frame
            self.updateFrames(tag_id, frame_diff)
def read_tfs(self, req):
    # marker w.r.t. camera
    ok = True
    # read target w.r.t. camera
    w_P_ee = self.current_pose(self.robot_ee_frame_name, self.base_frame_name)
    if w_P_ee == 0:
        ok = False
    c_P_m = self.current_pose(self.marker_frame_name, self.camera_frame_name)
    if c_P_m == 0:
        ok = False
    # ee w.r.t. base
    if ok:
        print self.base_frame_name + " -> " + self.robot_ee_frame_name
        print w_P_ee
        print self.camera_frame_name + " -> " + self.marker_frame_name
        print c_P_m
        # save data
        safe_pose_to_file(self.f, w_P_ee)
        safe_pose_to_file(self.f, c_P_m)
        self.crc.store_frames(posemath.fromMsg(w_P_ee), posemath.fromMsg(c_P_m))
        print "saved so far"
        print len(self.crc._w_T_ee)
    else:
        print "error in retrieving a frame"
    return EmptyResponse()
def move_rel_to_cartesian_pose(self, time_from_start, rel_pose):
    print self.BCOLOR + "[IRPOS] Move relative to cartesian trajectory" + self.ENDC

    self.conmanSwitch([self.robot_name + 'mPoseInt'], [], True)

    actual_pose = self.get_cartesian_pose()

    # Transform poses to frames.
    actualFrame = pm.fromMsg(actual_pose)
    relativeFrame = pm.fromMsg(rel_pose)
    desiredFrame = actualFrame * relativeFrame
    pose = pm.toMsg(desiredFrame)

    cartesianGoal = CartesianTrajectoryGoal()
    cartesianGoal.trajectory.points.append(
        CartesianTrajectoryPoint(rospy.Duration(time_from_start), pose, Twist()))
    cartesianGoal.trajectory.header.stamp = rospy.get_rostime() + rospy.Duration(0.1)
    cartesianGoal.path_tolerance = CartesianTolerance(
        Vector3(self.CART_POS_TOLERANCE, self.CART_POS_TOLERANCE, self.CART_POS_TOLERANCE),
        Vector3(self.CART_ROT_TOLERANCE, self.CART_ROT_TOLERANCE, self.CART_ROT_TOLERANCE))
    cartesianGoal.goal_tolerance = CartesianTolerance(
        Vector3(self.CART_POS_TOLERANCE, self.CART_POS_TOLERANCE, self.CART_POS_TOLERANCE),
        Vector3(self.CART_ROT_TOLERANCE, self.CART_ROT_TOLERANCE, self.CART_ROT_TOLERANCE))

    self.pose_client.send_goal(cartesianGoal)
    self.pose_client.wait_for_result()

    result = self.pose_client.get_result()
    code = self.cartesian_error_code_to_string(result.error_code)
    print self.BCOLOR + "[IRPOS] Result: " + str(code) + self.ENDC

    self.conmanSwitch([], [self.robot_name + 'mPoseInt'], True)
    return result
def compute_frames(self, req):
    # read nominal poses, and set as initial positions
    self.crc.set_intial_frames(posemath.fromMsg(self.w_P_c),
                               posemath.fromMsg(self.ee_P_m))
    # do several iterations of the estimation
    n_comp = 6
    residue_max = []
    residue_mod = []
    for i in range(n_comp):
        print '\ncurrent position'
        print self.crc.w_T_c.p
        residue = self.crc.compute_frames()
        r2 = residue.transpose() * residue
        residue_mod.append(num.sqrt(r2[0, 0]))
        residue_max.append(num.max(num.abs(residue)))
    print '\nresidue_mod'
    print residue_mod
    print '\nresidue_max'
    print residue_max
    # put result back in parameter
    print '\nee_T_m'
    print self.crc.ee_T_m
    print '\nw_T_c'
    print self.crc.w_T_c
    self.ee_P_m = posemath.toMsg(self.crc.ee_T_m)
    self.w_P_c = posemath.toMsg(self.crc.w_T_c)
    return EmptyResponse()
def get_ik_simple(service, goal, seed, joint_names, tip_frame, tool=None):
    validate_quat(goal.pose.orientation)
    # Transforms the goal due to the tip frame
    if tool:
        tool_in_tip = tfl.transformPose(tip_frame, tool)
        tool_in_tip_t = pm.fromMsg(tool_in_tip.pose)
        goal_t = pm.fromMsg(goal.pose)
        #goal.pose = pm.toMsg(tool_in_tip_t.Inverse() * goal_t)
        #goal = PoseStamped(header=goal.header, pose=pm.toMsg(tool_in_tip_t.Inverse() * goal_t))
        goal = PoseStamped(header=goal.header,
                           pose=pm.toMsg(goal_t * tool_in_tip_t.Inverse()))

    req = PositionIKRequest()
    req.ik_link_name = tip_frame
    req.pose_stamped = goal
    req.ik_seed_state.joint_state.name = joint_names
    req.ik_seed_state.joint_state.position = seed
    error_code = 0
    for i in range(10):
        resp = service(req, rospy.Duration(10.0))
        if resp.error_code.val != 1:
            rospy.logerr("Error in IK: %d" % resp.error_code.val)
        else:
            return list(resp.solution.joint_state.position)
    return []
def Transarray2G2O(g2ofname, trans_array):
    index = 1
    prev_pose = Pose()
    prev_pose.orientation.w = 1
    rel_pose = Pose()
    with open(g2ofname, "w") as g2ofile:
        for posetrans in trans_array.transformArray:
            pose = trans2pose(posetrans.transform)
            if index == 1:
                pass
            else:
                rel_pose = posemath.toMsg(
                    posemath.fromMsg(prev_pose).Inverse() * posemath.fromMsg(pose))
            # tmp = posemath.toMsg(posemath.fromMsg(prev_pose) * posemath.fromMsg(rel_pose))
            prev_pose = pose
            print >> g2ofile, "EDGE_SE3:QUAT {id1} {id2} {tx} {ty} {tz} {rx} {ry} {rz} {rw} {COVAR_STR}".format(
                id1=posetrans.child_frame_id,
                id2=posetrans.header.frame_id,
                tx=rel_pose.position.x,
                ty=rel_pose.position.y,
                tz=rel_pose.position.z,
                rx=rel_pose.orientation.x,
                ry=rel_pose.orientation.y,
                rz=rel_pose.orientation.z,
                rw=rel_pose.orientation.w,
                COVAR_STR=COVAR_STR)
            # print >> g2ofile, "VERTEX_SE3:QUAT {pointID} {tx} {ty} {tz} {rx} {ry} {rz} {rw}".format(
            #     pointID=posetrans.header.frame_id,
            #     tx=pose.position.x, ty=pose.position.y, tz=pose.position.z,
            #     rx=pose.orientation.x, ry=pose.orientation.y,
            #     rz=pose.orientation.z, rw=pose.orientation.w)
            index += 1
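# --- Minimal sketch of the relative-edge computation used above (assumes
# geometry_msgs and tf_conversions are available; the poses are made up).
# g2o's EDGE_SE3:QUAT stores the motion between consecutive nodes,
# T_rel = T_prev^-1 * T_curr:
from geometry_msgs.msg import Pose
from tf_conversions import posemath

prev_pose = Pose()
prev_pose.orientation.w = 1
prev_pose.position.x = 1.0
pose = Pose()
pose.orientation.w = 1
pose.position.x = 1.5
rel = posemath.toMsg(posemath.fromMsg(prev_pose).Inverse() * posemath.fromMsg(pose))
print rel.position.x  # 0.5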
def read_tfs(self, req):
    # marker w.r.t. camera
    ok = True
    # read target w.r.t. camera
    w_P_ee = self.current_pose(self.robot_ee_frame_name, self.base_frame_name)
    if w_P_ee == 0:
        ok = False
    c_P_m = self.current_pose(self.marker_frame_name, self.camera_frame_name)
    if c_P_m == 0:
        ok = False
    # ee w.r.t. base
    if ok:
        print self.base_frame_name + " -> " + self.robot_ee_frame_name
        print w_P_ee
        print self.camera_frame_name + " -> " + self.marker_frame_name
        print c_P_m
        # save data
        #safe_pose_to_file(self.f, w_P_ee)
        #safe_pose_to_file(self.f, c_P_m)
        self.crc.store_frames(posemath.fromMsg(w_P_ee), posemath.fromMsg(c_P_m))
        print "saved so far"
        print len(self.crc._w_T_ee)
    else:
        print "error in retrieving a frame"
    return EmptyResponse()
def compute_pose_label(self, img):
    img.fill(0)
    kdl_origin_pose = pm.fromMsg(self.pose_list[0].pose)
    prev_pixel_left = [150, self.image_height - 1]
    prev_pixel_right = [530, self.image_height - 1]
    for pose in self.pose_list:
        kdl_pose = pm.fromMsg(pose.pose)
        delta_pose = kdl_origin_pose.Inverse() * kdl_pose
        pt = np.array([delta_pose.p.x(), delta_pose.p.y(), delta_pose.p.z() - 0.3])
        pt_cam = self.R_cam2base.dot(pt) + self.t_cam2base
        pixel = self.back_project(pt_cam)
        if self.in_image(pixel):
            pt_left = np.array([delta_pose.p.x(), delta_pose.p.y() + 1,
                                delta_pose.p.z() - 0.3])
            pt_left_cam = self.R_cam2base.dot(pt_left) + self.t_cam2base
            pixel_left = self.back_project(pt_left_cam)
            pt_right = np.array([delta_pose.p.x(), delta_pose.p.y() - 1,
                                 delta_pose.p.z() - 0.3])
            pt_right_cam = self.R_cam2base.dot(pt_right) + self.t_cam2base
            pixel_right = self.back_project(pt_right_cam)
            contour = np.array([pixel_left, pixel_right, prev_pixel_right, prev_pixel_left])
            cv2.fillPoly(img, np.int32([contour]), (1, 0, 0))
            prev_pixel_left = pixel_left
            prev_pixel_right = pixel_right
def LoopPosearrayInitpose(transformstamped, posestampedarray_src, posestampedarray_target):
    tmp = 1
    id_src = checkHeaderID(transformstamped.header.frame_id, posestampedarray_src)
    id_target = checkHeaderID(transformstamped.child_frame_id, posestampedarray_target)
    if not (id_src > 0 and id_target > 0):
        print("========================================checkHeaderID===========================")
        print("transformstamped.header.frame_id = {} : posestampedarray_src = {}"
              .format(transformstamped.header.frame_id, posestampedarray_src))
        print("transformstamped.child_frame_id = {} : posestampedarray_target = {}"
              .format(transformstamped.child_frame_id, posestampedarray_target))
        # assert(False)
        return None
    trans_r1_f1_findex = posemath.fromMsg(posestampedarray_src.poseArray[id_src].pose)
    trans_r1_r2_findex_findex = posemath.fromMsg(
        trans2pose(transformstamped.transform)).Inverse()
    trans_r1_r2_f1_finex = trans_r1_f1_findex * trans_r1_r2_findex_findex
    trans_r2_findex_f1 = posemath.fromMsg(
        posestampedarray_target.poseArray[id_target].pose).Inverse()
    trans_r1_r2_f1_f1 = trans_r1_r2_f1_finex * trans_r2_findex_f1
    return posemath.toMsg(trans_r1_r2_f1_f1)
def publish_tf_to_tree(tf_msg, var_frame_id, var_child_frame_id):
    global br, listener
    print("start publish tf")
    listener.waitForTransform(var_frame_id, tf_msg.header.frame_id,
                              rospy.Time(), rospy.Duration(4.0))
    listener.waitForTransform(tf_msg.child_frame_id, var_child_frame_id,
                              rospy.Time(), rospy.Duration(4.0))
    frame_tf_msg = listener._buffer.lookup_transform(
        strip_leading_slash(var_frame_id),
        strip_leading_slash(tf_msg.header.frame_id), rospy.Time(0))
    child_frame_tf_msg = listener._buffer.lookup_transform(
        strip_leading_slash(tf_msg.child_frame_id),
        strip_leading_slash(var_child_frame_id), rospy.Time(0))
    print("frame_tf_msg:" + str(frame_tf_msg))
    print("child_frame_tf_msg:" + str(child_frame_tf_msg))
    frame_tf = posemath.fromMsg(trackutils.trans2pose(frame_tf_msg.transform))
    print("frame_tf:" + str(frame_tf))
    child_frame_tf = posemath.fromMsg(trackutils.trans2pose(child_frame_tf_msg.transform))
    print("child_frame_tf:" + str(child_frame_tf))
    tf_kdl = posemath.fromMsg(trackutils.trans2pose(tf_msg.transform))
    print("tf_kdl:" + str(tf_kdl))
    tf_msg.transform = trackutils.pose2trans(
        posemath.toMsg(frame_tf * tf_kdl * child_frame_tf))
    tf_msg.header.frame_id = var_frame_id
    tf_msg.child_frame_id = var_child_frame_id
    print("publish:" + str(tf_msg))
    br.sendTransformMessage(tf_msg)
def move_rel_to_cartesian_pose_with_contact(self, time_from_start, rel_pose, wrench_constraint):
    print self.BCOLOR + "[IRPOS] Move relative to cartesian trajectory with contact" + self.ENDC

    self.conmanSwitch([self.robot_name + 'mPoseInt',
                       self.robot_name + 'mForceTransformation'], [], True)
    time.sleep(0.05)

    actual_pose = self.get_cartesian_pose()

    # Transform poses to frames.
    actualFrame = pm.fromMsg(actual_pose)
    relativeFrame = pm.fromMsg(rel_pose)
    desiredFrame = actualFrame * relativeFrame
    pose = pm.toMsg(desiredFrame)

    cartesianGoal = CartesianTrajectoryGoal()
    cartesianGoal.wrench_constraint = wrench_constraint
    cartesianGoal.trajectory.points.append(
        CartesianTrajectoryPoint(rospy.Duration(time_from_start), pose, Twist()))
    cartesianGoal.trajectory.header.stamp = rospy.get_rostime() + rospy.Duration(0.1)

    self.pose_client.send_goal(cartesianGoal)
    self.pose_client.wait_for_result()

    result = self.pose_client.get_result()
    code = self.cartesian_error_code_to_string(result.error_code)
    print self.BCOLOR + "[IRPOS] Result: " + str(code) + self.ENDC

    self.conmanSwitch([], [self.robot_name + 'mPoseInt',
                           self.robot_name + 'mForceTransformation'], True)
def TFThread(self):
    while not rospy.is_shutdown():
        small_frame_list = []
        big_frame_list = []
        self_frame = self.robot_id + "/map"
        for tf_frame, tf in self.tf_between_robots_dict.items():
            if tf_frame[:len(self_frame)] == self_frame:
                big_frame_list.append(tf.child_frame_id)
            elif tf_frame[-len(self_frame):] == self_frame:
                small_frame_list.append(tf.header.frame_id)

        small_frame_list.sort()
        for i in range(1, len(small_frame_list)):
            tf1 = self.tf_between_robots_dict[small_frame_list[i - 1] + ' to ' + self_frame]
            tf2 = self.tf_between_robots_dict[small_frame_list[i] + ' to ' + self_frame]
            pose1 = posemath.fromMsg(trans2pose(tf1))
            pose2 = posemath.fromMsg(trans2pose(tf2))
            tf = TransformStamped()
            tf.header.stamp = rospy.Time.now()
            tf.header.frame_id = tf1.header.frame_id
            tf.child_frame_id = tf2.header.frame_id
            tf.transform = pose2trans(posemath.toMsg(pose1 * pose2.Inverse()))
            print("publish tf: " + tf.header.frame_id + " to " + tf.child_frame_id)
            self.br.sendTransformMessage(tf)
        if len(small_frame_list) > 0:
            print("publish tf: " + small_frame_list[-1] + ' to ' + self_frame)
            self.br.sendTransformMessage(
                self.tf_between_robots_dict[small_frame_list[-1] + ' to ' + self_frame])

        big_frame_list.sort()
        if len(big_frame_list) > 0:
            print("publish tf: " + self_frame + ' to ' + big_frame_list[0])
            self.br.sendTransformMessage(
                self.tf_between_robots_dict[self_frame + ' to ' + big_frame_list[0]])
        for i in range(1, len(big_frame_list)):
            tf1 = self.tf_between_robots_dict[self_frame + ' to ' + big_frame_list[i - 1]]
            tf2 = self.tf_between_robots_dict[self_frame + ' to ' + big_frame_list[i]]
            pose1 = posemath.fromMsg(trans2pose(tf1))
            pose2 = posemath.fromMsg(trans2pose(tf2))
            tf = TransformStamped()
            tf.header.stamp = rospy.Time.now()
            tf.header.frame_id = tf1.child_frame_id
            tf.child_frame_id = tf2.child_frame_id
            tf.transform = pose2trans(posemath.toMsg(pose1.Inverse() * pose2))
            print("publish tf: " + tf.header.frame_id + " to " + tf.child_frame_id)
            self.br.sendTransformMessage(tf)

        time.sleep(1)
def Relarray2Globalarray(rel_array, global_array):
    current_pose = Pose()
    current_pose.orientation.w = 1
    global_array.poses.append(current_pose)
    for pose in rel_array.poses:
        current_pose = posemath.toMsg(
            posemath.fromMsg(current_pose) * posemath.fromMsg(pose))
        global_array.poses.append(current_pose)
    return global_array
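# --- Usage sketch for Relarray2Globalarray (values made up): two 1 m steps
# along x accumulate into global poses at x = 0, 1 and 2.
from geometry_msgs.msg import Pose, PoseArray

step = Pose()
step.orientation.w = 1
step.position.x = 1.0
rel_array = PoseArray()
rel_array.poses = [step, step]
global_array = Relarray2Globalarray(rel_array, PoseArray())
print [p.position.x for p in global_array.poses]  # [0.0, 1.0, 2.0]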
def draw_raw_map():
    global package_path
    global arucos_dict
    global BASE_ARUCO_ID
    global rviz_marker_pub
    global aruco_map

    while rviz_marker_pub.get_num_connections() < 1:
        if rospy.is_shutdown():
            rospy.signal_shutdown('Master node is not running')
        # load_aruco_map()
        rospy.logwarn("Please create an RViz subscriber")
        rospy.sleep(5)

    aruco_map[BASE_ARUCO_ID] = get_base_tag_pose()
    # draw the reference marker
    draw_single_marker(BASE_ARUCO_ID, get_base_tag_pose(), alpha=0.8, height=0.05)
    # build a graph
    graph = aruco_udg(max_dis=rospy.get_param('max_distance'))
    order = graph.BFS()
    for aruco_id in order[1:]:
        # skip the first entry: it is the base aruco, whose pose is already known.
        # Visit the markers in topological (BFS) order.
        parent_id = graph.nodes_dict[aruco_id].parent_id
        # all recorded transforms from the parent marker to the current one
        pose_list = arucos_dict[parent_id][aruco_id]
        # weighted mean filter to obtain a single Pose
        pose = weighted_mean_filter(pose_list)
        # pose of the parent marker in the world frame
        parent_pose = aruco_map[parent_id]
        world2parent_mat = pm.toMatrix(pm.fromMsg(parent_pose))
        parent2child_mat = pm.toMatrix(pm.fromMsg(pose))
        world2child_mat = world2parent_mat.dot(parent2child_mat)
        aruco_pose = pm.toMsg(pm.fromMatrix(world2child_mat))
        # append this aruco marker to the map
        aruco_map[aruco_id] = aruco_pose
        # draw the current aruco marker
        draw_single_marker(aruco_id, aruco_pose, alpha=0.8, height=0.05)

    # serialize and save
    with open(package_path + '/data/aruco_map.bin', 'wb') as f:
        pickle.dump(aruco_map, f)
    # publish static TF transforms
    for aruco_id, pose in aruco_map.items():
        send_aruco_static_transform(aruco_id, pose)
    # shut down this node
    rospy.signal_shutdown('Drew all aruco markers and published their TF transforms')
def plug_in_contact(ud):
    """Returns true if the plug is in contact with something."""
    pose_base_gripper = fromMsg(
        TFUtil.wait_and_lookup('base_link', 'r_gripper_tool_frame').pose)
    pose_outlet_plug = (fromMsg(ud.base_to_outlet).Inverse()
                        * pose_base_gripper * fromMsg(ud.gripper_to_plug))

    # check if the difference between desired and measured outlet-plug
    # along the x-axis is more than 1 cm
    ud.outlet_to_plug_contact = toMsg(pose_outlet_plug)
    if math.fabs(pose_outlet_plug.p[0] - ud.approach_offset) > 0.01:
        return True
    return False
def test_custom_reference_relative_move(self):
    """Test if relative moves work with a custom reference frame as expected.

    Test sequence:
        1. Get the test data and publish the reference frame via TF.
        2. Create a relative Ptp with and without a custom reference.
        3. Convert the goals.
        4. Evaluate the results.
    """
    rospy.loginfo("test_custom_reference_frame_relative")

    self.robot.move(Ptp(goal=[0, 0, 0, 0, 0, 0]))

    # get and transform test data
    ref_frame = self.test_data.get_pose("Blend_1_Mid", PLANNING_GROUP_NAME)
    goal_pose_bf = self.test_data.get_pose("Blend_1_Start", PLANNING_GROUP_NAME)
    goal_pose_bf_tf = pm.toTf(pm.fromMsg(goal_pose_bf))
    ref_frame_tf = pm.toTf(pm.fromMsg(ref_frame))
    rospy.sleep(rospy.Duration.from_sec(0.5))

    # init
    base_frame_name = self.robot._robot_commander.get_planning_frame()
    time_tf = rospy.Time.now()
    goal_pose_in_rf = [[0, 0, 0], [0, 0, 0, 1]]
    try:
        self.tf.sendTransform(goal_pose_bf_tf[0], goal_pose_bf_tf[1], time_tf,
                              "rel_goal_pose_bf", base_frame_name)
        self.tf.sendTransform(ref_frame_tf[0], ref_frame_tf[1], time_tf,
                              "ref_rel_frame", base_frame_name)
        self.tf_listener.waitForTransform("rel_goal_pose_bf", "ref_rel_frame",
                                          time_tf, rospy.Duration(2, 0))
        rospy.sleep(rospy.Duration.from_sec(0.1))
        goal_pose_in_rf = self.tf_listener.lookupTransform("ref_rel_frame",
                                                           "rel_goal_pose_bf",
                                                           time_tf)
    except:
        rospy.logerr("Failed to setup transforms for test!")

    goal_pose_rf_msg = pm.toMsg(pm.fromTf(goal_pose_in_rf))

    # move to the initial position and use a relative move to reach the goal
    self.robot.move(Ptp(goal=ref_frame))
    self.tf.sendTransform(ref_frame_tf[0], ref_frame_tf[1], rospy.Time.now(),
                          "ref_rel_frame", base_frame_name)
    rospy.sleep(rospy.Duration.from_sec(0.1))
    self.tf_listener.waitForTransform(base_frame_name, "ref_rel_frame",
                                      rospy.Time(0), rospy.Duration(1, 0))

    ptp = Ptp(goal=goal_pose_rf_msg, reference_frame="ref_rel_frame", relative=True)
    req = ptp._cmd_to_request(self.robot)

    self.assertIsNotNone(req)
    self._analyze_request_pose(TARGET_LINK_NAME, goal_pose_bf, req)
def optitrack_cb(self, msg):
    if msg.header.frame_id != self.global_frame:
        return
    for rigid_body in msg.bodies:
        if rigid_body.tracking_valid:
            if rigid_body.id == self.table_id:
                frame = posemath.fromMsg(rigid_body.pose)
                self.Ttable = posemath.toMatrix(frame)
            if rigid_body.id == self.stick_id:
                frame = posemath.fromMsg(rigid_body.pose)
                self.Tshort = posemath.toMatrix(frame)
def oplus(cur_estimate, step):
    result = deepcopy(cur_estimate)

    # loop over cameras
    for camera, res, camera_index in zip(
            cur_estimate.cameras, result.cameras,
            [r * pose_width for r in range(len(cur_estimate.cameras))]):
        res.pose = posemath.toMsg(
            pose_oplus(posemath.fromMsg(camera.pose),
                       step[camera_index:camera_index + pose_width]))

    # loop over targets
    for i, target in enumerate(cur_estimate.targets):
        target_index = (len(cur_estimate.cameras) + i) * pose_width
        result.targets[i] = posemath.toMsg(
            pose_oplus(posemath.fromMsg(target),
                       step[target_index:target_index + pose_width]))

    return result
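# --- pose_oplus is not shown in this snippet; a plausible right-perturbation
# (an assumption, not necessarily the source's definition) applies a 6-vector
# [dx, dy, dz, droll, dpitch, dyaw] to a PyKDL frame in its own axes:
import PyKDL

def pose_oplus_sketch(frame, step):
    # step is any indexable 6-element sequence of floats
    delta = PyKDL.Frame(PyKDL.Rotation.RPY(step[3], step[4], step[5]),
                        PyKDL.Vector(step[0], step[1], step[2]))
    return frame * delta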
def VOPoseArray2MapPoseArray(vo_pose_array, map_pose_array):
    init_pose = Pose()
    init_pose.orientation.x = 0.5
    init_pose.orientation.y = -0.5
    init_pose.orientation.z = 0.5
    init_pose.orientation.w = -0.5
    for pose in vo_pose_array.poses:
        map_pose_array.poses.append(
            posemath.toMsg(posemath.fromMsg(init_pose) * posemath.fromMsg(pose)))
        #2D# map_pose_array.poses[-1].position.z = 0
    return map_pose_array
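# --- Note on the init_pose quaternion above (verified numerically):
# (x, y, z, w) = (0.5, -0.5, 0.5, -0.5) is the usual camera-optical-to-map
# alignment, i.e. the rotation matrix [[0, 0, 1], [-1, 0, 0], [0, -1, 0]].
# Quick check with PyKDL:
import PyKDL

R = PyKDL.Rotation.Quaternion(0.5, -0.5, 0.5, -0.5)
print R.UnitZ()  # [1, 0, 0]: the optical axis becomes the map x-axis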
def calculate_residual_and_jacobian(cal_samples, cur_estimate):
    """ returns the full residual vector and jacobian """
    # Total number of columns: one pose (6 params) per camera and per target.
    num_cols = (len(cur_estimate.cameras) * pose_width
                + len(cur_estimate.targets) * pose_width)
    # Total number of rows: one (u, v) pair per observed image point.
    num_rows = sum([sum([len(cam.features.image_points) for cam in cur_sample.M_cam])
                    for cur_sample in cal_samples]) * feature_width

    J = matrix(zeros([num_rows, num_cols]))
    residual = matrix(zeros([num_rows, 1]))

    cam_pose_dict = dict([(cur_camera.camera_id, posemath.fromMsg(cur_camera.pose))
                          for cur_camera in cur_estimate.cameras])
    cam_index_dict = dict([(cur_camera.camera_id, cur_index)
                           for cur_camera, cur_index in zip(
                               cur_estimate.cameras,
                               range(len(cur_estimate.cameras)))])

    # Start filling in the jacobian
    cur_row = 0
    # Loop over each observation
    for cur_sample, target_pose_msg, target_index in zip(
            cal_samples, cur_estimate.targets, range(len(cur_estimate.targets))):
        for cam_measurement in cur_sample.M_cam:
            # Find the index of this camera
            try:
                cam_pose = cam_pose_dict[cam_measurement.camera_id]
                cam_index = cam_index_dict[cam_measurement.camera_id]
            except KeyError:
                print "Couldn't find current camera_id in cam_pose dictionary"
                print "  camera_id = %s" % cam_measurement.camera_id
                print "  %s" % cam_pose_dict.keys()
                raise

            # ROS Poses -> KDL Poses
            target_pose = posemath.fromMsg(target_pose_msg)

            end_row = cur_row + len(cam_measurement.features.image_points) * feature_width

            # ROS Target Points -> (4xN) homogeneous coords
            target_pts = matrix([[pt.x, pt.y, pt.z, 1.0]
                                 for pt in cam_measurement.features.object_points]).transpose()

            # Save the residual for this cam measurement
            measurement_vec = matrix(concatenate(
                [[cur_pt.x, cur_pt.y] for cur_pt in cam_measurement.features.image_points])).T
            expected_measurement = sub_h(cam_pose, target_pose, target_pts,
                                         cam_measurement.cam_info)
            residual[cur_row:end_row, 0] = measurement_vec - expected_measurement

            # Compute jacobian for this cam measurement
            camera_J = calculate_sub_jacobian(cam_pose, target_pose, target_pts,
                                              cam_measurement.cam_info, use_cam=True)
            #print "Camera Jacobian [%s]" % cam_measurement.camera_id
            #print camera_J
            target_J = calculate_sub_jacobian(cam_pose, target_pose, target_pts,
                                              cam_measurement.cam_info, use_cam=False)
            J[cur_row:end_row, cam_index * pose_width:((cam_index + 1) * pose_width)] = camera_J
            col_start = (len(cur_estimate.cameras) + target_index) * pose_width
            J[cur_row:end_row, col_start:(col_start + pose_width)] = target_J

            cur_row = end_row
    return residual, J
def __position_cartesian_current_cb(self, data):
    """Callback for the current cartesian position."""
    if data.header.frame_id == "PSM1":
        self.__position_cartesian_current_msg[0] = data.pose
        self.__position_cartesian_current_frame[0] = posemath.fromMsg(data.pose)
        self.__get_position_event[0].set()
    elif data.header.frame_id == "PSM2":
        self.__position_cartesian_current_msg[1] = data.pose
        self.__position_cartesian_current_frame[1] = posemath.fromMsg(data.pose)
        self.__get_position_event[1].set()
    self.__pose_cartesian_current = self.get_current_pose('rad')
def get_outlet_to_plug_ik_goal(ud, pose):
    """Get an IK goal for a pose in the frame of the outlet."""
    pose_gripper_wrist = fromMsg(
        TFUtil.wait_and_lookup("r_gripper_tool_frame", "r_wrist_roll_link",
                               rospy.Time.now(), rospy.Duration(2.0)).pose)

    goal = ArmMoveIKGoal()
    goal.ik_seed = get_action_seed('pr2_plugs_configuration/approach_outlet_seed')
    goal.pose.pose = toMsg(fromMsg(ud.base_to_outlet) * pose
                           * fromMsg(ud.gripper_to_plug).Inverse() * pose_gripper_wrist)
    goal.pose.header.stamp = rospy.Time.now()
    goal.pose.header.frame_id = 'base_link'
    return goal
def map_to_world(self, x, y):
    if not self.map:
        raise NotEnoughDataException('no map message received.')

    stamped = PoseStamped()
    stamped.header.frame_id = self.map.header.frame_id
    stamped.pose.position.x = x * self.map.info.resolution
    stamped.pose.position.y = y * self.map.info.resolution
    stamped.pose.orientation.w = 1

    origin_transform = pm.fromMsg(self.map.info.origin)
    center_transform = pm.fromMsg(stamped.pose)
    stamped.pose = pm.toMsg(origin_transform * center_transform)
    return stamped
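# --- Worked example for map_to_world (values made up): a 0.05 m/cell map
# whose origin pose sits at (-10, -10) with identity orientation. Grid cell
# (100, 60) then lands at world coordinates (-5.0, -7.0):
import PyKDL

origin = PyKDL.Frame(PyKDL.Vector(-10.0, -10.0, 0.0))
cell = PyKDL.Frame(PyKDL.Vector(100 * 0.05, 60 * 0.05, 0.0))
print (origin * cell).p  # [-5, -7, 0]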
def is_good_verification_pose(self, stamped, banner):
    robot_frame = pm.fromMsg(stamped.pose)
    banner_frame = pm.fromMsg(banner.pose.pose)
    view_ray = banner_frame.p - robot_frame.p
    distance = view_ray.Norm()
    if (distance < Params().min_verification_distance
            or distance > Params().max_verification_distance * 1.5):
        rospy.loginfo('bad verification pose: distance {}'.format(distance))
        return False
    # angle = self._get_facing_angle(robot_frame, banner_frame)
    # if abs(angle) > Params().max_verification_angle:
    #     rospy.loginfo('bad verification pose: angle {}'.format(math.degrees(angle)))
    #     return False
    return True
def is_good_approach_pose(self, stamped, banner):
    robot_frame = pm.fromMsg(stamped.pose)
    banner_frame = pm.fromMsg(banner.pose.pose)
    view_ray = banner_frame.p - robot_frame.p
    distance = view_ray.Norm()
    if distance > Params().approach_distance_tolerance:
        rospy.loginfo('bad approach pose: distance {}'.format(distance))
        return False
    angle = self._get_viewing_angle(robot_frame, banner_frame)
    if abs(angle) > Params().max_approach_angle:
        rospy.loginfo('bad approach pose: angle {}'.format(math.degrees(angle)))
        return False
    return True
def sample_approach_from_averaged(self, line1, line2):
    line1_frame = pm.fromMsg(line1)
    line2_frame = pm.fromMsg(line2)

    averaged_frame = PyKDL.Frame()
    averaged_frame.p = (line1_frame.p + line2_frame.p) * 0.5
    [roll, pitch, yaw1] = line1_frame.M.GetRPY()
    [roll, pitch, yaw2] = line2_frame.M.GetRPY()
    averaged_frame.M = PyKDL.Rotation.RPY(0, 0, (yaw1 + yaw2) / 2.0)

    approach_frame = PyKDL.Frame()
    approach_frame.p = (averaged_frame.p
                        + averaged_frame.M * PyKDL.Vector(Params().approach_distance * 1.5, 0, 0))
    [roll, pitch, yaw] = averaged_frame.M.GetRPY()
    approach_frame.M = PyKDL.Rotation.RPY(0, 0, yaw + math.pi)
    return pm.toMsg(approach_frame)
def get_straighten_goal(ud, goal):
    """Generate an ik goal to straighten the plug in the outlet."""
    pose_gripper_wrist = fromMsg(
        TFUtil.wait_and_lookup('r_gripper_tool_frame', 'r_wrist_roll_link').pose)
    pose_base_wrist = (fromMsg(ud.base_to_outlet)
                       * fromMsg(ud.outlet_to_plug_contact)
                       * fromMsg(ud.gripper_to_plug).Inverse()
                       * pose_gripper_wrist)

    goal = ArmMoveIKGoal()
    goal.pose.pose = toMsg(pose_base_wrist)
    goal.pose.header.stamp = rospy.Time.now()
    goal.pose.header.frame_id = 'base_link'
    goal.ik_seed = get_action_seed('pr2_plugs_configuration/approach_outlet_seed')
    goal.move_duration = rospy.Duration(0.5)
    return goal
def get_pull_back_goal(ud, goal):
    """Generate an ik goal to move along the local x axis of the outlet."""
    pose_outlet_plug = PyKDL.Frame(PyKDL.Vector(-0.10, 0, 0))
    pose_gripper_wrist = fromMsg(
        TFUtil.wait_and_lookup('r_gripper_tool_frame', 'r_wrist_roll_link').pose)
    pose_base_wrist = (fromMsg(ud.base_to_outlet) * pose_outlet_plug
                       * fromMsg(ud.gripper_to_plug).Inverse() * pose_gripper_wrist)

    goal = ArmMoveIKGoal()
    goal.pose.pose = toMsg(pose_base_wrist)
    goal.pose.header.stamp = rospy.Time.now()
    goal.pose.header.frame_id = 'base_link'
    goal.ik_seed = get_action_seed('pr2_plugs_configuration/approach_outlet_seed')
    goal.move_duration = rospy.Duration(3.0)
    return goal
def do_t(self, arg):
    if self.teach_mode:
        if self.teach_mode_rel:
            print "Calculating relative motion"
            f1 = posemath.fromMsg(self.last_pose)
            current_global_pose = self.getCurrentPose()
            f2 = posemath.fromMsg(current_global_pose)
            diff = f1 * f2.Inverse()
            print diff
            self.current_traj.poses.append(posemath.toMsg(diff))
        else:
            self.current_traj.poses.append(self.getCurrentPose())
    else:
        print "Please activate teach mode first using tm"
def get_twist_goal(ud, goal):
    """Generate an ik goal to rotate the plug."""
    pose_contact_plug = PyKDL.Frame(PyKDL.Rotation.RPY(ud.twist_angle, 0, 0))
    pose_gripper_wrist = fromMsg(
        TFUtil.wait_and_lookup('r_gripper_tool_frame', 'r_wrist_roll_link').pose)
    pose_base_wrist = (fromMsg(ud.base_to_outlet)
                       * fromMsg(ud.outlet_to_plug_contact)
                       * pose_contact_plug
                       * fromMsg(ud.gripper_to_plug).Inverse()
                       * pose_gripper_wrist)

    goal = ArmMoveIKGoal()
    goal.pose.pose = toMsg(pose_base_wrist)
    goal.pose.header.stamp = rospy.Time.now()
    goal.pose.header.frame_id = 'base_link'
    goal.ik_seed = get_action_seed('pr2_plugs_configuration/approach_outlet_seed')
    goal.move_duration = rospy.Duration(1.0)
    return goal
def optimizeOrientationSamples(moveItInterface, binName, eefPoseInGripperBase,
                               goalFrameName, poseSamples, noiseStdev,
                               desiredZAxis, numAngles):
    """
    Optimizes the orientation around the Z-axis of the pose samples so as to
    achieve configurations for the robot arm that are less likely to be in
    collision with the shelf. We try to optimize an equation of the form
    Rz(theta) * x = y

    @param eefPoseInGripperBase: current pose of the end effector w.r.t. gripper base frame (PoseStamped)
    @param goalFrameName: name of the goal frame (e.g. object frame name)
    @param poseSamples: list of poses (list of PoseStamped)
    @param desiredZAxis: the desired z-axis of the gripper base frame w.r.t. base frame
    @param numAngles: the number of angles to try for the pose optimization
                      (the more the merrier, but slower)
    @return: pose samples with optimized orientation
    """
    binPose = moveItInterface.getTransform(binName)
    F_base_bin = posemath.fromMsg(binPose.pose)
    # F_bin_base = F_base_bin.Inverse()
    F_gripper_base_eef = posemath.fromMsg(eefPoseInGripperBase.pose)
    R_eef_gripper_base = F_gripper_base_eef.M.Inverse()
    F_base_goal = posemath.fromMsg(
        (moveItInterface.getTransform(goalFrameName)).pose)
    optimizedSamples = []
    for sample in poseSamples:
        moveItInterface.checkPreemption()
        # transform samples to bin frame
        F_bin_sample = posemath.fromMsg(
            moveItInterface.transformPose(sample, binName).pose)
        R_bin_sample = F_bin_sample.M
        # get the optimal angle around the Z axis
        optimal_angle = sampleOptimalZAngle(moveItInterface, R_bin_sample,
                                            R_eef_gripper_base, desiredZAxis,
                                            numAngles)
        # calculate the rotated TSR with some gaussian noise and convert to the object frame
        noise = numpy.random.normal(0.0, noiseStdev)
        R_bin_sample_rotated = R_bin_sample * kdl.Rotation.RotZ(optimal_angle + noise)
        F_bin_sample_rotated = kdl.Frame(R_bin_sample_rotated, F_bin_sample.p)
        F_obj_sample_rotated = F_base_goal.Inverse() * F_base_bin * F_bin_sample_rotated
        sample.pose = posemath.toMsg(F_obj_sample_rotated)
        sample.header.frame_id = goalFrameName
        optimizedSamples.append(sample)
    return optimizedSamples
def get_grasp_plug_goal(ud, goal):
    """Get the ik goal for grasping the plug."""
    pose_gripper_wrist = fromMsg(
        TFUtil.wait_and_lookup('r_gripper_tool_frame', 'r_wrist_roll_link').pose)

    goal = ArmMoveIKGoal()
    goal.pose.pose = toMsg(fromMsg(ud.plug_on_base_pose)
                           * fromMsg(ud.gripper_plug_grasp_pose).Inverse()
                           * pose_gripper_wrist)
    goal.pose.header.stamp = rospy.Time.now()
    goal.pose.header.frame_id = 'base_link'
    goal.ik_seed = get_action_seed('pr2_plugs_configuration/grasp_plug_seed')
    goal.move_duration = rospy.Duration(3.0)
    return goal
def markerCallback(data):
    marker_lock.acquire()
    self.visible_markers = []
    for m in data.markers:
        if m.id in marker_list:
            self.visible_markers.append((m.id, pm.fromMsg(m.pose.pose)))
    marker_lock.release()
def servo_to_pose_call(self, req):
    #rospy.loginfo('Received servo to pose request')
    #print req
    if self.driver_status == 'SERVO':
        T = pm.fromMsg(req.target)
        # Check acceleration and velocity limits
        (acceleration, velocity) = self.check_req_speed_params(req)

        # inverse kinematics
        traj = self.planner.getCartesianMove(T, self.q0, self.base_steps,
                                             self.steps_per_meter)
        if len(traj.points) == 0:
            # find a non-local movement
            (code, res) = self.planner.getPlan(req.target, self.q0)
            if not res is None:
                traj = res.planned_trajectory.joint_trajectory
        #if len(traj) == 0:
        #    traj.append(self.planner.ik(T, self.q0))

        # Send command
        if len(traj.points) > 0:
            rospy.logwarn("Robot moving to " + str(traj.points[-1].positions))
            return self.send_trajectory(traj, acceleration, velocity)
        else:
            rospy.logerr('SIMPLE DRIVER -- IK failed')
            return 'FAILURE - IK failed'
    else:
        rospy.logerr('SIMPLE DRIVER -- Not in servo mode')
        return 'FAILURE - not in servo mode'
def processMarkerFeedback(feedback):
    global marker_tf, marker_name
    marker_name = feedback.marker_name
    marker_offset_tf = ((0, 0, marker_offset),
                        tf.transformations.quaternion_from_euler(0, 0, 0))
    marker_tf = pm.toTf(pm.fromMatrix(numpy.dot(
        pm.toMatrix(pm.fromMsg(feedback.pose)),
        pm.toMatrix(pm.fromTf(marker_offset_tf)))))
    if feedback.menu_entry_id:
        marker_tf = zero_tf
def setArmPosition(self, pos, preferedAngs=None):
    self.currentPos = np.array(pos)
    if not self.useOptim:
        angs = self.__oldIVK__(pos, preferedAngs)
    else:
        print(pm.toMatrix(pm.fromMsg(self.ur2ros(pos))))
        trj = trajectoryOptim(self.lastJointAngles,
                              pm.toMatrix(pm.fromMsg(self.ur2ros(pos))))
        trj._numSteps = 1
        angs = trj.optimize()
        loc = angs[-1]
        angs = angs[0][0]
    self.lastJointAngles = angs
    self.__publish__(angs)
    self.currentPos[0:3] = loc[0][0:-1, 3]
    return loc[0][0:-1, 3]
def calc_relative_pose(self, x, y, z, roll=0, pitch=0, yaw=0, in_tip_frame=True):
    _pose = self.endpoint_pose()

    # set target position
    trans = PyKDL.Vector(x, y, z)
    rot = PyKDL.Rotation.RPY(roll, pitch, yaw)
    f2 = PyKDL.Frame(rot, trans)

    if in_tip_frame:
        # in the endpoint's cartesian frame
        _pose = posemath.toMsg(posemath.fromMsg(_pose) * f2)
    else:
        # in the base's cartesian frame
        _pose = posemath.toMsg(f2 * posemath.fromMsg(_pose))
    return _pose
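# --- The frame algebra in calc_relative_pose, standalone (assumes PyKDL and
# tf_conversions; the endpoint pose is made up): a 5 cm offset along the tool
# z-axis is a post-multiplication by the offset frame.
import PyKDL
from geometry_msgs.msg import Pose
from tf_conversions import posemath

tip = Pose()
tip.orientation.w = 1
tip.position.z = 0.5
offset = PyKDL.Frame(PyKDL.Rotation.RPY(0, 0, 0), PyKDL.Vector(0, 0, 0.05))
target = posemath.toMsg(posemath.fromMsg(tip) * offset)
print target.position.z  # 0.55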
def processFeedback(feedback):
    global action_trajectory_client
    if action_trajectory_client == None:
        return
    # print "feedback", feedback.marker_name, feedback.control_name, feedback.event_type
    if (feedback.marker_name == 'head_position_marker') and \
       (feedback.event_type == InteractiveMarkerFeedback.BUTTON_CLICK) and \
       (feedback.control_name == "button"):
        T_B_Td = pm.fromMsg(feedback.pose)
        rz, ry, rx = T_B_Td.M.GetEulerZYX()
        duration = 3.0

        action_trajectory_goal = control_msgs.msg.FollowJointTrajectoryGoal()
        action_trajectory_goal.trajectory.header.stamp = rospy.Time.now() + rospy.Duration(0.2)
        action_trajectory_goal.trajectory.joint_names = ['head_pan_joint', 'head_tilt_joint']
        pt = trajectory_msgs.msg.JointTrajectoryPoint()
        pt.positions = [rz, ry]
        pt.time_from_start = rospy.Duration(duration)
        action_trajectory_goal.trajectory.points.append(pt)
        action_trajectory_client.send_goal(action_trajectory_goal)
        print "moving the head in %s seconds..." % (duration)
def pub_graspit_grasp_tf(self, object_name, graspit_grasp_msg):
    # type: (str, graspit_msgs.msg.Grasp) -> ()
    # Is this needed anymore?
    tf_pose = pm.toTf(pm.fromMsg(graspit_grasp_msg.final_grasp_pose))
    self.tf_broadcaster.sendTransform(tf_pose[0], tf_pose[1],
                                      rospy.Time.now(),
                                      "/grasp_approach_tran", object_name)
def get_staubli_cartesian_as_tran():
    """@brief - Read the staubli end effector's cartesian position as a 4x4 numpy matrix

    Returns a 4x4 numpy matrix on success, an empty matrix on failure.
    """
    current_pose = get_staubli_cartesian_as_pose_msg()
    return pm.toMatrix(pm.fromMsg(current_pose))
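# --- Round-trip sketch (assumes tf_conversions and numpy): toMatrix yields a
# 4x4 homogeneous transform, fromMatrix converts it back to a KDL frame.
import numpy
from geometry_msgs.msg import Pose
from tf_conversions import posemath

p = Pose()
p.orientation.w = 1
p.position.x = 0.3
T = posemath.toMatrix(posemath.fromMsg(p))   # 4x4 numpy array
assert numpy.allclose(T[0, 3], 0.3)
p2 = posemath.toMsg(posemath.fromMatrix(T))  # back to a Pose message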
def get_waypoints(self, frame_type, predicates, transforms, names):
    self.and_srv.wait_for_service()

    type_predicate = PredicateStatement()
    type_predicate.predicate = frame_type
    type_predicate.params = ['*', '', '']

    res = self.and_srv([type_predicate] + predicates)
    print "Found matches: " + str(res.matching)
    #print res

    if (not res.found) or len(res.matching) < 1:
        return None

    poses = []
    for tform in transforms:
        poses.append(pm.fromMsg(tform))
    #print poses

    new_poses = []
    new_names = []
    for match in res.matching:
        try:
            (trans, rot) = self.listener.lookupTransform(self.world, match, rospy.Time(0))
            for (pose, name) in zip(poses, names):
                #resp.waypoints.poses.append(pm.toMsg(pose * pm.fromTf((trans, rot))))
                new_poses.append(pm.toMsg(pm.fromTf((trans, rot)) * pose))
                new_names.append(match + "/" + name)
        except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
            rospy.logwarn('Could not find transform from %s to %s!' % (self.world, match))

    return (new_poses, new_names)
def project_pose(self, pose):
    frame = pm.fromMsg(pose)
    [roll, pitch, yaw] = frame.M.GetRPY()
    frame.M = PyKDL.Rotation.RPY(0, 0, yaw)
    projected = pm.toMsg(frame)
    projected.position.z = 0
    return projected
def transform_wrist_frame(T_tool, ft, tool_x_offset=0.0):
    '''
    :param T_tool: desired *_gripper_tool_frame pose w.r.t. some reference frame.
                   Can be of type kdl.Frame or geometry_msgs.Pose.
    :param ft: bool. True if the arm has a force-torque sensor.
    :param tool_x_offset: (double) offset in tool length.
    :return: T_torso_wrist. geometry_msgs.Pose type. Wrist pose w.r.t. the same
             reference frame as T_tool.
    '''
    if ft:
        T_wrist_tool = kdl.Frame(kdl.Rotation.Identity(),
                                 kdl.Vector(0.216 + tool_x_offset, 0.0, 0.0))
    else:
        T_wrist_tool = kdl.Frame(kdl.Rotation.Identity(),
                                 kdl.Vector(0.180 + tool_x_offset, 0.0, 0.0))

    if type(T_tool) is Pose:
        T_tool_ = posemath.fromMsg(T_tool)
    elif type(T_tool) is tuple and len(T_tool) == 2:
        T_tool_ = posemath.fromTf(T_tool)
    else:
        T_tool_ = T_tool

    T_wrist_ = T_tool_ * T_wrist_tool.Inverse()
    T_wrist = posemath.toMsg(T_wrist_)
    return T_wrist
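# --- Usage sketch for transform_wrist_frame (values made up): a tool pose
# 60 cm in front of the reference frame, on an arm without a force-torque
# sensor, puts the wrist 0.180 m behind the tool along its x-axis.
import PyKDL as kdl

T_tool = kdl.Frame(kdl.Vector(0.6, 0.0, 0.3))
wrist_pose = transform_wrist_frame(T_tool, ft=False)
print wrist_pose.position.x  # 0.42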
def servo_to_pose_call(self, req):
    if self.driver_status == 'SERVO':
        T = pm.fromMsg(req.target)
        # Check acceleration and velocity limits
        (acceleration, velocity) = self.check_req_speed_params(req)

        # inverse kinematics
        traj = self.planner.getCartesianMove(T, self.q0, self.base_steps,
                                             self.steps_per_meter)
        #if len(traj.points) == 0:
        #    # find a non-local movement
        #    (code, res) = self.planner.getPlan(req.target, self.q0)
        #    if not res is None:
        #        traj = res.planned_trajectory.joint_trajectory

        # Send command
        if len(traj.points) > 0:
            rospy.logwarn("Robot moving to " + str(traj.points[-1].positions))
            return self.send_trajectory(traj, acceleration, velocity,
                                        cartesian=False, linear=True)
        else:
            rospy.logerr('SIMPLE DRIVER -- IK failed')
            return 'FAILURE - IK failed'
    else:
        rospy.logerr('SIMPLE DRIVER -- Not in servo mode')
        return 'FAILURE - not in servo mode'
def transformPose(self, pose, targetFrame):
    with self._lock:
        tTargetFramePoseFrame = None
        for trial in range(10):
            self.checkPreemption()
            try:
                tTargetFramePoseFrame = self._tfListener.lookupTransform(
                    targetFrame, pose.header.frame_id, rospy.Time(0))
                break
            except (tf.LookupException, tf.ConnectivityException,
                    tf.ExtrapolationException):
                rospy.sleep(0.5)
        if tTargetFramePoseFrame is None:
            raise ValueError('Could not transform pose from '
                             + pose.header.frame_id + ' to ' + targetFrame)

        # Convert poses to KDL
        fTargetFramePoseFrame = posemath.fromTf(tTargetFramePoseFrame)
        fPoseFrameChild = posemath.fromMsg(pose.pose)

        # Compute transformation
        fTargetFrameChild = fTargetFramePoseFrame * fPoseFrameChild

        # Convert back to ROS message
        poseStampedTargetFrameChild = PoseStamped()
        poseStampedTargetFrameChild.pose = posemath.toMsg(fTargetFrameChild)
        poseStampedTargetFrameChild.header.stamp = rospy.Time.now()
        poseStampedTargetFrameChild.header.frame_id = targetFrame
        return poseStampedTargetFrameChild
def update_tf(self):
    for key in self.waypoints.keys():
        (poses, names) = self.get_waypoints_srv.get_waypoints(
            key, [], self.waypoints[key], self.waypoint_names[key])
        for (pose, name) in zip(poses, names):
            (trans, rot) = pm.toTf(pm.fromMsg(pose))
            self.broadcaster.sendTransform(trans, rot, rospy.Time.now(), name, self.world)
def torque_controller_cb(self, event):
    if rospy.is_shutdown() or self.cart_command is None:
        return
    elapsed_time = rospy.Time.now() - self.cart_command.header.stamp
    if elapsed_time.to_sec() > self.timeout:
        return
    # TODO: Validate msg.header.frame_id

    ## Cartesian error to zero using a Jacobian transpose controller
    x_target = posemath.fromMsg(self.cart_command.pose)
    q = self.get_joint_angles_array()
    x = baxter_to_kdl_frame(self.arm_interface.endpoint_pose())
    xdot = baxter_to_kdl_twist(self.arm_interface.endpoint_velocity())

    # Calculate a Cartesian restoring wrench
    x_error = PyKDL.diff(x_target, x)
    wrench = np.matrix(np.zeros(6)).T
    for i in range(len(wrench)):
        wrench[i] = -(self.kp[i] * x_error[i] + self.kd[i] * xdot[i])

    # Calculate the jacobian
    J = self.kinematics.jacobian(q)

    # Convert the force into a set of joint torques: tau = J^T * wrench
    tau = J.T * wrench

    # Populate the joint torques in baxter API format (dictionary)
    joint_torques = dict()
    for i, name in enumerate(self.joint_names):
        joint_torques[name] = tau[i]
    self.arm_interface.set_joint_torques(joint_torques)
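# --- The restoring-wrench math above, standalone (gains and frames made up):
# PyKDL.diff(a, b) returns the twist that takes frame a to frame b, so a
# positive position error yields a negative (restoring) force.
import PyKDL

x_target = PyKDL.Frame(PyKDL.Vector(0.5, 0.0, 0.2))
x = PyKDL.Frame(PyKDL.Vector(0.52, 0.0, 0.2))
x_error = PyKDL.diff(x_target, x)
print x_error.vel.x()  # 0.02; with kp = 10 the wrench x-component is -0.2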
def callback_state(self, data):
    # If the state is not being reset proceed, otherwise skip the callback
    if self.reset.isSet():
        if self.real_robot:
            # Convert Pose from relative-to-map to relative-to-world frame
            f_r_in_map = posemath.fromMsg(data)
            f_r_in_world = self.world_to_map * f_r_in_map
            data = posemath.toMsg(f_r_in_world)
        x = data.position.x
        y = data.position.y
        orientation = PyKDL.Rotation.Quaternion(data.orientation.x,
                                                data.orientation.y,
                                                data.orientation.z,
                                                data.orientation.w)
        euler_orientation = orientation.GetRPY()
        yaw = euler_orientation[2]

        # Append Pose to Path
        stamped_mir_pose = PoseStamped()
        stamped_mir_pose.pose = data
        stamped_mir_pose.header.stamp = rospy.Time.now()
        stamped_mir_pose.header.frame_id = self.path_frame
        self.mir_path.poses.append(stamped_mir_pose)
        self.mir_exec_path.publish(self.mir_path)

        # Update internal Pose variable
        self.mir_pose = copy.deepcopy([x, y, yaw])
    else:
        pass
def servo_to_pose_cb(self, req):
    if self.driver_status == 'SERVO':
        T = pm.fromMsg(req.target)
        # Check acceleration and velocity limits
        (acceleration, velocity) = self.check_req_speed_params(req)
        stamp = self.acquire()

        # inverse kinematics
        traj = self.planner.getCartesianMove(T,
                                             self.q0,
                                             self.base_steps,
                                             self.steps_per_meter,
                                             self.steps_per_radians,
                                             time_multiplier=(1. / velocity),
                                             percent_acc=acceleration,
                                             use_joint_move=True,
                                             table_frame=self.table_pose)

        # Send command
        if len(traj.points) > 0:
            if stamp is not None:
                rospy.logwarn("Robot moving to " + str(traj.points[-1].positions))
                res = self.send_trajectory(traj, stamp, acceleration, velocity, cartesian=False)
                self.release()
            else:
                res = 'FAILURE -- could not preempt current arm control.'
            return res
        else:
            rospy.logerr('SIMPLE DRIVER -- no trajectory points')
            return 'FAILURE -- no trajectory points'
    else:
        rospy.logerr('SIMPLE DRIVER -- Not in servo mode')
        return 'FAILURE -- not in servo mode'
def threaded_function(self, obj):
    pub = MarkerPublisher("attached_objects")
    while not self.stop_thread:
        pub.publishSinglePointMarker(PyKDL.Vector(), 1, r=1, g=0, b=0, a=1,
                                     namespace='default',
                                     frame_id=obj.link_name,
                                     m_type=Marker.CYLINDER,
                                     scale=Vector3(0.02, 0.02, 1.0),
                                     T=pm.fromMsg(obj.object.primitive_poses[0]))
        try:
            rospy.sleep(0.1)
        except:
            break

    try:
        pub.eraseMarkers(0, 10, namespace='default')
        rospy.sleep(0.5)
    except:
        pass
def callback(self, data):
    self.tfl.waitForTransform("/map", data.header.frame_id, data.header.stamp,
                              rospy.Duration().from_sec(1.0))
    map_pose = self.tfl.transformPose("/map", data)
    cb_in_map = pm.fromMsg(map_pose.pose)
    print "CB IN MAP\n", cb_in_map

    # correct for ambiguous checkerboard positions
    eulers = cb_in_map.M.GetEulerZYX()
    print eulers
    reversing = False
    if math.cos(eulers[0] - math.pi / 2) < 0:
        cb_in_map.M = rotation_correction.M * cb_in_map.M
        print "reversing", cb_in_map
        reversing = True

    best_distance = 10000000
    best_cb = checkerboards[0]
    for cb in checkerboards:
        distance = (cb_in_map.p - cb.p).Norm()
        if distance < best_distance:
            best_distance = distance
            best_cb = cb
    print "closest checkerboard %d" % checkerboards.index(best_cb)

    # compute cb to footprint
    data_out = self.tfl.transformPose("/base_footprint", data)
    #print "now", data_out
    cb_bl = pm.fromMsg(data_out.pose)
    if reversing:
        cb_bl.M = cb_bl.M * rotation_correction.M

    # compute base pose from the known transform and the measured one
    base_pose = best_cb * cb_bl.Inverse()
    base_pose_msg = pm.toMsg(base_pose)
    print "Base Pose "
    print base_pose_msg
    self.compute_net_transform(base_pose)
def broadcast_table_frame(args):
    if table_pose is None:
        return
    with tf_lock:
        trans, rot = pm.toTf(pm.fromMsg(table_pose.pose))
        br.sendTransform(trans, rot, rospy.Time.now(),
                         "/table", table_pose.header.frame_id)
        br.sendTransform(trans, rot, rospy.Time.now() + rospy.Duration(0.005),
                         "/table", table_pose.header.frame_id)
        br.sendTransform(trans, rot, rospy.Time.now() - rospy.Duration(0.005),
                         "/table", table_pose.header.frame_id)
def getStowingEEFPose(moveItInterface, binName, arm, desiredGripperBasePose):
    """
    Computes the EEF pose for the given arm to achieve the desired gripper
    base pose with respect to the frame of the bin named binName.
    """
    gripperFrameName = moveItInterface.getGripperFrameName(arm)
    gripperBaseFrameName = moveItInterface.getGripperBaseFrameName(arm)
    eefPoseInGripperBase = moveItInterface.getTransform(gripperFrameName,
                                                        gripperBaseFrameName)
    binPose = moveItInterface.getTransform(binName)
    F_gripper_base_eef = posemath.fromMsg(eefPoseInGripperBase.pose)
    F_base_bin = posemath.fromMsg(binPose.pose)
    F_bin_gripper_base = posemath.fromMsg(desiredGripperBasePose)
    F_base_eef = F_base_bin * F_bin_gripper_base * F_gripper_base_eef
    pose = posemath.toMsg(F_base_eef)
    stampedPose = ArgumentsCollector.makeStamped(pose, 'base')
    return stampedPose