def tf_from_pose(self, pose_msg):
    """Refresh the cached TransformStamped from a geometry_msgs Pose and broadcast it.

    Copies the pose translation/orientation into ``self.tf_msg``, optionally
    replaces it with the inverted transform (plus extra sign flips on y and
    yaw), and publishes via ``self.broadcaster``.

    :param pose_msg: geometry_msgs/Pose providing position and orientation.
    """
    # Update message timestamp
    self.tf_msg.header.stamp = rospy.Time.now()
    # Update translation
    self.tf_msg.transform.translation.x = pose_msg.position.x
    self.tf_msg.transform.translation.y = pose_msg.position.y
    self.tf_msg.transform.translation.z = pose_msg.position.z
    # Update rotation
    self.tf_msg.transform.rotation.x = pose_msg.orientation.x
    self.tf_msg.transform.rotation.y = pose_msg.orientation.y
    self.tf_msg.transform.rotation.z = pose_msg.orientation.z
    self.tf_msg.transform.rotation.w = pose_msg.orientation.w
    # Skip processing when the quaternion is all zeros (uninitialized pose).
    # NOTE(review): a component-sum of 0.0 can also occur for valid
    # quaternions (e.g. components cancelling); confirm this guard is only
    # meant to catch the all-zero case.
    if sum([self.tf_msg.transform.rotation.x,
            self.tf_msg.transform.rotation.y,
            self.tf_msg.transform.rotation.z,
            self.tf_msg.transform.rotation.w]) != 0.0:
        # NOTE(review): self.invert is compared to the string "True", so it
        # is presumably a string-valued ROS parameter — confirm upstream.
        if self.invert == "True":
            # Create transformerROS and add our transform
            transformer = TransformerROS()
            transformer.setTransform(self.tf_msg)
            # Lookup the transform
            (trans, rot) = transformer.lookupTransform(self.tf_msg.header.frame_id,
                                                       self.tf_msg.child_frame_id,
                                                       rospy.Time(0))
            # Create transform object (4x4 homogeneous matrix)
            transform = tft.concatenate_matrices(tft.translation_matrix(trans),
                                                 tft.quaternion_matrix(rot))
            # Invert
            inverse_tf = tft.inverse_matrix(transform)
            # Get translation, rotation vectors back out
            translation = tft.translation_from_matrix(inverse_tf)
            quaternion = tft.quaternion_from_matrix(inverse_tf)
            # Get RPY
            rpy = tft.euler_from_quaternion(quaternion)
            # NOTE(review): yaw is negated on top of the matrix inversion —
            # presumably a frame-handedness/mirror correction; confirm against
            # the frames this node serves.
            rpy_new = [rpy[0], rpy[1], -rpy[2]]
            # Back to quaternion
            quaternion = tft.quaternion_from_euler(rpy_new[0], rpy_new[1], rpy_new[2])
            # Update translation (y negated — same mirror correction as yaw)
            self.tf_msg.transform.translation.x = translation[0]
            self.tf_msg.transform.translation.y = -translation[1]
            self.tf_msg.transform.translation.z = translation[2]
            # Update rotation
            self.tf_msg.transform.rotation.x = quaternion[0]
            self.tf_msg.transform.rotation.y = quaternion[1]
            self.tf_msg.transform.rotation.z = quaternion[2]
            self.tf_msg.transform.rotation.w = quaternion[3]
        # Publish transform
        self.broadcaster.sendTransform(self.tf_msg)
def get_transformation(frame_from='/base_link', frame_to='/local_map',
                       time_from=None, time_to=None, static_frame=None,
                       tf_listener=None, tf_ros=None):
    """Look up the tf transform taking points from frame_from into frame_to.

    :param frame_from: source frame name.
    :param frame_to: target frame name.
    :param time_from: timestamp for the source frame (full lookup only).
    :param time_to: timestamp for the target frame (full lookup only).
    :param static_frame: fixed frame used by the time-travel (full) lookup.
    :param tf_listener: optional TransformListener to reuse; created if None.
    :param tf_ros: optional TransformerROS to reuse; created if None.
    :returns: (T, trans, rot, euler) — 4x4 matrix, translation, quaternion,
              and Euler angles; all four are None when the lookup fails.
    """
    if tf_listener is None:
        tf_listener = TransformListener()
    if tf_ros is None:
        tf_ros = TransformerROS()

    # A full (time-traveling) lookup is only possible when both stamps are given.
    use_full_lookup = time_from is not None and time_to is not None
    try:
        if use_full_lookup:
            trans, rot = tf_listener.lookupTransformFull(
                frame_to, time_to, frame_from, time_from, static_frame)
        else:
            trans, rot = tf_listener.lookupTransform(
                frame_to, frame_from, rospy.Time(0))
    except (LookupException, ConnectivityException, ExtrapolationException):
        rospy.logerr("exception, from %s to %s frame may not have setup!",
                     frame_from, frame_to)
        return None, None, None, None

    homogeneous = tf_ros.fromTranslationRotation(trans, rot)
    euler = euler_from_quaternion(rot)
    return homogeneous, trans, rot, euler
def __init__(self):
    """Build reference and inverse stopline transforms for a 4-way intersection.

    Origin of the intersection coordinate system is the center of stopline 0.
    """
    # Physical track dimensions (meters).
    tile_side_length = 0.61  # NOTE(review): currently unused in this method.
    lane_width = 0.205
    stopline_thickness = 0.048
    center_markings_thickness = 0.025
    # Short aliases used in the stopline position formulas below.
    q = lane_width
    p = stopline_thickness
    s = center_markings_thickness
    self.stopline_poses = []
    # tf1: intersection -> stopline_i reference transforms.
    # tf2: stopline_i -> intersection (inverse) transforms.
    self.tf1 = TransformerROS()
    self.tf2 = TransformerROS()

    def add_stopline_transform(idx, position, angle):
        # Register both the forward and the inverted transform for stopline idx.
        orientation = unit_vector(
            quaternion_from_euler(0, 0, angle, axes='sxyz'))
        pose = utils.pose(position, orientation)
        # Add transform for reference stopline poses
        child_frame = self.stopline_frame(idx)
        parent_frame = 'intersection'  # NOTE(review): assigned but the literal
                                       # 'intersection' is passed below.
        transform = utils.transform_from_pose('intersection', child_frame,
                                              pose)
        self.tf1.setTransform(transform)
        # Add transform for measured stopline poses
        child_frame = self.intersection_frame(idx)
        parent_frame = self.stopline_frame(idx)
        inverted_pose = utils.invert_pose(utils.pose(
            position, orientation))
        transform = utils.transform_from_pose(parent_frame, child_frame,
                                              inverted_pose)
        self.tf2.setTransform(transform)

    # The four stoplines, counter-clockwise; angles in radians relative to
    # stopline 0 (positions derived from lane/stopline/marking widths).
    add_stopline_transform(0, [0.0, 0.0, 0.0], 0)
    add_stopline_transform(
        1, [-(q / 2.0 + s + q + p / 2.0), p / 2.0 + q / 2.0, 0.0],
        -np.pi / 2.0)
    add_stopline_transform(
        2, [-(q / 2.0 + s + q / 2.0), p / 2.0 + 2 * q + s + p / 2.0, 0.0],
        -np.pi)
    add_stopline_transform(
        3, [q / 2.0 + p / 2.0, p / 2.0 + q + s + q / 2.0, 0.0],
        -3.0 * np.pi / 2.0)
def get_transform_from_pose(pose, tf_ros=None):
    """Return the 4x4 homogeneous matrix from pose to origin (assumed 0,0,0).

    :param pose: geometry_msgs/Pose with position and orientation fields.
    :param tf_ros: optional TransformerROS to reuse; a new one is created
                   when None.
    :returns: 4x4 numpy transformation matrix.
    """
    transformer = TransformerROS() if tf_ros is None else tf_ros
    pos = pose.position
    ori = pose.orientation
    return transformer.fromTranslationRotation(
        (pos.x, pos.y, pos.z),
        (ori.x, ori.y, ori.z, ori.w))
def __init__(self, num_particles=100, vis_noise=10, ultra_noise=100,
             mag_noise=37, linear_noise=.002, angular_noise=2.3, ar_noise=10,
             ar_resample_rate=10, ar_resample=True):
    """Set up the particle filter: log files, noise parameters, particles, ROS I/O.

    :param num_particles: number of particles in the filter.
    :param vis_noise: visual-odometry measurement noise.
    :param ultra_noise: ultrasound measurement noise.
    :param mag_noise: magnetometer noise.
    :param linear_noise: linear motion (propagation) noise.
    :param angular_noise: angular motion (propagation) noise.
    :param ar_noise: AR-tag measurement noise.
    :param ar_resample_rate: how often to resample on AR-tag updates.
    :param ar_resample: whether AR-tag-based resampling is enabled.
    """
    print("Starting a particle filer with %d particles" % num_particles)
    # Data-logging files, one basename per run.
    # NOTE(review): these file handles are held for the object's lifetime and
    # never closed here — confirm a shutdown hook closes them.
    self.filename = os.path.expanduser(
        "~/Dropbox/thesis_data/" + time.strftime('%Y_%m_%d_%H_%M_%S'))  #in the format YYYYMMDDHHMMSS
    self.fp = open(self.filename + ".txt", "w")
    self.fp_part = open(self.filename + "_part.txt", "w")
    self.fp_ar = open(self.filename + "_ar.txt", "w")
    self.fp_alt = open(self.filename + "_alt.txt", "w")
    self.fp_sensor = open(self.filename + "_sensor.txt", "w")
    # Filter configuration.
    self.num_particles = num_particles
    self.vis_noise = vis_noise
    self.ultra_noise = ultra_noise
    self.mag_noise = mag_noise
    self.linear_noise = linear_noise
    self.angular_noise = angular_noise
    self.ar_noise = ar_noise
    self.ar_resample_rate = ar_resample_rate
    self.ar_resample = ar_resample
    # Attitude / heading state.
    self.rotY = 0
    self.rotX = 0
    self.start_mag_heading = 0
    self.start_gyr_heading = 0
    self.gyr_theta = 0
    # Particle set and bookkeeping.
    self.particle_list = []
    self.weight_dict = dict()
    self.first_propagate = True
    self.first_correct = True
    self.step = 0
    # Per-sensor state estimates.
    self.est = particle(self)
    self.acc_est = particle(self)  #Estimation using acc and gyr
    self.vis_est = particle(
        self)  #Estimation using vis odometry and ultrasound
    self.tag_est = particle(self)  #Estimation using visual tags
    for i in range(num_particles):
        self.particle_list.append(particle(self))
    # ROS pose output and tf helpers.
    self.est_pose = Pose()
    # NOTE(review): no queue_size given — rospy warns on this form; confirm
    # whether an explicit queue_size should be added.
    self.est_pub = rospy.Publisher('pf_localization', Pose)
    self.listener = TransformListener()
    self.transformer = TransformerROS()
    self.publish_pose()
def invert_pose(pose):
    """Return the inverse of a pose via a throwaway tf transformer.

    Registers the pose as the 'frame1' -> 'frame2' transform, then expresses
    the origin of 'frame1' in 'frame2', which yields the inverted pose.

    :param pose: geometry_msgs/Pose to invert.
    :returns: geometry_msgs/Pose representing the inverse transform.
    """
    tf_helper = TransformerROS()
    tf_helper.setTransform(transform_from_pose('frame1', 'frame2', pose))
    # Identity pose at the origin of frame1.
    origin_in_frame1 = pose_stamped('frame1', [0.0, 0.0, 0.0],
                                    [0.0, 0.0, 0.0, 1.0])
    origin_in_frame2 = tf_helper.transformPose('frame2', origin_in_frame1)
    return origin_in_frame2.pose
def get_ik(pose):
    """Compute one inverse-kinematics joint solution for a target position.

    :param pose: indexable (x, y, z) target position; x and y are negated
                 before building the target matrix.
    :returns: list of joint values (solution column 2 of invKine's output).
    """
    transformer = TransformerROS()
    # The orientation of /tool0 will be constant.
    quat = quaternion_from_euler(0, 3.14, 1.57)
    target_matrix = transformer.fromTranslationRotation(
        (-pose[0], -pose[1], pose[2]),
        (quat[0], quat[1], quat[2], quat[3]))
    solutions = invKine(target_matrix)
    # Pick solution column 2 and flatten it into a plain list.
    chosen = np.array(solutions[:, 2].transpose())
    return chosen[0, :].tolist()
def __init__(self):
    """Set up the mocap localization node: map state, tf helpers, ROS I/O."""
    self.node_name = "Mocap Localization"
    self.global_map = ObstaclePoseList()
    self.first = True  # process first time
    self.radius = 1  # to check whether the measurement obstacle is correspond to global map or not
    self.confi_threshold = 5  # confidence threshold, to determine whether to add point to map or not
    self.tf = TransformListener()
    self.transformer = TransformerROS()
    # Subscribers
    self.obstacle_list = rospy.Subscriber("/obstacle_list", ObstaclePoseList,
                                          self.call_back, queue_size=1)
    # Publishers
    self.rviz_pub = rospy.Publisher("/map_viz", Marker, queue_size=1)
    self.map_list = rospy.Publisher("/map_list", ObstaclePoseList,
                                    queue_size=1)
def __init__(self):
    """Wire up the scan-to-cloud node: tf, projector, params, pub/sub."""
    self.tfr = TransformerROS()
    self.projector = LaserProjection()
    # Topic and frame configuration from the parameter server, with defaults.
    self.outputTopic = rospy.get_param('output_topic', 'cloud')
    self.inputTopic = rospy.get_param('input_topic', 'scan')
    self.outputFrame = rospy.get_param('output_frame', 'base_link')
    # Create the publisher before subscribing so the callback can use it.
    self.publisher = rospy.Publisher(self.outputTopic, PointCloud2,
                                     queue_size=10)
    self.subscriber = rospy.Subscriber(self.inputTopic, LaserScan,
                                       self.laser_callback)
def __init__(self, map_frame, odom_frame, projection_namespace,
             kitchen_namespace):
    """Initialize the local transformer and seed it from the robot URDF.

    :param map_frame: name of the map frame.
    :param odom_frame: name of the odom frame.
    :param projection_namespace: namespace used for projection frames.
    :param kitchen_namespace: namespace used for kitchen frames.
    """
    # Frame names of the map and odom frame
    self.map_frame = map_frame
    self.odom_frame = odom_frame
    # Namespaces
    self.projection_namespace = projection_namespace
    self.kitchen_namespace = kitchen_namespace
    # TF tf_stampeds and static_tf_stampeds of the Robot in the BulletWorld:
    # These are initialized with the function init_transforms_from_urdf and are
    # used to update the local transformer with the function update_local_transformer_from_btr
    self.tf_stampeds = []
    self.static_tf_stampeds = []
    # Interpolating transformer with a 10-second transform cache.
    self.local_transformer = TransformerROS(interpolate=True,
                                            cache_time=Duration(10.0))
    self.init_local_tf_with_tfs_from_urdf()
def transform_ar_tag(message):
    """
    Input: message of type AlvarMarkers
    Transforms the encoded PoseStamped to the base frame and stores it in
    transformed_message

    Side effects: updates the module globals transformed_message, board_x,
    board_y, board_z, and counter. Processes at most two messages total
    (counter guard), using only the first marker in each message.
    """
    global transformed_message
    # Nothing to do without markers.
    if message.markers == []:
        return
    global counter
    # Stop after the first couple of messages have been handled.
    if counter > 1:
        return
    # Create a TransformerROS to transform the AR tag poses we get
    t = TransformerROS(True, rospy.Duration(10.0))
    # Wait for our transform and get it
    # NOTE(review): lookupTransform('/head_camera', '/base', ...) is fed into
    # a transform whose parent is 'head_camera' and child is 'base' — confirm
    # the direction matches tf's (target, source) convention for this use.
    tf_listener.waitForTransform('/head_camera', '/base', rospy.Time(),
                                 rospy.Duration(5.0))
    (trans, rot) = tf_listener.lookupTransform('/head_camera', '/base',
                                               rospy.Time(0))
    # Create our TransformStamped object
    transform = TransformStamped()
    transform.child_frame_id = 'base'
    transform.header.frame_id = 'head_camera'
    transform.header.stamp = rospy.Time(0)
    transform.transform.translation.x = trans[0]
    transform.transform.translation.y = trans[1]
    transform.transform.translation.z = trans[2]
    transform.transform.rotation.x = rot[0]
    transform.transform.rotation.y = rot[1]
    transform.transform.rotation.z = rot[2]
    transform.transform.rotation.w = rot[3]
    # Set the transform for t
    t.setTransform(transform)
    pose = message.markers[0].pose  # Assume one marker for now
    pose.header.frame_id = '/head_camera'
    transformed_message = t.transformPose('/base', pose)
    global board_x, board_y, board_z
    # 0.177 is presumably the board/tag edge length, so the y/z offsets move
    # from the tag center to a corner — TODO confirm against the hardware.
    board_x = transformed_message.pose.position.x
    board_y = transformed_message.pose.position.y - 0.177 / 2
    board_z = transformed_message.pose.position.z - 0.177 / 2
    counter += 1
def __init__(self):
    """Set up the ground-plane estimation node: subscribers, publishers,
    cameras, RANSAC plane fitter, and tf helpers."""
    # specify topic and data type
    self.sub_bbox_1 = rospy.Subscriber("camera1/detection/vision_objects",
                                       DetectedObjectArray,
                                       self.bbox_array_callback)
    self.sub_bbox_6 = rospy.Subscriber("camera6/detection/vision_objects",
                                       DetectedObjectArray,
                                       self.bbox_array_callback)
    self.sub_pcd = rospy.Subscriber("/points_raw", PointCloud2,
                                    self.pcd_callback)
    # self.sub_pose = rospy.Subscriber("/current_pose", PoseStamped, self.pose_callback)
    # Publisher
    self.pub_intersect_markers = rospy.Publisher(
        "/vision_objects_position_rviz", MarkerArray, queue_size=10)
    self.pub_plane_markers = rospy.Publisher("/estimated_plane_rviz",
                                             MarkerArray, queue_size=10)
    self.pub_plane = rospy.Publisher("/estimated_plane", Plane, queue_size=10)
    self.pub_pcd_inlier = rospy.Publisher("/points_inlier", PointCloud2,
                                          queue_size=10)
    self.pub_pcd_outlier = rospy.Publisher("/points_outlier", PointCloud2,
                                           queue_size=10)
    # Camera models for the two detection streams.
    self.cam1 = camera_setup_1()
    self.cam6 = camera_setup_6()
    # Initial plane guess in the vehicle frame; world-frame plane and tracker
    # are filled in later by the callbacks.
    self.plane = Plane3D(-0.157, 0, 0.988, 1.9)
    self.plane_world = None
    self.plane_tracker = None
    # MSAC-variant RANSAC over Plane3D models.
    self.sac = RANSAC(Plane3D, min_sample_num=3, threshold=0.22,
                      iteration=200, method="MSAC")
    # tf helpers (listener, raw Transformer, and matrix-producing TransformerROS).
    self.tf_listener = TransformListener()
    self.tfmr = Transformer()
    self.tf_ros = TransformerROS()
def __init__(self):
    """Set up the local-map/path-planning node: synchronized inputs,
    publishers, map geometry, and A* planner state."""
    self.node_name = rospy.get_name()
    self.tf = TransformListener()
    self.transformer = TransformerROS()
    rospy.loginfo("[%s] Initializing " % (self.node_name))
    #rospy.Subscriber("/pcl_points", PoseArray, self.call_back, queue_size=1, buff_size = 2**24)
    # Synchronize point cloud poses with ground-truth odometry (within 0.1 s).
    sub_pcl = message_filters.Subscriber("/pcl_points", PoseArray)
    sub_odom = message_filters.Subscriber("/odometry/ground_truth", Odometry)
    ats = ApproximateTimeSynchronizer((sub_pcl, sub_odom), queue_size=1,
                                      slop=0.1)
    ats.registerCallback(self.call_back)
    rospy.Subscriber("/move_base_simple/goal", PoseStamped, self.cb_new_goal,
                     queue_size=1)
    self.pub_map = rospy.Publisher('/local_map', OccupancyGrid, queue_size=1)
    self.pub_rviz = rospy.Publisher("/wp_line", Marker, queue_size=1)
    self.pub_poses = rospy.Publisher("/path_points", PoseArray, queue_size=1)
    # Occupancy-grid geometry (cells are resolution meters wide).
    self.resolution = 0.25
    self.robot = Pose()
    self.width = 200
    self.height = 200
    self.origin = Pose()
    self.local_map = OccupancyGrid()
    # Obstacle inflation (in cells) applied when rasterizing the map.
    self.dilating_size = 6
    self.wall_width = 3
    # Planning state.
    self.start_planning = False
    self.transpose_matrix = None
    self.goal = []
    self.astar = AStar()
    self.msg_count = 0
    self.planning_range = 20
    self.frame_id = "map"
try: (trans, rot) = listener.lookupTransform(self.map_frame, self.robot_frame, t) except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException): continue transpose_matrix = transformer.fromTranslationRotation(trans, rot) # print(rot) p = np.array([0, 0, 0, 1]) new_p = np.dot(transpose_matrix, p) posestamped = PoseStamped() posestamped.header.frame_id = self.map_frame posestamped.header.stamp = t posestamped.pose.position.x = new_p[0] posestamped.pose.position.y = new_p[1] posestamped.pose.position.z = new_p[2] posestamped.pose.orientation.x = rot[0] posestamped.pose.orientation.y = rot[1] posestamped.pose.orientation.z = rot[2] posestamped.pose.orientation.w = rot[3] self.pub_pose.publish(posestamped) rospy.sleep(0.1) if __name__ == '__main__': rospy.init_node('tf2topic') listener = TransformListener() transformer = TransformerROS() foo = tf2topic() rospy.spin()