Example #1
    def get_ros_transform(self,
                          transform=None,
                          frame_id=None,
                          child_frame_id=None):
        """
        Function to provide the current ROS transform

        :return: the ROS transform
        :rtype: geometry_msgs.msg.TransformStamped
        """
        tf_msg = TransformStamped()
        if frame_id:
            tf_msg.header = self.get_msg_header(frame_id)
        else:
            tf_msg.header = self.get_msg_header("map")
        if child_frame_id:
            tf_msg.child_frame_id = child_frame_id
        else:
            tf_msg.child_frame_id = self.get_prefix()
        if transform:
            tf_msg.transform = transform
        else:
            tf_msg.transform = trans.carla_transform_to_ros_transform(
                self.carla_actor.get_transform())
        return tf_msg
Example #2
def vehicle_info_callback(msg):
    print("get vehicle info")
    global get_vehicle
    get_vehicle = True
    global vehicle_info
    vehicle_info = msg

    transform = TransformStamped()
    transform.header = vehicle_info.header
    rotation_matrix = np.array([
        (vehicle_info.pose[0], vehicle_info.pose[1], vehicle_info.pose[2]),
        (vehicle_info.pose[4], vehicle_info.pose[5], vehicle_info.pose[6]),
        (vehicle_info.pose[8], vehicle_info.pose[9], vehicle_info.pose[10])
    ])
    r, p, y = rotationMatrixToEulerAngles(rotation_matrix)
    quaternion = quaternion_from_euler(r, p, y)
    transform.transform.rotation.x = quaternion[0]
    transform.transform.rotation.y = quaternion[1]
    transform.transform.rotation.z = quaternion[2]
    transform.transform.rotation.w = quaternion[3]
    transform.transform.translation.x = vehicle_info.pose[3]
    transform.transform.translation.y = vehicle_info.pose[7]
    transform.transform.translation.z = vehicle_info.pose[11]
    transform.child_frame_id = "global"
    global transforms
    transforms.transforms.append(transform)
    tf_pub.publish(transforms)
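The callback above recovers Euler angles from the rotation block and then builds a quaternion. For reference, a minimal sketch of a more direct route via tf.transformations.quaternion_from_matrix, assuming the same row-major 3x4 pose layout as above; the helper name pose_row_major_to_transform is made up for illustration:

import numpy as np
from geometry_msgs.msg import TransformStamped
from tf.transformations import quaternion_from_matrix

def pose_row_major_to_transform(pose, header, child_frame_id="global"):
    # Rebuild the 4x4 homogeneous matrix from the first 12 row-major pose entries.
    mat = np.identity(4)
    mat[:3, :4] = np.array(pose[:12], dtype=float).reshape(3, 4)
    q = quaternion_from_matrix(mat)  # returns (x, y, z, w)
    t = TransformStamped()
    t.header = header
    t.child_frame_id = child_frame_id
    t.transform.translation.x, t.transform.translation.y, t.transform.translation.z = mat[:3, 3]
    t.transform.rotation.x, t.transform.rotation.y, t.transform.rotation.z, t.transform.rotation.w = q
    return t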
Example #3
 def update(self):
     try:
         with self.get_god_map():
             robot_links = set(self.unsafe_get_robot().get_link_names())
             attached_links = robot_links - self.original_links
             if attached_links:
                 tf_msg = TFMessage()
                 get_fk = self.unsafe_get_robot().get_fk_pose
                 for link_name in attached_links:
                     parent_link_name = self.unsafe_get_robot().get_parent_link_of_link(link_name)
                     fk = get_fk(parent_link_name, link_name)
                     tf = TransformStamped()
                     tf.header = fk.header
                     tf.header.stamp = rospy.get_rostime()
                     tf.child_frame_id = link_name
                     tf.transform.translation.x = fk.pose.position.x
                     tf.transform.translation.y = fk.pose.position.y
                     tf.transform.translation.z = fk.pose.position.z
                     tf.transform.rotation = normalize_quaternion_msg(fk.pose.orientation)
                     tf_msg.transforms.append(tf)
                 self.tf_pub.publish(tf_msg)
     except KeyError as e:
         pass
     except UnboundLocalError as e:
         pass
     except ValueError as e:
         pass
     return Status.SUCCESS
Example #4
def transform_cloud_to_map(cloud):
    rospy.loginfo("to map from " + cloud.header.frame_id)

    transformation_store = TransformListener()
    rospy.sleep(2)

    t = transformation_store.getLatestCommonTime("map", cloud.header.frame_id)
    tr_r = transformation_store.lookupTransform("map", cloud.header.frame_id,
                                                t)

    tr = Transform()
    tr.translation = Vector3(tr_r[0][0], tr_r[0][1], tr_r[0][2])
    tr.rotation = Quaternion(tr_r[1][0], tr_r[1][1], tr_r[1][2], tr_r[1][3])
    tr_s = TransformStamped()
    tr_s.header = std_msgs.msg.Header()
    tr_s.header.stamp = rospy.Time.now()
    tr_s.header.frame_id = "map"
    tr_s.child_frame_id = cloud.header.frame_id
    tr_s.transform = tr
    t_kdl = transform_to_kdl(tr_s)
    points_out = []
    for p_in in pc2.read_points(cloud, field_names=["x", "y", "z", "rgb"]):
        p_out = t_kdl * PyKDL.Vector(p_in[0], p_in[1], p_in[2])
        points_out.append([p_out[0], p_out[1], p_out[2]])

    cloud.header.frame_id = "map"
    # points_out only carries x/y/z, so build a plain XYZ cloud instead of reusing cloud.fields
    res = pc2.create_cloud_xyz32(cloud.header, points_out)
    rospy.loginfo(cloud.header)
    return res
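For reference, the same reprojection can be done with tf2 directly. A minimal sketch, assuming ROS 1 with the tf2_sensor_msgs package available and a long-lived listener instead of one created per call; the helper name is illustrative:

import rospy
import tf2_ros
from tf2_sensor_msgs.tf2_sensor_msgs import do_transform_cloud

# assumes rospy.init_node(...) has already been called
tf_buffer = tf2_ros.Buffer()
tf_listener = tf2_ros.TransformListener(tf_buffer)

def transform_cloud_to_map_tf2(cloud):
    # wait up to 2 s for the map <- cloud-frame transform
    transform = tf_buffer.lookup_transform("map", cloud.header.frame_id,
                                           rospy.Time(0), rospy.Duration(2.0))
    # do_transform_cloud applies the transform to every point and restamps the cloud
    return do_transform_cloud(cloud, transform)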
Example #5
    def orb_pose_cb(self, msg):
        """
        :type msg: PoseStamped
        :param msg:
        :return:
        """
        if not self.map_initialized:
            self.map_initialized = True
            static_request = SetStaticTfRequest()
            static_request.tf = self.map_tf
            static_request.tf.header.frame_id = "Unity"
            static_request.tf.child_frame_id = "map"
            self.static_srv.call(static_request)
            rospy.loginfo("Requesting static")

            topic_request = GetStampedTopicTfRequest()
            topic_request.out_frame = "Child"
            topic_request.topic_name = "/orb_slam2_mono/map_points"
            topic_request.out_topic_name_recommendation = "/unity/map_points"
            self.tf_topic_srv.call(topic_request)
            rospy.loginfo("Requesting Topic Transformation for PointCloud2")
        else:
            tf_msg = TransformStamped()
            tf_msg.header = msg.header
            tf_msg.header.frame_id = "map"
            tf_msg.child_frame_id = "camera_link"
            tf_msg.transform.translation.x = msg.pose.position.x
            tf_msg.transform.translation.y = msg.pose.position.y
            tf_msg.transform.translation.z = msg.pose.position.z
            tf_msg.transform.rotation.x = msg.pose.orientation.x
            tf_msg.transform.rotation.y = msg.pose.orientation.y
            tf_msg.transform.rotation.z = msg.pose.orientation.z
            tf_msg.transform.rotation.w = msg.pose.orientation.w
            self.orb_tf_pub.publish(tf_msg)
Example #6
 def broadcast_static_frame(self, name, pose_stamped):
     frame = TransformStamped()
     frame.header = pose_stamped.header
     frame.child_frame_id = name
     frame.transform.translation = pose_stamped.pose.position
     frame.transform.rotation = pose_stamped.pose.orientation
     self.tf_static.sendTransform(frame)
Example #7
    def navigate(self):
        rate = self.rospy.Rate(50)  # 50 Hz
        
        msg = TransformStamped()
        msg.header = Header() 
        msg.header.frame_id = "usb_cam"
        msg.header.stamp = rospy.Time.now()

        while 1:

            msg.transform.translation.x = self.currentx
            msg.transform.translation.y = self.currenty
            msg.transform.translation.z = self.currentz
            msg.transform.rotation.x = self.currentxo
            msg.transform.rotation.y = self.currentyo
            msg.transform.rotation.z = self.currentzo
            msg.transform.rotation.w = self.currentwo
            '''
            msg.pose.position.x = self.currentx
            msg.pose.position.y = self.currenty
            msg.pose.position.z = self.currentz
            msg.pose.orientation.x = self.currentxo
            msg.pose.orientation.y = self.currentyo
            msg.pose.orientation.z = self.currentzo
            msg.pose.orientation.w = self.currentwo
            '''
            # For demo purposes we will lock yaw/heading to north.
            #yaw_degrees = 0  # North
            #yaw = radians(yaw_degrees)
            #quaternion = quaternion_from_euler(0, 0, yaw)
            #msg.pose.orientation = Quaternion(*quaternion)

            self.pub.publish(msg)

            rate.sleep()
Example #8
 def on_result(self, boxes, classes):
     rospy.logdebug("on_result")
     msg = ObjectDetection()
     msg.header = boxes.header
     for box, label, prob in zip(boxes.boxes, classes.label_names,
                                 classes.label_proba):
         if not label:
             continue
         pose = Object6DPose()
         pose.pose = box.pose
         pose.reliability = prob
         pose.type = label
         msg.objects.append(pose)
         if self.publish_tf:
             t = TransformStamped()
             t.header = box.header
             t.child_frame_id = label
             t.transform.translation.x = box.pose.position.x
             t.transform.translation.y = box.pose.position.y
             t.transform.translation.z = box.pose.position.z
             t.transform.rotation.x = box.pose.orientation.x
             t.transform.rotation.y = box.pose.orientation.y
             t.transform.rotation.z = box.pose.orientation.z
             t.transform.rotation.w = box.pose.orientation.w
             self.tfb.sendTransform(t)
     self.pub_detect.publish(msg)
Example #9
def pose_to_tf(pose, child_frame_id):
    tf = TransformStamped()
    tf.header = pose.header
    tf.child_frame_id = child_frame_id
    tf.transform.translation = pose.pose.position
    tf.transform.rotation = pose.pose.orientation
    return tf
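A minimal usage sketch for pose_to_tf above, assuming ROS 1 with rospy and a tf2_ros broadcaster; the node and frame names are made up for illustration:

import rospy
import tf2_ros
from geometry_msgs.msg import PoseStamped

rospy.init_node("pose_to_tf_demo")              # hypothetical node name
broadcaster = tf2_ros.TransformBroadcaster()

pose = PoseStamped()
pose.header.stamp = rospy.Time.now()
pose.header.frame_id = "map"                    # parent frame of the broadcast transform
pose.pose.orientation.w = 1.0                   # identity rotation
broadcaster.sendTransform(pose_to_tf(pose, "robot"))   # "robot" is an example child frame

Note that pose_to_tf copies the geometry_msgs Point into the Vector3 translation slot; rospy serializes that without complaint, but stricter type-checked APIs (ROS 2, for example) would typically reject it.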
Example #10
def invert_transform(msg):
    """
    The purpose of this is to invert a TransformStamped message.
    :type msg: TransformStamped
    :param msg:
    :return: TransformStamped that has been inverted
    """
    trans_mat = translation_matrix([
        msg.transform.translation.x, msg.transform.translation.y,
        msg.transform.translation.z
    ])
    quat_mat = quaternion_matrix([
        msg.transform.rotation.x, msg.transform.rotation.y,
        msg.transform.rotation.z, msg.transform.rotation.w
    ])
    homogenous_transformation = concatenate_matrices(trans_mat, quat_mat)
    inverse_transformation = inverse_matrix(homogenous_transformation)
    translation = translation_from_matrix(inverse_transformation)
    quaternion = quaternion_from_matrix(inverse_transformation)
    t = TransformStamped()
    t.header = msg.header
    t.child_frame_id = msg.child_frame_id
    t.transform.translation.x = translation[0]
    t.transform.translation.y = translation[1]
    t.transform.translation.z = translation[2]
    t.transform.rotation.x = quaternion[0]
    t.transform.rotation.y = quaternion[1]
    t.transform.rotation.z = quaternion[2]
    t.transform.rotation.w = quaternion[3]

    return t
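A quick numeric check of invert_transform, composing a transform with its inverse back to (approximately) the identity matrix; note that the function leaves header.frame_id and child_frame_id unchanged, so callers who need the frames swapped must do that themselves. A minimal sketch, reusing invert_transform from above and only tf.transformations helpers:

import numpy as np
from geometry_msgs.msg import TransformStamped
from tf.transformations import translation_matrix, quaternion_matrix, concatenate_matrices

def to_matrix(msg):
    # 4x4 homogeneous matrix of a TransformStamped
    t, q = msg.transform.translation, msg.transform.rotation
    return concatenate_matrices(translation_matrix([t.x, t.y, t.z]),
                                quaternion_matrix([q.x, q.y, q.z, q.w]))

original = TransformStamped()
original.transform.translation.x = 1.0
original.transform.rotation.z = 1.0   # 180 degree yaw, quaternion (0, 0, 1, 0)
inverted = invert_transform(original)
assert np.allclose(np.matmul(to_matrix(original), to_matrix(inverted)), np.identity(4))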
Example #12
def callback(msg):
    print "got imu"
    # Offset to put the cloud in a different place than the map:
    x_offset = 120
    y_offset = 0
    p = PoseWithCovarianceStamped()
    p.header = msg.header
    p.header.frame_id = "map"

    p.pose.pose.orientation = msg.orientation
    p.pose.pose.position.x = x_offset
    p.pose.pose.position.y = y_offset

    pub.publish(p)

    tfmsg = TFMessage()
    transformS = TransformStamped()

    transformS.header = msg.header
    transform = Transform()
    transform.rotation = msg.orientation
    transform.translation.x = x_offset
    transform.translation.y = y_offset
    transformS.transform = transform
    transformS.child_frame_id = "base"
    tfmsg.transforms.append(transformS)
    pub_tf.publish(tfmsg)
Example #13
 def pub_transform(self, detection):
     tfs = TransformStamped()
     tfs.header = detection.header
     tfs.child_frame_id = detection.label + '_' + str(detection.id)
     tfs.transform.translation = detection.pose.pose.position
     tfs.transform.rotation = detection.pose.pose.orientation
     self._tf2_bcaster.sendTransform(tfs)
Example #14
def apply_transform_inv(from_tf, to_tf):
    def invert(trfm):
        new_tf = deepcopy(trfm)
        quat_list = [
            trfm.transform.rotation.x, trfm.transform.rotation.y,
            trfm.transform.rotation.z, trfm.transform.rotation.w
        ]
        rot = quaternion_matrix(quat_list)[:3, :3]
        quat_inv = quaternion_inverse(quat_list)
        [
            new_tf.transform.translation.x, new_tf.transform.translation.y,
            new_tf.transform.translation.z
        ] = np.matmul(rot.T, [
            -trfm.transform.translation.x, -trfm.transform.translation.y,
            -trfm.transform.translation.z
        ])
        [
            new_tf.transform.rotation.x, new_tf.transform.rotation.y,
            new_tf.transform.rotation.z, new_tf.transform.rotation.w
        ] = [quat_inv[0], quat_inv[1], quat_inv[2], quat_inv[3]]

        return new_tf

    def get_R(trfm):
        quat_list = trfm.transform.rotation.x, trfm.transform.rotation.y, trfm.transform.rotation.z, trfm.transform.rotation.w
        rot_matrix = quaternion_matrix(quat_list)
        return rot_matrix[:3, :3]

    new_tf = TransformStamped()
    new_tf.header = from_tf.header
    new_tf.child_frame_id = to_tf.child_frame_id

    to_tf_inv = invert(to_tf)
    R = get_R(from_tf)

    [
        new_tf.transform.translation.x, new_tf.transform.translation.y,
        new_tf.transform.translation.z
    ] = [
        from_tf.transform.translation.x, from_tf.transform.translation.y,
        from_tf.transform.translation.z
    ] + np.matmul(R, [
        to_tf_inv.transform.translation.x, to_tf_inv.transform.translation.y,
        to_tf_inv.transform.translation.z
    ])

    new_euler = quaternion_multiply([
        from_tf.transform.rotation.x, from_tf.transform.rotation.y,
        from_tf.transform.rotation.z, from_tf.transform.rotation.w
    ], [
        to_tf_inv.transform.rotation.x, to_tf_inv.transform.rotation.y,
        to_tf_inv.transform.rotation.z, to_tf_inv.transform.rotation.w
    ])

    [
        new_tf.transform.rotation.x, new_tf.transform.rotation.y,
        new_tf.transform.rotation.z, new_tf.transform.rotation.w
    ] = [new_euler[0], new_euler[1], new_euler[2], new_euler[3]]

    return new_tf
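The composition in apply_transform_inv is equivalent to multiplying the 4x4 matrix of from_tf by the inverse of to_tf's matrix. A minimal cross-check sketch against tf.transformations, reusing apply_transform_inv from above (which itself assumes numpy and the tf.transformations quaternion helpers are imported); the helpers tf_to_matrix and make_tf are made up for the check:

import numpy as np
from geometry_msgs.msg import TransformStamped
from tf.transformations import (translation_matrix, quaternion_matrix, concatenate_matrices,
                                inverse_matrix, quaternion_from_euler)

def tf_to_matrix(msg):
    # 4x4 homogeneous matrix of a TransformStamped
    t, q = msg.transform.translation, msg.transform.rotation
    return concatenate_matrices(translation_matrix([t.x, t.y, t.z]),
                                quaternion_matrix([q.x, q.y, q.z, q.w]))

def make_tf(x, y, z, roll, pitch, yaw):
    # build a TransformStamped from a translation and Euler angles
    msg = TransformStamped()
    msg.transform.translation.x, msg.transform.translation.y, msg.transform.translation.z = x, y, z
    (msg.transform.rotation.x, msg.transform.rotation.y,
     msg.transform.rotation.z, msg.transform.rotation.w) = quaternion_from_euler(roll, pitch, yaw)
    return msg

a = make_tf(1.0, 2.0, 0.5, 0.0, 0.0, 0.3)
b = make_tf(-0.2, 0.1, 0.0, 0.1, 0.0, -0.5)
expected = np.matmul(tf_to_matrix(a), inverse_matrix(tf_to_matrix(b)))
assert np.allclose(tf_to_matrix(apply_transform_inv(a, b)), expected)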
Example #15
def transform_helper(vec3, frame):
    pos = TransformStamped()
    pos.child_frame_id = frame
    pos.header = Header()
    pos.transform = Transform()
    pos.transform.translation = vec3

    return pos
Example #17
    def _publish_tf(self, poseStamped, name="moveit_target"):

        transform = TransformStamped()
        transform.header = poseStamped.header
        transform.transform.translation = poseStamped.pose.position
        transform.transform.rotation = poseStamped.pose.orientation
        transform.child_frame_id = name
        self.br.sendTransformMessage(transform)
Example #18
 def add_frame_from_pose(self, name, pose_stamped):
     with self.broadcasting_frames_lock:
         frame = TransformStamped()
         frame.header = pose_stamped.header
         frame.child_frame_id = name
         frame.transform.translation = pose_stamped.pose.position
         frame.transform.rotation = pose_stamped.pose.orientation
         self.broadcasting_frames.append(frame)
Example #19
def pose_to_tf(pose_msg, child_frame_id):
    ts = TransformStamped()
    ts.header = pose_msg.header
    ts.child_frame_id = child_frame_id
    ts.transform.translation = pose_msg.pose.position
    ts.transform.rotation = pose_msg.pose.orientation
    msg = TFMessage()
    msg.transforms.append(ts)
    return msg
Example #20
    def odometry_process(self, x, y, theta, header):
        """
        Callback function that is executed upon reception of new odometry data.
        """

        # Prepare the new ROS message for the processed odometry
        odometry = TransformStamped()
        odometry.header.seq = 0
        odometry.header = header
        odometry.header.frame_id = self.ACQ_DEVICE_NAME
        odometry.child_frame_id = self.ACQ_DEVICE_NAME

        # Transform the incoming data to quaternions
        transform_current = np.array([[math.cos(theta), -1.0 * math.sin(theta), x],
                                      [math.sin(theta), math.cos(theta), y],
                                      [0.0, 0.0, 1.0]])

        transform_previous = np.array([[math.cos(self.previousOdometry["theta"]), -1.0 * math.sin(self.previousOdometry["theta"]), self.previousOdometry["x"]],
                                           [math.sin(self.previousOdometry["theta"]), math.cos(self.previousOdometry["theta"]), self.previousOdometry["y"]],
                                           [0.0, 0.0, 1.0]])

        transform_previous_inv = np.linalg.inv(transform_previous)

        self.previousOdometry = {'x': x, 'y': y, 'theta': theta}

        transform_relative = np.matmul(transform_previous_inv, transform_current)

        angle = math.atan2(transform_relative[1][0], transform_relative[0][0])

        x = transform_relative[0][2]

        y = transform_relative[1][2]


        cy = math.cos(angle * 0.5)
        sy = math.sin(angle * 0.5)
        cp = 1.0
        sp = 0.0
        cr = 1.0
        sr = 0.0

        q_w = cy * cp * cr + sy * sp * sr
        q_x = cy * cp * sr - sy * sp * cr
        q_y = sy * cp * sr + cy * sp * cr
        q_z = sy * cp * cr - cy * sp * sr


        # Save the resuts to the new odometry relative pose message
        odometry.transform.translation.x = x
        odometry.transform.translation.y = y
        odometry.transform.translation.z = 0
        odometry.transform.rotation.x = q_x
        odometry.transform.rotation.y = q_y
        odometry.transform.rotation.z = q_z
        odometry.transform.rotation.w = q_w

        return odometry
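With roll and pitch pinned to zero, the quaternion block above reduces to (0, 0, sin(angle/2), cos(angle/2)). A minimal sketch of the same relative-pose computation written against tf.transformations, assuming 2D poses given as (x, y, theta) tuples; the helper name relative_pose_2d is made up for illustration:

import math
from tf.transformations import quaternion_from_euler

def relative_pose_2d(prev, curr):
    # Return (dx, dy, dtheta, quaternion) of curr expressed in prev's frame.
    px, py, pt = prev
    cx, cy, ct = curr
    # rotate the world-frame displacement into the previous pose's frame
    dx = math.cos(pt) * (cx - px) + math.sin(pt) * (cy - py)
    dy = -math.sin(pt) * (cx - px) + math.cos(pt) * (cy - py)
    # wrapped heading change, matching atan2 of the relative rotation matrix
    dtheta = math.atan2(math.sin(ct - pt), math.cos(ct - pt))
    return dx, dy, dtheta, quaternion_from_euler(0.0, 0.0, dtheta)

dx, dy, dtheta, q = relative_pose_2d((0.0, 0.0, 0.0), (1.0, 0.5, 0.1))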
Example #21
 def callback(self, data):
     tfmsg = TransformStamped()
     tfmsg.header = data.header
     tfmsg.child_frame_id = "pointcloud_camera"
     tfmsg.transform.translation.x = data.pose.pose.position.x
     tfmsg.transform.translation.y = data.pose.pose.position.y
     tfmsg.transform.translation.z = data.pose.pose.position.z
     tfmsg.transform.rotation = data.pose.pose.orientation
     self.tfmsg_pub.publish(tfmsg)
Example #22
    def publish_odometry(self):
        quat = tf.transformations.quaternion_from_euler(0, 0, self.yaw)
        quaternion = Quaternion()
        quaternion.x = quat[0]
        quaternion.y = quat[1]
        quaternion.z = quat[2]
        quaternion.w = quat[3]

        if self.last_time is None:
            self.last_time = rospy.Time.now()

        vx = self.vx #(msg->twist.twist.linear.x) *trans_mul_;
        vy = self.vy #msg->twist.twist.linear.y;
        vth = self.vyaw #(msg->twist.twist.angular.z) *rot_mul_;

        current_time = rospy.Time.now()

        dt = (current_time - self.last_time).to_sec()
        delta_x = (vx * math.cos(self.yaw) - vy * math.sin(self.yaw)) * dt
        delta_y = (vx * math.sin(self.yaw) + vy * math.cos(self.yaw)) * dt
        delta_th = vth * dt

        self.x += delta_x
        self.y += delta_y
        self.yaw += delta_th

        odom = Odometry()
        odom.header.stamp = rospy.Time.now()
        odom.header.frame_id = "odom"
        odom.child_frame_id = "base_link"
        odom.pose.pose.position.x = self.x
        odom.pose.pose.position.y = self.y
        odom.pose.pose.position.z = 0.0
        odom.pose.pose.orientation = quaternion
        odom.twist.twist.linear.x = vx
        odom.twist.twist.linear.y = vy
        odom.twist.twist.angular.z = vth

        self.odom_pub.publish(odom)

        transform_stamped = TransformStamped()
        transform_stamped.header = odom.header

        transform_stamped.transform.translation.x = self.x
        transform_stamped.transform.translation.y = self.y
        transform_stamped.transform.translation.z = 0.0
        transform_stamped.transform.rotation = quaternion

        #self.tfbr.sendTransform(transform_stamped)
        self.tfbr.sendTransform((self.x, self.y, 0.0),
                                (quat[0], quat[1], quat[2], quat[3]),
                                rospy.Time.now(),
                                "odom",
                                "base_link")

        self.last_time = current_time
Example #23
def poseCB(msg_in):
    msg_out = TransformStamped()
    msg_out.header = msg_in.header
    msg_out.child_frame_id = robot_id
    msg_out.transform.translation.x = msg_in.pose.pose.position.x
    msg_out.transform.translation.y = msg_in.pose.pose.position.y
    msg_out.transform.translation.z = msg_in.pose.pose.position.z
    msg_out.transform.rotation = msg_in.pose.pose.orientation
    if sim_mode:
        msg_out.header.stamp = rospy.Time.from_sec(time.time())
    pub.publish(msg_out)
Example #24
    def publish(self):
        """Publish images from AirSim to ROS"""
        responses = self._airsim_client.simGetImages([
            # uncompressed RGB array bytes
            ImageRequest(self._camera_name, ImageType.Scene, compress=False),
            # # infrared uncompressed image
            # ImageRequest(self._camera_name, ImageType.Infrared, compress=False),
            # # floating point uncompressed image
            # ImageRequest(self._camera_name, ImageType.DepthPlanner, pixels_as_float=True, compress=False),
        ], self._vehicle_name)
        color_response = responses[0]
        # ir_response = responses[1]
        # depth_response = responses[2]

        header = Header()
        header.stamp = self.get_clock().now().to_msg()
        # TODO: implement parameter for frame id, also decide on if each separate image type should have a different frame id
        #  This may mean we should load the ids via ros parameters
        header.frame_id = self._camera_frame_id

        # Handle cam info if it has not been found yet
        if self._vehicle_name not in self._cam_info_msgs.keys():
            self._cam_info_msgs[self._vehicle_name] = {}
            cam_info = self._airsim_client.simGetCameraInfo(self._camera_name, self._vehicle_name)
            d_params = self._airsim_client.simGetDistortionParams(self._camera_name, self._vehicle_name)
            self.get_logger().info(f"{d_params}")
            self.get_logger().info(f"""
            HFOV: {cam_info.fov},
            PROJ: {cam_info.proj_mat}
            """)
            # TODO: implement multiple cameras for each lens on realsense and update this method
            self._cam_info_msgs[self._vehicle_name]["color"] = construct_info(header, cam_info, color_response.height, color_response.width)
            # self._cam_info_msgs[self._vehicle_name]["ir"] = self._cam_info_msgs[self._vehicle_name]["color"]

        image_color = construct_image(header, color_response, "bgr8")
        # image_ir = construct_image(header, ir_response, "rgb8")
        # image_depth = construct_image(header, depth_response, "rgb8")

        # TODO: use camera pose from airsim
        tfmsg = TransformStamped()
        translation = Vector3(x=0., y=0., z=0.)
        tfmsg.transform.translation = translation
        tfmsg.transform.rotation = Quaternion(x=0., y=0., z=0., w=1.)
        tfmsg.child_frame_id = self._camera_frame_id
        tf_header = Header()
        tf_header.stamp = header.stamp
        tfmsg.header = tf_header
        tfmsg.header.frame_id = "world"
        self.br.sendTransform(tfmsg)

        self._pub_color.publish(image_color)
        # self._pub_ir.publish(image_ir)
        # self._pub_depth.publish(image_depth)
        self._pub_info_color.publish(self._cam_info_msgs[self._vehicle_name]["color"])
Example #25
 def copy_transform(self, transform, child_frame_id=None):
     copied_tf = TransformStamped()
     copied_tf.header = transform.header
     if child_frame_id:
         copied_tf.child_frame_id = child_frame_id
     else:
         copied_tf.child_frame_id = transform.child_frame_id
     copied_tf.transform = transform.transform
     copied_tf.header.stamp = rospy.Time.now()
     
     return copied_tf
Example #26
 def get_tf_msg(self):
     """
     Helper Function used to create a ROS tf message for this child
     :return: The Filled Tf message
     :rtype: geometry_msgs.msg.TransformStamped
     """
     tf_msg = TransformStamped()
     tf_msg.header = self.get_msg_header()
     tf_msg.child_frame_id = self.get_frame_ID()
     tf_msg.transform = self.get_current_ros_transform()
     return tf_msg
Example #27
 def stateCb(self, msg, veh):
     t = TransformStamped()
     t.header = msg.header
     t.child_frame_id = veh + "/imu"
     t.transform.translation.x = msg.pos.x
     t.transform.translation.y = msg.pos.y
     t.transform.translation.z = msg.pos.z
     t.transform.rotation.x = msg.quat.x
     t.transform.rotation.y = msg.quat.y
     t.transform.rotation.z = msg.quat.z
     t.transform.rotation.w = msg.quat.w
     self.tf.sendTransform(t)
Example #28
    def get_tf_msg(self):
        """
        Helper function to create a ROS tf message of this child

        :return: the filled tf message
        :rtype: geometry_msgs.msg.TransformStamped
        """
        tf_msg = TransformStamped()
        tf_msg.header = self.get_msg_header()
        tf_msg.child_frame_id = self.get_frame_id()
        tf_msg.transform = self.get_current_ros_transform()
        return tf_msg
Example #29
def CreateBag(matfile, bagname, frame_id, navsat_topic="/fix"):
    '''Creates the actual bag file by successively adding images'''
    bag = rosbag.Bag(bagname, 'w', compression=rosbag.Compression.BZ2, chunk_threshold=32 * 1024 * 1024)
    data = loadmat(matfile)['rnv']
    rnv = {}
    for i, col in enumerate(data.dtype.names):
        rnv[col] = data.item()[i]
    try:
        ref = None
        x, y = [], []
        for i, t in enumerate(tqdm(rnv['t'])):
            # print("Adding %s" % image_name)

            stamp = rospy.Time.from_sec(t)
            nav_msg = NavSatFix()
            nav_msg.header.stamp = stamp
            nav_msg.header.frame_id = "map"
            nav_msg.header.seq = i
            nav_msg.latitude = rnv['lat'][i][0]
            nav_msg.longitude = rnv['lon'][i][0]
            nav_msg.altitude = -rnv['depth'][i][0]
            nav_msg.position_covariance_type = nav_msg.COVARIANCE_TYPE_UNKNOWN

            transform_msg = TransformStamped()
            transform_msg.header = copy(nav_msg.header)
            utm = fromLatLong(nav_msg.latitude, nav_msg.longitude, nav_msg.altitude)
            if ref is None:
                ref = utm.toPoint()
            x.append(utm.toPoint().x - ref.x)
            y.append(utm.toPoint().y - ref.y)
            dx = x[-1] - sum(x[-min(20, len(x)):]) / min(20, len(x))
            dy = y[-1] - sum(y[-min(20, len(y)):]) / min(20, len(y))
            transform_msg.transform.translation.x = x[-1]
            transform_msg.transform.translation.y = y[-1]
            transform_msg.transform.translation.z = nav_msg.altitude
            transform_msg.transform.rotation = Quaternion(
                *tf_conversions.transformations.quaternion_from_euler(0, 0, math.atan2(dy, dx)))
            transform_msg.child_frame_id = frame_id
            tf_msg = tfMessage(transforms=[transform_msg])

            odometry_msg = Odometry()
            odometry_msg.header = copy(transform_msg.header)
            odometry_msg.child_frame_id = frame_id
            odometry_msg.pose.pose.position = transform_msg.transform.translation
            odometry_msg.pose.pose.orientation = transform_msg.transform.rotation

            bag.write(navsat_topic, nav_msg, stamp)
            bag.write("/tf", tf_msg, stamp)
            bag.write("/transform", transform_msg, stamp)
            bag.write("/odom", odometry_msg, stamp)
    finally:
        bag.close()
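A short sketch of reading the transforms back out of a bag written by CreateBag, assuming rosbag is available; the bag path is illustrative:

import rosbag

with rosbag.Bag("rnv.bag") as bag:                       # hypothetical output path
    for topic, msg, stamp in bag.read_messages(topics=["/transform"]):
        # each message is one of the TransformStamped messages written above
        print(topic, stamp.to_sec(), msg.child_frame_id)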
Example #30
def pose_stamped_to_transform_stamped(ps):
    ts = TransformStamped()
    ts.header = ps.header
    ts.transform.translation.x = ps.pose.position.x
    ts.transform.translation.y = ps.pose.position.y
    ts.transform.translation.z = ps.pose.position.z

    ts.transform.rotation.x = ps.pose.orientation.x
    ts.transform.rotation.y = ps.pose.orientation.y
    ts.transform.rotation.z = ps.pose.orientation.z
    ts.transform.rotation.w = ps.pose.orientation.w

    return ts
Example #31
def callback_pose(msg_in):
    # Create a transform at the time
    # from the pose message for where
    # the UAV is in the map
    t = TransformStamped()
    t.header = msg_in.header
    t.child_frame_id = uav_name

    t.transform.translation = msg_in.pose.position
    t.transform.rotation = msg_in.pose.orientation

    # Send the transformation
    tfbr.sendTransform(t)
Example #32
 def get_tf_msg(self):
     """
     Override Function used to create a ROS tf message for this sensor
     The reported transform of the sensor is in respect to the global frame.
     :return: The Filled tf Message
     :rtype: geometry_msgs.msg.TransformStamped
     """
     tf_msg = TransformStamped()
     tf_msg.header = self.get_msg_header()
     tf_msg.header.frame_id = "/map"
     tf_msg.child_frame_id = self.get_frame_ID()
     tf_msg.transform = self.get_current_ros_transform()
     return tf_msg
Example #33
 def publish_box_tf(self, box, label):
     pos, ori = (box.pose.position, box.pose.orientation)
     t = TransformStamped()
     t.header = box.header
     t.child_frame_id = label
     t.transform.translation.x = pos.x
     t.transform.translation.y = pos.y
     t.transform.translation.z = pos.z
     t.transform.rotation.x = ori.x
     t.transform.rotation.y = ori.y
     t.transform.rotation.z = ori.z
     t.transform.rotation.w = ori.w
     self.tfb.sendTransform(t)
Example #34
def pose_cb(pose_msg):
    trans = TransformStamped()
    trans.child_frame_id = "handle_orig"
    trans.header = pose_msg.header
    trans.transform.rotation = pose_msg.pose.orientation
    trans.transform.translation.x = pose_msg.pose.position.x
    trans.transform.translation.y = pose_msg.pose.position.y
    trans.transform.translation.z = pose_msg.pose.position.z
    set_tf(10, trans)  # orig

    try:
        transed_pose = listener.transformPose("BODY", pose_msg)
    except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException) as e:
        print("tf error: %s" % e)
        return
Example #35
 def publish_filtered_tf(self, header):
     yaw = np.median(self.buffer)
     q = tfs.quaternion_from_euler(0, 0, yaw - math.pi)
     ts = TransformStamped()
     ts.header = header
     ts.header.frame_id = self.source
     ts.child_frame_id = self.goal
     ts.transform.translation.x = self.position[0]
     ts.transform.translation.y = self.position[1]
     ts.transform.translation.z = 0
     ts.transform.rotation.x = q[0]
     ts.transform.rotation.y = q[1]
     ts.transform.rotation.z = q[2]
     ts.transform.rotation.w = q[3]
     msg = tfMessage()
     msg.transforms.append(ts)
     self.tf_pub.publish(msg)
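The filter above assumes self.buffer already holds recent yaw samples. A minimal sketch of that bookkeeping with a fixed-length deque; the buffer size and names are illustrative:

import collections
import numpy as np

yaw_buffer = collections.deque(maxlen=15)   # keep only the 15 most recent yaw estimates

def filtered_yaw(new_yaw):
    yaw_buffer.append(new_yaw)
    return float(np.median(yaw_buffer))     # the median yaw used to build the transform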
Example #36
 def publish_filtered_tf(self, header):
     m = np.median(self.buffer, axis=0)
     if not self.filter_position:
         m[0:3] = self.last_transform[0:3]
     q = quaternion_from_euler(m[3], m[4], m[5])
     ts = TransformStamped()
     ts.header = header
     ts.child_frame_id = self.frame_id + '_filtered'
     ts.transform.translation.x = m[0]
     ts.transform.translation.y = m[1]
     ts.transform.translation.z = m[2]
     ts.transform.rotation.x = q[0]
     ts.transform.rotation.y = q[1]
     ts.transform.rotation.z = q[2]
     ts.transform.rotation.w = q[3]
     msg = tfMessage()
     msg.transforms.append(ts)
     self.tf_pub.publish(msg)
Example #37
    def callback(self, data):
        rospy.loginfo("Objects %d", data.objects.__len__())

        table_corners = []

        # obtain table_offset and table_pose
        to = rospy.wait_for_message(self.table_topic, MarkerArray);

        # obtain Table corners ...
        rospy.loginfo("Tables hull size %d", to.markers.__len__())
        if not to.markers:
            rospy.loginfo("No tables detected")
            return
        else:
            # NB - D says that ORK has this set to filter based on height.
            # IIRC, it's 0.6-0.8m above whatever frame is set as the floor
            point_array_size = 4      # for the 4 corners of the bounding box
            for i in range (0, point_array_size):
                p = Point()
                p.x = to.markers[0].points[i].x
                p.y = to.markers[0].points[i].y
                p.z = to.markers[0].points[i].z
                table_corners.append(p)
            # this is a table pose at the edge close to the robot, in the center of x axis
            table_pose = PoseStamped()
            table_pose.header = to.markers[0].header
            table_pose.pose = to.markers[0].pose
            
        # Determine table dimensions
        rospy.loginfo('calculating table pose bounding box in frame: %s' % table_pose.header.frame_id)
        min_x = sys.float_info.max
        min_y = sys.float_info.max
        max_x = -sys.float_info.max
        max_y = -sys.float_info.max

        for i in range (table_corners.__len__()):
            if table_corners[i].x > max_x:
                max_x = table_corners[i].x
            if table_corners[i].y > max_y:
                max_y = table_corners[i].y
            if table_corners[i].x < min_x:
                min_x = table_corners[i].x
            if table_corners[i].y < min_y:
                min_y = table_corners[i].y

        table_dim = Point()
        # TODO: if we don't (can't!) compute the height, should we at least give it non-zero depth? 
        # (would also require shifting the observed centroid down by table_dim.z/2)
        table_dim.z = 0.0

        table_dim.x = abs(max_x - min_x)
        table_dim.y = abs(max_y - min_y)
        print "Dimensions: ", table_dim.x, table_dim.y

        # Temporary frame used for transformations
        table_link = 'table_link'

        # centroid of a table in table_link frame
        centroid = PoseStamped()
        centroid.header.frame_id = table_link
        centroid.header.stamp = table_pose.header.stamp
        centroid.pose.position.x = (max_x + min_x)/2.
        centroid.pose.position.y = (max_y + min_y)/2.
        centroid.pose.position.z = 0.0
        centroid.pose.orientation.x = 0.0
        centroid.pose.orientation.y = 0.0
        centroid.pose.orientation.z = 0.0
        centroid.pose.orientation.w = 1.0

        # generate transform from table_pose to our newly-defined table_link
        tt = TransformStamped()
        tt.header = table_pose.header
        tt.child_frame_id = table_link
        tt.transform.translation = table_pose.pose.position
        tt.transform.rotation = table_pose.pose.orientation
        self.tl.setTransform(tt)
        self.tl.waitForTransform(table_link, table_pose.header.frame_id, table_pose.header.stamp, rospy.Duration(3.0))
        if self.tl.canTransform(table_pose.header.frame_id, table_link, table_pose.header.stamp):
            centroid_table_pose = self.tl.transformPose(table_pose.header.frame_id, centroid)
            self.pose_pub.publish(centroid_table_pose)
        else:
            rospy.logwarn("No transform between %s and %s possible",table_pose.header.frame_id, table_link)
            return

        # transform each object into desired frame; add to list of clusters
        cluster_list = []
        for i in range (data.objects.__len__()):
            rospy.loginfo("Point clouds %s", data.objects[i].point_clouds.__len__())

            pc = PointCloud2()
            pc = data.objects[i].point_clouds[0]
            arr = pointclouds.pointcloud2_to_array(pc, 1)
            arr_xyz = pointclouds.get_xyz_points(arr)

            arr_xyz_trans = []
            for j in range (arr_xyz.__len__()):
                ps = PointStamped();
                ps.header.frame_id = table_link
                ps.header.stamp = table_pose.header.stamp
                ps.point.x = arr_xyz[j][0]
                ps.point.y = arr_xyz[j][1]
                ps.point.z = arr_xyz[j][2]
                if self.tl.canTransform(table_pose.header.frame_id, table_link, table_pose.header.stamp):
                    ps_in_kinect_frame = self.tl.transformPoint(table_pose.header.frame_id, ps)
                else:
                    rospy.logwarn("No transform between %s and %s possible",table_pose.header.frame_id, table_link)
                    return
                arr_xyz_trans.append([ps_in_kinect_frame.point.x, ps_in_kinect_frame.point.y, ps_in_kinect_frame.point.z])

            pc_trans = PointCloud2()
            pc_trans = pointclouds.xyz_array_to_pointcloud2(np.asarray([arr_xyz_trans]), 
                                                            table_pose.header.stamp, table_pose.header.frame_id)

            self.pub.publish(pc_trans)
            cluster_list.append(pc_trans)
            rospy.loginfo("cluster size %d", cluster_list.__len__())

        # finally - save all data in the object that'll be sent in response to actionserver requests
        with self.result_lock:
            self._result.objects = cluster_list 
            self._result.table_dims = table_dim
            self._result.table_pose = centroid_table_pose
            self.has_data = True