Example #1
    def calculate_center_of_mass(self):
        x = 0
        y = 0
        z = 0

        for link in self.links:
            # print (self.links[link])  # get structure of link
            try:
                # get transform of each link with respect to base link
                self.tf_base_to_link = self.tf_buffer.lookup_transform(
                    "base_link", link, rospy.Time(), rospy.Duration(
                        1.0))  # 1 second timeout, blocks until it finds it
                self.link_origin.point.x = self.links[
                    link].inertial.origin.xyz[0]
                self.link_origin.point.y = self.links[
                    link].inertial.origin.xyz[1]
                self.link_origin.point.z = self.links[
                    link].inertial.origin.xyz[2]
                self.link_origin.header.frame_id = link
                self.link_origin.header.stamp = rospy.get_rostime()

                tf_base_to_link_origin = tf_msg.do_transform_point(
                    self.link_origin, self.tf_base_to_link)

                # calculate part of CoM equation depending on link
                x += self.links[link].inertial.mass * \
                    tf_base_to_link_origin.point.x
                y += self.links[link].inertial.mass * \
                    tf_base_to_link_origin.point.y
                z += self.links[link].inertial.mass * \
                    tf_base_to_link_origin.point.z

            except tf2_ros.TransformException as err:
                rospy.logerr(
                    "TF error in CoM computation: %s", err)

        # finish CoM calculation
        self.center_of_mass.header.stamp = rospy.Time.now()
        self.center_of_mass.header.frame_id = "base_link"
        self.center_of_mass.point.x = x / self.mass
        self.center_of_mass.point.y = y / self.mass
        self.center_of_mass.point.z = z / self.mass

        self.pub_com.publish(self.center_of_mass)

        # project COM into ground, z=0

        self.tf_base_to_world = self.tf_buffer.lookup_transform(
            "world", "base_link", rospy.Time(),
            rospy.Duration(1.0))  # 1 second timeout, blocks until it finds it
        self.tf_com_location_to_world = tf_msg.do_transform_point(
            self.center_of_mass, self.tf_base_to_world)

        self.center_of_mass_projected.header.frame_id = "world"
        self.center_of_mass_projected.header.stamp = rospy.Time.now()
        self.center_of_mass_projected.point.x = self.tf_com_location_to_world.point.x
        self.center_of_mass_projected.point.y = self.tf_com_location_to_world.point.y
        self.center_of_mass_projected.point.z = 0  # zero height
        self.pub_projected.publish(self.center_of_mass_projected)
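Note: unlike the per-link lookups, the ground-projection lookup above is not wrapped in a try/except. A minimal hardening sketch (an assumption about the desired behavior, not part of the original node):

    try:
        self.tf_base_to_world = self.tf_buffer.lookup_transform(
            "world", "base_link", rospy.Time(), rospy.Duration(1.0))
    except tf2_ros.TransformException as err:
        # Skip publishing the projected CoM for this cycle if TF is unavailable.
        rospy.logerr("CoM projection: TF error %s", err)
        return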
Example #2
def get_obj_frames(blue_obj, red_obj, base_tf, base2kinect_tf):
    base_pose = Pose()
    base_pose.position = base_tf.transform.translation
    base_pose.orientation = base_tf.transform.rotation
    base_kdl = tf_conversions.fromMsg(base_pose)
    base_unitX = base_kdl.M.UnitX()
    base_unitY = base_kdl.M.UnitY()
    base_unitZ = base_kdl.M.UnitZ()

    ### Frame for Blue Object

    blue_center_kinect = PointStamped()
    blue_center_kinect.header = base2kinect_tf.header
    blue_center_kinect.point = blue_obj.bb_center
    blue_center = tf2_geometry_msgs.do_transform_point(blue_center_kinect,
                                                       base2kinect_tf)

    blue_pose = Pose()
    blue_pose.position = blue_center.point
    blue_pose.position.z = blue_pose.position.z - blue_obj.bb_dims.z / 2
    blue_pose.orientation = base_tf.transform.rotation
    blue_kdl = tf_conversions.fromMsg(blue_pose)
    blue_pos = blue_kdl.p
    blue_rot = PyKDL.Rotation(-base_unitX, -base_unitY, base_unitZ)
    blue_kdl = PyKDL.Frame(blue_rot, blue_pos)
    blue_pose = tf_conversions.toMsg(blue_kdl)

    blue_frame = TransformStamped()
    blue_frame.header.frame_id = base_frame
    blue_frame.header.stamp = rospy.Time.now()
    blue_frame.child_frame_id = 'blue_frame'
    blue_frame.transform.translation = blue_pose.position
    blue_frame.transform.rotation = blue_pose.orientation

    ### Frame for Red Object

    red_center_kinect = PointStamped()
    red_center_kinect.header = base2kinect_tf.header
    red_center_kinect.point = red_obj.bb_center
    red_center = tf2_geometry_msgs.do_transform_point(red_center_kinect,
                                                      base2kinect_tf)

    red_pose = Pose()
    red_pose.position = red_center.point
    red_pose.position.z = red_pose.position.z - red_obj.bb_dims.z / 2
    red_pose.orientation = base_tf.transform.rotation
    red_kdl = tf_conversions.fromMsg(red_pose)
    red_pos = red_kdl.p
    red_rot = PyKDL.Rotation(-base_unitX, -base_unitY, base_unitZ)
    red_kdl = PyKDL.Frame(red_rot, red_pos)
    red_pose = tf_conversions.toMsg(red_kdl)

    red_frame = TransformStamped()
    red_frame.header.frame_id = base_frame
    red_frame.header.stamp = rospy.Time.now()
    red_frame.child_frame_id = 'red_frame'
    red_frame.transform.translation = red_pose.position
    red_frame.transform.rotation = red_pose.orientation

    return blue_frame, red_frame
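The blue and red blocks above are identical apart from the object and frame name; a sketch of a shared helper (name hypothetical) that both could call, e.g. blue_frame = make_object_frame(blue_obj, 'blue_frame', base2kinect_tf, blue_rot):

    def make_object_frame(obj, child_frame_id, base2kinect_tf, rot):
        # Transform the bounding-box center from the kinect frame to the base frame.
        center_kinect = PointStamped()
        center_kinect.header = base2kinect_tf.header
        center_kinect.point = obj.bb_center
        center = tf2_geometry_msgs.do_transform_point(center_kinect, base2kinect_tf)
        center.point.z -= obj.bb_dims.z / 2  # drop from box center to box bottom

        frame = TransformStamped()
        frame.header.frame_id = base_frame
        frame.header.stamp = rospy.Time.now()
        frame.child_frame_id = child_frame_id
        frame.transform.translation = center.point
        frame.transform.rotation = tf_conversions.toMsg(
            PyKDL.Frame(rot, PyKDL.Vector())).orientation
        return frame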
Example #3
def get_next_setpoint(path):
    global tf_buffer, robot_frame_id, look_ahead_distance, distance_converged
    distance_left = look_ahead_distance

    transform = tf_buffer.lookup_transform(robot_frame_id,
                                           path.header.frame_id, rospy.Time(0),
                                           rospy.Duration(1))
    transform_inverse = tf_buffer.lookup_transform(path.header.frame_id,
                                                   robot_frame_id,
                                                   rospy.Time(0),
                                                   rospy.Duration(1))

    point = tf2_geometry_msgs.do_transform_point(
        PointStamped(path.header, path.poses[0].pose.position), transform)

    distance = np.linalg.norm(np.array([[point.point.x, point.point.y]]))

    last_point = PointStamped()

    # print(len(path.poses))

    if 1 == len(path.poses):
        point.header = path.header
        point.point = path.poses[0].pose.position
        if distance < distance_converged:
            del path.poses[0]
        return point
    elif distance <= (look_ahead_distance / 2.0):
        del path.poses[0]
        last_point = point
        distance_left -= distance
    elif distance > look_ahead_distance:
        point.header = path.header
        point.point = path.poses[0].pose.position
        return point

    last_position = np.array([[last_point.point.x, last_point.point.y]])
    for pose in path.poses:
        # FIXME: We assume that it is the same transform...
        point = tf2_geometry_msgs.do_transform_point(
            PointStamped(path.header, pose.pose.position), transform)
        position = np.array([[point.point.x, point.point.y]])
        distance = np.linalg.norm(position - last_position)
        if distance > distance_left:
            last_point.header = path.header
            # Return interpolated between point and last_point
            last_point.point = interpolate(last_point.point, point.point,
                                           distance_left / distance)
            return tf2_geometry_msgs.do_transform_point(
                last_point, transform_inverse)

        distance_left -= distance
        last_point = point
        last_position = position

    point.header = path.header
    point.point = path.poses[-1].pose.position
    return point  # Return last element
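The interpolate helper called above is not shown; a minimal sketch consistent with its call site (linear interpolation between two geometry_msgs/Point values, with the last argument in [0, 1]):

    from geometry_msgs.msg import Point

    def interpolate(a, b, t):
        # Returns a copy of a at t=0 and of b at t=1.
        return Point(a.x + (b.x - a.x) * t,
                     a.y + (b.y - a.y) * t,
                     a.z + (b.z - a.z) * t)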
Example #4
def move(path):
    global control_client, robot_frame_id, pub
    rate = rospy.Rate(10)
    # Call service client with path
    response = control_client(path)
    # Transform Setpoint from service client
    transform = tf_buffer.lookup_transform(robot_frame_id,
                                           response.setpoint.header.frame_id,
                                           rospy.Time(0))
    transformed_setpoint = tf2_geometry_msgs.do_transform_point(
        response.setpoint,
        transform)  #transform the setpoint to base_link frame
    # Create Twist message from the transformed Setpoint
    twist_msg = Twist()
    twist_msg.angular.z = max_angular_velocity * atan2(
        transformed_setpoint.point.y, transformed_setpoint.point.x)
    twist_msg.linear.x = max_linear_velocity * sqrt(
        transformed_setpoint.point.y**2 + transformed_setpoint.point.x**2)
    if twist_msg.angular.z > max_angular_velocity:
        twist_msg.angular.z = max_angular_velocity
    if twist_msg.linear.x > max_linear_velocity:
        twist_msg.linear.x = max_linear_velocity
    # Publish Twist
    pub.publish(twist_msg)
    rate.sleep()
    # Call service client again if the returned path is not empty and do stuff again
    i = 0
    while response.new_path.poses:
        i = i + 1
        print("There are poses " + str(i))
        response = control_client(response.new_path)
        transformer = tf_buffer.lookup_transform(
            robot_frame_id, response.setpoint.header.frame_id, rospy.Time(0))
        new_setpoint = tf2_geometry_msgs.do_transform_point(
            response.setpoint,
            transformer)  #transform the setpoint to base_link frame
        # Create Twist message from the transformed Setpoint
        msg = Twist()
        msg.angular.z = max_angular_velocity * atan2(
            new_setpoint.point.y,
            new_setpoint.point.x)  # TODO: verify this scaling
        msg.linear.x = max_linear_velocity * sqrt(
            new_setpoint.point.y**2 +
            new_setpoint.point.x**2)  # TODO: verify this scaling
        if msg.angular.z > max_angular_velocity:
            msg.angular.z = max_angular_velocity
        if msg.linear.x > max_linear_velocity:
            msg.linear.x = max_linear_velocity
        pub.publish(msg)
        rate.sleep()
    # Send 0 control Twist to stop robot
    stop = Twist()
    stop.angular.z = 0
    stop.linear.x = 0
    pub.publish(stop)
    rate.sleep()
Example #5
def move(path):
    global control_client, robot_frame_id, pub

    # Call service client with path
    res = control_client(path)
    # print("The frame is: %s" %res.setpoint.header.frame_id)

    # Transform Setpoint from service client
    transform = tf_buffer.lookup_transform(robot_frame_id, "map", rospy.Time()) 
    transformed_setpoint = tf2_geometry_msgs.do_transform_point(res.setpoint, transform)
    

    # Create Twist message from the transformed Setpoint
    msg = Twist()
    msg.angular.z = 4 * atan2(transformed_setpoint.point.y, transformed_setpoint.point.x)
    if msg.angular.z > max_angular_velocity:
        msg.angular.z = max_angular_velocity

    msg.linear.x = 0.5 * math.sqrt(transformed_setpoint.point.x ** 2 + transformed_setpoint.point.y ** 2)
    if msg.linear.x > max_linear_velocity:
        msg.linear.x = max_linear_velocity

    # Publish Twist
    pub.publish(msg)
   
    # Call service client again if the returned path is not empty and do stuff again
    new_path = res.new_path

    while new_path.poses:

        res = control_client(new_path)
        # Reuse the module-level tf_buffer/listener; there is no need to
        # create a new Buffer or TransformListener on every iteration.
        transform = tf_buffer.lookup_transform(robot_frame_id, "map", rospy.Time())
        transformed_setpoint = tf2_geometry_msgs.do_transform_point(res.setpoint, transform)
        msg = Twist()
        msg.angular.z = 4 * atan2(transformed_setpoint.point.y, transformed_setpoint.point.x)
        if msg.angular.z > max_angular_velocity:
            msg.angular.z = max_angular_velocity

        msg.linear.x = 0.5 * math.sqrt(transformed_setpoint.point.x ** 2 + transformed_setpoint.point.y ** 2)
        if msg.linear.x > max_linear_velocity:
            msg.linear.x = max_linear_velocity

        pub.publish(msg)

        new_path = res.new_path

        rate.sleep()
    
    print("new_path.poses = " + str(new_path.poses))
    # Send 0 control Twist to stop robot 
    msg.angular.z = 0
    msg.linear.x = 0
    pub.publish(msg)
Example #6
def callback(input):
    if is_similar(pose_old, input.pose.pose):
        return
    odom2base = TransformStamped()
    odom2base.transform.translation = input.pose.pose.position
    odom2base.transform.rotation = input.pose.pose.orientation
    whl_l_pos = tf2_geometry_msgs.do_transform_point(base2wheel_l,
                                                     odom2base).point
    traj["l"].append(whl_l_pos)
    whl_r_pos = tf2_geometry_msgs.do_transform_point(base2wheel_r,
                                                     odom2base).point
    traj["r"].append(whl_r_pos)
    show_length()
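A hypothetical setup for the globals this callback relies on (the wheel offsets are assumptions for a differential-drive base with a 0.3 m track width):

    from geometry_msgs.msg import PointStamped

    base2wheel_l = PointStamped()
    base2wheel_l.point.y = 0.15    # left wheel, half the track width
    base2wheel_r = PointStamped()
    base2wheel_r.point.y = -0.15   # right wheel
    traj = {"l": [], "r": []}      # per-wheel trajectory history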
Example #7
    def convert_to_worldframe(self, data):

        try:
            if data is None or self.location is None:
                return

            target_frame = "base_link"
            source_frame = "camera_depth_optical_frame"

            # define a source point
            point_wrt_source = Point(data[0], data[1], data[2])

            transformation = tf_buffer.lookup_transform(
                target_frame, source_frame, rospy.Time(0), rospy.Duration(0.1))

            point_wrt_target = tf2_geometry_msgs.do_transform_point(
                PointStamped(point=point_wrt_source), transformation).point

            self.publish_destination(
                np.array([
                    self.location.x + point_wrt_target.x,
                    self.location.y + point_wrt_target.y,
                    self.location.z + point_wrt_target.z, point_wrt_target.x,
                    point_wrt_target.y, point_wrt_target.z
                ]))

        except Exception:
            traceback.print_exc()
Example #8
    def transform_point(self, p, target_frame, source_frame, time=0.0):
        """ Transform a point from the source frame to the target frame.

        Parameters
        ----------
        p : iterable, len 3
            Input point in source frame.

        target_frame : str
            Name of the frame to transform into.

        source_frame : str
            Name of the input frame.

        time : float, default 0.0
            Time at which to get the transform. (0 will get the latest)

        Returns
        -------
        tuple, len 3
            Transformed point in target frame.
        """
        import rospy
        import tf2_geometry_msgs

        from .msg import make_point_msg, unpack_point_msg

        transform = self._buffer.lookup_transform(target_frame, source_frame,
                                                  rospy.Time.from_sec(time))
        p_msg = make_point_msg(p, source_frame, time)
        pt_msg = tf2_geometry_msgs.do_transform_point(p_msg, transform)

        return unpack_point_msg(pt_msg, stamped=True)
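A short usage sketch for this method (the wrapper class name and frame names are assumptions; it presumes the instance's tf2_ros.Buffer is being filled by a TransformListener):

    import rospy

    rospy.init_node("transform_point_demo")
    node = TfHelper()   # hypothetical class exposing transform_point as above
    rospy.sleep(1.0)    # give the TF buffer time to receive transforms
    x, y, z = node.transform_point((1.0, 0.0, 0.2), "map", "camera_link")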
Example #9
    def _range_bearing_callback(self, bearing_msg):
        x = bearing_msg.range * \
            m.cos(bearing_msg.bearing) * m.cos(bearing_msg.elevation)
        y = bearing_msg.range * \
            m.sin(bearing_msg.bearing) * m.cos(bearing_msg.elevation)
        z = bearing_msg.range * m.sin(bearing_msg.elevation)

        source_frame_point = PointStamped()
        source_frame_point.header.stamp = rospy.Time.now()
        source_frame_point.header.frame_id = bearing_msg.header.frame_id
        source_frame_point.point.x = x
        source_frame_point.point.y = y
        source_frame_point.point.z = z

        transform = self._tf_buffer.lookup_transform(target_frame=self._world_frame,
                                                     source_frame=source_frame_point.header.frame_id,
                                                     time=rospy.Time.now(),
                                                     timeout=rospy.Duration(
                                                         1.0)
                                                     )

        target_frame_point = tf2_geometry_msgs.do_transform_point(
            source_frame_point, transform)

        rospy.loginfo("Located a beacon at %s" %
                      (" ".join(str(target_frame_point.point).split())))
        rospy.loginfo("  - Distance : %7.2f" % (m.sqrt(x**2 + y**2)))
        rospy.loginfo("  - Depth    : %7.2f" % (z))

        self._black_box_pose_pub.publish(target_frame_point)
        self._publish_marker(target_frame_point)
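As a sanity check of the spherical-to-Cartesian conversion above (illustrative values): with range=10, bearing=0 and elevation=0, x = 10*cos(0)*cos(0) = 10 and y = z = 0, i.e. the beacon lies straight ahead of the sensor.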
Example #10
    def get_mask_img(self, transform, target_bin, camera_model):
        """
        :param point: point that is going to be transformed
        :type point: PointStamped
        :param transform: camera_frame -> bbox_frame
        :type transform: Transform
        """
        # check frame_id of a point and transform just in case
        assert camera_model.tf_frame == transform.header.frame_id
        assert target_bin.bbox.header.frame_id == transform.child_frame_id

        transformed_list = [
                do_transform_point(corner, transform)
                for corner in target_bin.corners]
        projected_points = self.project_points(transformed_list, camera_model)

        # generate a polygon that covers the region
        path = Path(projected_points)
        x, y = np.meshgrid(
                np.arange(camera_model.width // self.camera_model.binning_x),
                np.arange(camera_model.height // self.camera_model.binning_y))
        x, y = x.flatten(), y.flatten()
        points = np.vstack((x, y)).T
        # reshape to (rows, cols) = (binned height, binned width); the binning
        # factors must match the meshgrid axes above
        mask_img = path.contains_points(points).reshape(
                camera_model.height // self.camera_model.binning_y,
                camera_model.width // self.camera_model.binning_x)
        mask_img = (mask_img * 255).astype(np.uint8)
        return mask_img
Example #11
    def _publish_detection(self, detection_data, detection_frame,
                           detection_stamp):
        source_detection_point = PointStamped()
        source_detection_point.header.frame_id = detection_frame
        source_detection_point.header.stamp = detection_stamp
        source_detection_point.point = detection_data.location

        transform = self._tf_buffer.lookup_transform(
            target_frame=self._utm_frame,
            source_frame=detection_frame,
            time=rospy.Time.now(),
            timeout=rospy.Duration(1.0))
        utm_detection_point = tf2_geometry_msgs.do_transform_point(
            source_detection_point, transform)

        utm_pose = PoseStamped()
        utm_pose.pose.orientation.x = 0.0
        utm_pose.pose.orientation.y = 0.0
        utm_pose.pose.orientation.z = 0.0
        utm_pose.pose.orientation.w = 1.0
        utm_pose.pose.position.x = utm_detection_point.point.x
        utm_pose.pose.position.y = utm_detection_point.point.y
        utm_pose.pose.position.z = utm_detection_point.point.z
        geo_pose = utm_to_wgs84_pose(utm_pose, self._current_nav_sat_fix)
        geo_pose.header.frame_id = detection_data.category
        self._perception_pub.publish(geo_pose)
        rospy.logwarn(geo_pose)
Example #12
def callback(data):
    #rospy.loginfo(rospy.get_caller_id() + "I heard %s", data.point)
    # CoG_Position = data.point
    # print("CoG Position: ", CoG_Position)

    try:
        trans = tfBuffer.lookup_transform("map", "base_link", rospy.Time(0),
                                          rospy.Duration(0.01))
    except (tf2_ros.LookupException, tf2_ros.ConnectivityException,
            tf2_ros.ExtrapolationException):
        raise

    CoG_transformed = tf2_geometry_msgs.do_transform_point(
        data, trans)  # Transform Point from base_link frame to map frame

    c = np.array([
        CoG_transformed.point.x, CoG_transformed.point.y,
        CoG_transformed.point.z
    ])
    dcom, ac = com_sg.send(
        (time.time(),
         c))  # Applying SG filter to get velocity and acceleration

    # contact forces (f / total_mass)
    f = ac - gravity

    # zram
    alpha = (ground_level - c[2]) / f[2]

    zram = c + alpha * f

    array = Float32MultiArray(data=zram)

    pub.publish(array)
Example #13
    def cb_object_detector(self, data):
        if self.detector_state.is_initialized():
            self.sensor_frame = data.header.frame_id
            try:
                # TF
                self.trans_sensor_global = self.tf_buffer.lookup_transform(
                    self.global_frame, self.sensor_frame, rospy.Time())
                detected_obstacles = []
                for o in data.objects:
                    p = PointStamped()
                    p.point.x = o.pose.position.x
                    p.point.y = o.pose.position.y
                    p.point.z = o.pose.position.z
                    p_global = tf2_geometry_msgs.do_transform_point(
                        p, self.trans_sensor_global)
                    mind = np.inf  # distance to the closest waypoint so far
                    for i in range(
                            len(self.detector_state.global_waypoints.waypoints)
                    ):
                        wp_position = self.detector_state.global_waypoints.waypoints[
                            i].pose.pose.position
                        dx = p_global.point.x - wp_position.x
                        dy = p_global.point.y - wp_position.y
                        d_eucl = np.sqrt(dx**2 + dy**2)
                        if (d_eucl < self.safety_threshold):
                            if d_eucl < mind:
                                mind = d_eucl
                    if mind < self.safety_threshold:
                        detected_obstacles.append(o)

                print(len(detected_obstacles), len(data.objects))

            except (tf2_ros.LookupException, tf2_ros.ConnectivityException,
                    tf2_ros.ExtrapolationException):
                rospy.logerr("TF error")
Example #14
    def _range_callback(self, msg):
        with self._lock:
            z = msg.pose.pose.position.z
            _time = msg.header.stamp
            if math.isinf(z) or z < self._range_min or z > self._range_max:
                rospy.logerr("AltimeterFilter: rejecting range of {}".format(z))
                return

            try:
                transform = self.tf_buffer.lookup_transform(
                    "base_link", msg.header.frame_id, _time, rospy.Duration(1.0)
                )
            except Exception as e:
                rospy.logerr(
                    "AltimeterFilter: error looking up transform {}".format(e)
                )
                raise

            stamped_point = PointStamped()
            stamped_point.point.z = z

            final_pose = tf2_geometry_msgs.do_transform_point(
                stamped_point, transform
            )

            final_msg = PoseWithCovarianceStamped()
            final_msg.header.frame_id = "odom"
            final_msg.header.stamp = _time
            final_msg.pose.pose.position.z = final_pose.point.z
            final_msg.pose.covariance = msg.pose.covariance
            self._range_pub.publish(final_msg)
Example #15
    def _obstacle_callback(self, msg):
        try:
            transform = self.tf_buffer.lookup_transform(
                self.map_frame, msg.header.frame_id, msg.header.stamp,
                rospy.Duration(1.0))
        except (tf2.ConnectivityException, tf2.LookupException,
                tf2.ExtrapolationException) as e:
            rospy.logwarn(e)
            return

        for o in msg.obstacles:
            point = PointStamped()
            point.header = msg.header
            point.point = o.pose.pose.pose.position
            # Transform robot to map frame
            point = tf2_geometry_msgs.do_transform_point(point, transform)
            # Update robots that are close together
            cleaned_robots = []
            for old_robot in self.robots:
                # Check if there is another robot in memory close to it
                if np.linalg.norm(
                        ros_numpy.numpify(point.point) - ros_numpy.numpify(
                            old_robot.point)) > self.robot_merge_distance:
                    cleaned_robots.append(old_robot)
            # Update our robot list with a new list that does not contain the duplicates
            self.robots = cleaned_robots
            # Append our new robots
            self.robots.append(point)
Example #16
    def poses_callback(self, pose_msg, identity):
        """Callback for poses of other agents"""
        rospy.loginfo_once('Got other poses callback')
        if self.pose is None:
            return

        target_frame = f'drone_{self.my_id}/base_link'
        source_frame = 'world'

        try:
            transform = self.buffer.lookup_transform(target_frame=target_frame,
                                                     source_frame=source_frame,
                                                     time=rospy.Time(0))
        except Exception as e:
            print(e)
            return

        point = PointStamped()
        point.header = pose_msg.header
        point.point.x = pose_msg.pose.position.x
        point.point.y = pose_msg.pose.position.y
        point.point.z = pose_msg.pose.position.z

        point_tf = tf2_geometry_msgs.do_transform_point(point, transform)
        p = point_tf.point

        self.poses[identity] = np.array([p.x, p.y, p.z])
Example #17
def free_space_srv_cb(req):

    global map_data
    global got_map
    global tfBuffer

    point = req.point
    point_stamped = PointStamped()

    # TODO: Fill in the infos of point_stamped
    # Dont forget the header
    # call the function tfBuffer.lookup_transform with the right params to
    # get the transformation t

    try:
        point_in_map = tg.do_transform_point(point_stamped, t)
    except Exception as e:
        print(e)
        return -1
    while not got_map:
        rospy.sleep(0.1)

    ans = point_is_free(map_data, point_in_map)
    if ans >= 80:
        return 0
    elif ans == -1:
        return -1
    else:
        return 1
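One possible completion of the TODO above (a sketch; the source frame is an assumption, taking the request point to be in the robot's base frame):

    point_stamped.header.stamp = rospy.Time(0)
    point_stamped.header.frame_id = "base_link"   # assumed source frame
    point_stamped.point = point
    t = tfBuffer.lookup_transform("map",          # assumed map frame
                                  point_stamped.header.frame_id,
                                  rospy.Time(0), rospy.Duration(1.0))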
Example #18
def get_mask_img(transform, target_bin, camera_model):
    """
    :param point: point that is going to be transformed
    :type point: PointStamped
    :param transform: camera_frame -> bbox_frame
    :type transform: Transform
    """
    # check frame_id of a point and transform just in case
    assert camera_model.tf_frame == transform.header.frame_id
    assert target_bin.bbox.header.frame_id == transform.child_frame_id

    transformed_list = [
            do_transform_point(corner, transform)
            for corner in target_bin.corners]
    projected_points = project_points(transformed_list, camera_model)

    # generate a polygon that covers the region
    path = Path(projected_points)
    x, y = np.meshgrid(
            np.arange(camera_model.width),
            np.arange(camera_model.height))
    x, y = x.flatten(), y.flatten()
    points = np.vstack((x, y)).T
    mask_img = path.contains_points(
            points).reshape(
                    camera_model.height, camera_model.width
                ).astype('bool')
    return mask_img
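The names used above come from elsewhere in the module; one plausible set of imports for this function (an assumption -- Path here is matplotlib's polygon class):

    import numpy as np
    from matplotlib.path import Path
    from tf2_geometry_msgs import do_transform_point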
Example #19
    def filter_frame(self, frame, trans, stamp):
        """
        Applies a four-step algorithm to find all the roombas in a given BGR
        image.
    
        :param frame: raw BGR image in which to find roombas
        :type frame: numpy.ndarray of shape 3 x WIDTH x HEIGHT with dtype uint8
        :return: None
    
        .. note::
            Does not return data, only debugs the result
    
        .. note::
            The image origin is in the upper-left hand corner
        """
        points = []

        for img_coords in self.bound_roombas(frame):
            cam_ray = pixel_to_ray(img_coords, frame.shape, self.daov)
            # Convert that camera ray to world space (map frame)
            # See note in script header about do_transform_vector3
            map_ray = tf2_geometry_msgs.do_transform_vector3(
                cam_ray, copy.deepcopy(trans))

            # Scale the direction to hit the ground (plane z=0)
            # Direction scale should always be positive
            cam_pos = tf2_geometry_msgs.do_transform_point(
                PointStamped(point=Point(0, 0, 0)), trans).point
            direction_scale = -(cam_pos.z - ROOMBA_HEIGHT) / map_ray.vector.z
            roomba_pos = Point()
            roomba_pos.x = cam_pos.x + map_ray.vector.x * direction_scale
            roomba_pos.y = cam_pos.y + map_ray.vector.y * direction_scale
            roomba_pos.z = 0

            # Debug the roomba line
            points.append(trans.transform.translation)
            points.append(roomba_pos)

            # Add the roomba to array and publish
            self.odom_lock.acquire()
            sq_tolerance = 0.1 if len(self.odom_array.data) < 10 else 1000
            for i in xrange(len(self.odom_array.data)):
                pt = self.odom_array.data[i].pose.pose.position
                if (roomba_pos.x - pt.x)**2 + \
                   (roomba_pos.y - pt.y)**2 < sq_tolerance:
                    self.odom_array.data[i].header.stamp = stamp
                    self.odom_array.data[i].pose.pose.position = roomba_pos
                    break
            else:
                item = Odometry()
                item.child_frame_id = "roomba%d" % len(self.odom_array.data)
                item.header.frame_id = "map"
                item.header.stamp = stamp
                item.pose.pose.position = roomba_pos
                item.pose.pose.orientation.z = 1
                self.odom_array.data.append(item)
            self.publisher.publish(self.odom_array)
            self.odom_lock.release()

        self.debug.rviz_lines(points, 0)
Example #20
def getTarget():  #in robot base frame
    transf = tfBuffer.lookup_transform(BASE_FRAME,
                                       MAP_FRAME,
                                       rospy.Time(),
                                       timeout=rospy.Duration(2))
    print(transf)
    tgt = tf2_geometry_msgs.do_transform_point(target, transf)
    return np.array([tgt.point.x, tgt.point.y, 0])
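A hypothetical setup for the globals used above (frame names and the goal location are assumptions):

    import rospy
    import tf2_ros
    import tf2_geometry_msgs
    import numpy as np
    from geometry_msgs.msg import PointStamped

    BASE_FRAME = "base_link"
    MAP_FRAME = "map"
    tfBuffer = tf2_ros.Buffer()
    listener = tf2_ros.TransformListener(tfBuffer)

    target = PointStamped()
    target.header.frame_id = MAP_FRAME
    target.point.x, target.point.y = 1.0, 2.0   # example goal in the map frame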
Example #21
def avoid_collision(setpoint):
    global radius
    transform = tf_buffer.lookup_transform(
        robot_frame_id, setpoint.header.frame_id, rospy.Time(0), rospy.Duration(1))
    transform_inverse = tf_buffer.lookup_transform(
        setpoint.header.frame_id, robot_frame_id, rospy.Time(0), rospy.Duration(1))

    point = tf2_geometry_msgs.do_transform_point(setpoint, transform)

    goal = [point.point.x, point.point.y]
    obstacles = get_obstacles()

    control = orm.avoid_collision(goal, obstacles)

    point.point.x = control[0]
    point.point.y = control[1]

    return tf2_geometry_msgs.do_transform_point(point, transform_inverse)
Example #22
File: core.py Project: ayhanalp/AlpBoss
    def transform_point(self, point, _from, _to):
        '''Transforms a point (geometry_msgs/PointStamped) from frame "_from"
        to frame "_to".
        Arguments:
            - point = geometry_msgs/PointStamped, the point to transform
            - _from, _to = string, name of frame
        '''
        transform = self.kalman.get_transform(_from, _to)
        point_transformed = tf2_geom.do_transform_point(point, transform)

        return point_transformed
Example #23
def move(path):

    global control_client, robot_frame_id, pub

    # Call service client (collision avoidance) with path
    try:
        service_result = control_client(path)
        setpoint = service_result.setpoint
        service_path = service_result.new_path
    except rospy.ServiceException as e:
        rospy.logerr('Initial control service call failed: %s', e)
        return

    while service_path.poses:  # This loop keeps going while the service_path is not empty
        try:
            service_result = control_client(service_path)
            setpoint = service_result.setpoint
            service_path = service_result.new_path
        except rospy.ServiceException as e:
            rospy.logerr('Control service call failed: %s', e)

        # Transform Setpoint from service client
        trans = tf_buffer.lookup_transform(robot_frame_id,
                                           setpoint.header.frame_id,
                                           setpoint.header.stamp)
        transformed_setpoint = tf2_geometry_msgs.do_transform_point(
            setpoint, trans)

        # Create Twist message from the transformed Setpoint
        msg = Twist()

        if math.atan2(transformed_setpoint.point.y,
                      transformed_setpoint.point.x) < max_angular_velocity:
            msg.angular.z = math.atan2(transformed_setpoint.point.y,
                                       transformed_setpoint.point.x)
        else:
            msg.angular.z = max_angular_velocity

        if 0.5 * math.sqrt(
                transformed_setpoint.point.x**2 +
                transformed_setpoint.point.y**2) < max_linear_velocity:
            msg.linear.x = 0.5 * math.sqrt(
                transformed_setpoint.point.x**2 +
                transformed_setpoint.point.y**2)
        else:
            msg.linear.x = max_linear_velocity

        # Publish Twist
        pub.publish(msg)

    # Stop the robot; a fresh Twist is all zeros (and avoids referencing msg
    # before assignment when the loop above never ran)
    msg = Twist()
    pub.publish(msg)

    # Get new path from action server
    get_path()
Example #24
    def led_image_location(self, image_blob):
        
        loc = self.blob_location(image_blob)
        if loc is None:
            rospy.logwarn('Could not find average in point cloud')
            return
        (x, y, z, r, g, b) = loc  
        point_camera_frame = PointStamped(
            point=Point(x=x, y=y, z=z), 
            header=image_blob.header)
        # Publish points in camera optical frame for debugging
        self.led_publisher_camera.publish(
            PointStampedColorRGBA(
                point_stamped=point_camera_frame,
                color_rgba=ColorRGBA(r=r, g=g, b=b, a=1)))

        # translate frame from left_camera_optical_frame to head
        #
        # Coordinates!  This is my understanding...
        #
        # Camera optical frame coordinates are x right, y down, z ahead
        # Head frame coordinates are x ahead, y left, z up
        #
        # The depth channel is:
        # A large z value in the camera optical frame.        
        # A large x value in the head frame.
        #
        # Also expect sign change on the image plane coordinates as the
        # camera is installed upside down. 
        #
        # check for transform
        if not self.tf_buffer.can_transform(self.target_frame, 
                                             point_camera_frame.header.frame_id, 
                                             point_camera_frame.header.stamp):
            rospy.logwarn('No transform')
            return
        trans = self.tf_buffer.lookup_transform(
            self.target_frame,
            point_camera_frame.header.frame_id,
            point_camera_frame.header.stamp)
        point_head_frame = tf2_geometry_msgs.do_transform_point(
            point_camera_frame,
            trans)
        color = ColorRGBA(r=r, g=g, b=b, a=1)
        point_to_publish = PointStampedColorRGBA(
            point_stamped=point_head_frame,
            color_rgba=color)
        rospy.loginfo('Head (x, y, z, r, g, b) (%f, %f, %f, %d, %d, %d)', 
                      point_to_publish.point_stamped.point.x,
                      point_to_publish.point_stamped.point.y,
                      point_to_publish.point_stamped.point.z,
                      point_to_publish.color_rgba.r,
                      point_to_publish.color_rgba.g,
                      point_to_publish.color_rgba.b)
        self.led_publisher.publish(point_to_publish)
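To illustrate the frame convention described in the comments above (values only; the actual mapping comes from the TF tree): a point 2 m straight ahead of the lens is roughly (0, 0, 2) in the camera optical frame and (2, 0, 0) in the head frame.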
Example #25
def transformSetPoint(setPointResponse):
    
    try:
        rospy.loginfo("trying to transform from source %s to target %s", setPointResponse.setpoint.header.frame_id, "base_link")
        trans = tfBuffer.lookup_transform("base_link", setPointResponse.setpoint.header.frame_id, setPointResponse.setpoint.header.stamp)
        
    except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
        rospy.logerr("transformation went wront")
        return None
    
    return tf2_geometry_msgs.do_transform_point(setPointResponse.setpoint, trans).point
Example #26
def move(path):
    global control_client, robot_frame_id, pub
    while path.poses:
        # Call service client with path
        response_service = control_client(
            path)  # The service has one request: the path.
        new_path = response_service.new_path  # In exchange it gives you as response: setpoint and new path
        setpoint = response_service.setpoint

        # Transform Setpoint from service client
        transform = tf_buffer.lookup_transform(
            robot_frame_id, setpoint.header.frame_id,
            rospy.Time())  # Create the transform matrix
        transformed_setpoint = tf2_geometry_msgs.do_transform_point(
            setpoint, transform)  # Assign the transform matrix to the setpoint

        # Create Twist message from the transformed Setpoint
        Twist_msg = Twist()

        # Assign the twist velocities as a direction vector toward the setpoint
        Twist_msg.angular.z = -atan2(transformed_setpoint.point.y,
                                     transformed_setpoint.point.x)
        Twist_msg.linear.x = sqrt(transformed_setpoint.point.x**2 +
                                  transformed_setpoint.point.y**2)

        # Clip maximum velocities
        # if(Twist_msg.linear.x >= 0):
        Twist_msg.linear.x = min(max_linear_velocity, Twist_msg.linear.x)
        #Twist_msg.angular.z =- min(max_angular_velocity,Twist_msg.angular.z)
        '''    
        else: 
            Twist_msg.linear.x = max(-max_linear_velocity, Twist_msg.linear.x)
        
        if(Twist_msg.angular.z >= 0):
            Twist_msg.angular.z=min(max_angular_velocity, Twist_msg.angular.x)
        else: 
            Twist_msg.angular.z=max(-max_angular_velocity, Twist_msg.angular.x)
            '''

        # Publish Twist
        pub.publish(Twist_msg)
        rate.sleep()

        # Call service client again if the returned path is not empty and do stuff again
        path = new_path

    # Send 0 control Twist to stop robot
    Twist_msg.angular.z = 0
    Twist_msg.linear.x = 0
    pub.publish(Twist_msg)
    rate.sleep()

    # Get new path from action server
    get_path()
Example #27
def lookAt(self, point):
    # Create tf transformer for z point transformation to keep TIAGo's head on the same level
    tfBuffer = tf2_ros.Buffer()
    tf = tf2_ros.TransformListener(tfBuffer)

    # Wait for the server to come up
    rospy.loginfo('Waiting for the point head server to come up')
    self.point_head_client.wait_for_server(rospy.Duration(10.0))

    # Create the goal
    print('Looking at: ', point)
    ph_goal = PointHeadGoal()
    ph_goal.target.header.frame_id = 'map'
    ph_goal.max_velocity = 1
    ph_goal.min_duration = rospy.Duration(0.5)
    ph_goal.target.point.x = point[0]
    ph_goal.target.point.y = point[1]

    ph_goal.pointing_frame = 'head_2_link'
    ph_goal.pointing_axis.x = 1
    ph_goal.pointing_axis.y = 0
    ph_goal.pointing_axis.z = 0

    ps = PointStamped()
    ps.header.stamp = rospy.Time(0)
    ps.header.frame_id = 'head_2_link'
    transform_ok = False
    while not transform_ok and not rospy.is_shutdown():
        try:
            transform = tfBuffer.lookup_transform('base_link', 'head_2_link',
                                                  rospy.Time(0))
            get_z_ps = do_transform_point(ps, transform)
            transform_ok = True
        # This usually happens only on startup
        except tf2_ros.ExtrapolationException as e:
            rospy.sleep(1.0 / 4)
            ps.header.stamp = rospy.Time(0)
            rospy.logwarn(
                "Exception on transforming point... trying again \n(" +
                str(e) + ") at time " + str(ps.header.stamp))
        except tf2_ros.LookupException:
            pass
        except tf2_ros.ConnectivityException:
            pass

    ph_goal.target.point.z = get_z_ps.point.z
    print(get_z_ps.point.z)

    # Send the goal
    rospy.loginfo("Sending the goal...")
    self.point_head_client.send_goal(ph_goal)
    rospy.loginfo("Goal sent!!")

    rospy.sleep(3)
Example #28
    def transform_points(self, conePos):
        tf_cone_pos = []
        for i in range(0,len(conePos)):

            # transform points from camera_rgb_optical_frame to map frame
            point_stamped = PointStamped()
            point_stamped.point = conePos[i]
            point_stamped.header = std_msgs.msg.Header()
            point_stamped.header.stamp = rospy.Time.now()
            point_stamped.header.frame_id = "camera_rgb_optical_frame"
            transformed_point = tf2_geometry_msgs.do_transform_point(point_stamped, self.transform)
            #print 'x,y,z', transformed_point.point.x,transformed_point.point.y, transformed_point.point.z
            tf_cone_pos.append([transformed_point.point.x, transformed_point.point.y])
        return tf_cone_pos
Example #29
    def mainloop(self):
        # Set the rate of this loop
        rate = rospy.Rate(2)
        while not rospy.is_shutdown():
            if self.goal:
                try:
                    transform = self.tfBuffer.lookup_transform(
                        'world', 'tower', rospy.Time())

                    # msg = Vector3()
                    # msg.x = self.goal.x
                    # msg.y = self.goal.y
                    # msg.z = self.goal.z

                    # x = (msg.y * math.sin(0.523599)) + (msg.x * math.cos(0.523599))
                    # y = (msg.y * math.cos(0.523599)) - (msg.x * math.sin(0.523599))

                    # msg.x = round(x + (150*math.sin(0.506145)) - 0.370243036950555)
                    # msg.y = round(y - (150*math.cos(0.506145)) + 0.509702033064983)

                    # # msg.y = y*math.cos(0.523599) - x*math.sin(0.523599)
                    # # msg.x = y*math.sin(0.523599) + x*math.cos(0.523599)
                    # msg.z = 0
                    # Convert the goal to a PointStamped
                    point = PointStamped()
                    point.header.stamp = rospy.Time.now()
                    point.point.x = self.goal.x
                    point.point.y = self.goal.y
                    point.point.z = self.goal.z
                    # Use the do_transform_point function to convert the point using the transform
                    new_point = do_transform_point(point, transform)
                    # Convert the point back into a vector message containing integers
                    msg = Vector3()
                    msg.x = new_point.point.x
                    msg.y = new_point.point.y
                    msg.z = new_point.point.z
                    # Publish the vector
                    rospy.loginfo(
                        str(rospy.get_name()) +
                        ": Publishing Transformed Goal {}".format(
                            [msg.x, msg.y]))
                    self.goalpub.publish(msg)
                    # The tower will automatically send you a new goal once the drone reaches the requested position.
                    # Reset the goal
                    self.goal = None
                except (tf2_ros.LookupException, tf2_ros.ConnectivityException,
                        tf2_ros.ExtrapolationException):
                    continue

            rate.sleep()
Example #30
    def _balls_callback(self, msg):
        try:
            transform = self.tf_buffer.lookup_transform(
                self.map_frame, msg.header.frame_id, msg.header.stamp,
                rospy.Duration(1.0))
        except (tf2.ConnectivityException, tf2.LookupException,
                tf2.ExtrapolationException) as e:
            rospy.logwarn(e)
            return
        point = PointStamped()
        point.header = msg.header
        point.point = msg.pose.pose.position
        point = tf2_geometry_msgs.do_transform_point(point, transform)
        self.ball = point
Example #31
    def transform_point(self, point):
        """
        Convert a point to the local frame.
        """
        point_stamped = PointStamped()
        point_stamped.point.x = point[0]
        point_stamped.point.y = point[1]
        point_transformed_stamped = tf2_geometry_msgs.do_transform_point(
            point_stamped, self.transform)
        point_transformed = np.array([
            point_transformed_stamped.point.x,
            point_transformed_stamped.point.y
        ])
        return point_transformed
Example #32
    def cb_scan(self, data):
        """
        This callback runs each time a LIDAR scan is obtained from
        the /scan topic in ROS. Returns a topic /detection. If no
        object is found, /detection = [0,0]. If an object is found,
        /detection = [angle,distance] to median of scan.
        """
        self.target_pose = PoseWithCovarianceStamped()
        # Set max/min angle and increment
        scan_min = data.angle_min
        scan_max = data.angle_max
        scan_inc = data.angle_increment
        #now = rospy.get_rostime()
        scan_time = data.header.stamp.secs

        # Build angle array
        if self.physical_robot:
            y = np.arange(scan_min,scan_max,scan_inc)-1.57
        else:
            y = np.arange(scan_min,scan_max+0.01*scan_inc,scan_inc)

        # Pre-compute trig functions of angles
        ysin = np.sin(y)
        ycos = np.cos(y)

        # Apply a median filter to the range scans
        x = sg.medfilt(data.ranges,1)-1

        # Calculate the difference between consecutive range values
        x_diff1 = np.power(np.diff(x),2)

        # Convert range and bearing measurement to cartesian coordinates
        y_coord = x*ysin
        x_coord = x*ycos

        # Compute difference between consecutive values in cartesian coordinates
        y_diff = np.power(np.diff(y_coord),2)
        x_diff = np.power(np.diff(x_coord),2)

        # Compute physical distance between measurements
        dist = np.power(x_diff+y_diff,0.5)

        # Segment the LIDAR scan based on physical distance between measurements
        x2 = np.split(x,np.argwhere(dist>self.scan_dist_thresh))
        y2 = np.split(y,np.argwhere(dist>self.scan_dist_thresh))
        dist2 = np.split(dist,np.argwhere(dist>self.scan_dist_thresh))

        x_coord2 = np.split(x_coord,np.argwhere(dist>self.scan_dist_thresh))
        y_coord2 = np.split(y_coord,np.argwhere(dist>self.scan_dist_thresh))
        ran2 = np.split(data.ranges,np.argwhere(dist>self.scan_dist_thresh))

        bearing = np.array([0,0,0,0], dtype=np.float32)
        for i in range(len(y2)):
            # Check if there are at least 4 points in an object (reduces noise)
            ylen = len(y2[i])-0
            dist2_sum = np.sum(dist2[i][1:-2])
            if ylen > self.ylen_lim and dist2_sum > self.dist_min and dist2_sum < self.dist_max and np.median(ran2[i]) <= 25:
                x_pt = np.median(x_coord2[i])
                y_pt = np.median(y_coord2[i])
                if True:
                    ang = np.median(y2[i])
                    dis = np.median(x2[i])
                    mn = min(x2[i][1:ylen])
                    mx = max(x2[i][1:ylen])
                    dis = ((x_pt**2+y_pt**2))**0.5
                    if ang > self.ang_min and ang < self.ang_max:
                        sl = ylen
                        p = PointStamped()
                        p.point.x = x_pt
                        p.point.y = y_pt
                        p.point.z = 0.0
                        new_p = do_transform_point(p, self.R)
                        u = (dist-self.xmin)/(self.xmax-self.xmin)*(self.umax-self.umin)+self.umin
                        self.uvar = np.nanstd(u)**2
                        self.loc = (dis-self.xmin)/(self.xmax-self.xmin)*(self.locmax-self.locmin)+self.locmin
                        self.target_pose.pose.pose.position.x = new_p.point.x
                        self.target_pose.pose.pose.position.y = new_p.point.y
                        self.target_pose.pose.pose.position.z = self.loc
                        cov = self.target_pose.pose.covariance
                        self.target_pose.pose.covariance[0] = self.uvar
                        self.target_pose.pose.covariance[7] = self.uvar
                        cov[0] = 1.0*dis #self.uvar
                        cov[7] = 0.1*dis #self.uvar
                        h = std_msgs.msg.Header()
                        h.stamp = rospy.Time.now()
                        h.frame_id = 'base_link'
                        self.target_pose.header = h
                    else:
                        pass
                        #print('fail1')
                else:
                    pass
                    #print('fail2')
            else:
                pass
                #print('fail3',ylen,dist2_sum)

        # Publish the target pose estimate to ROS on topic /detection
        self.target_pub.publish(self.target_pose)