Example #1
    def perform(self, reevaluate=False):
        # Command a goal at the origin of base_footprint (identity orientation),
        # i.e. stop and stand at the current position
        stand_pose = PoseStamped()
        stand_pose.header.stamp = rospy.Time.now()
        stand_pose.header.frame_id = 'base_footprint'
        stand_pose.pose.orientation.w = 1.0
        self.blackboard.pathfinding.publish(stand_pose)
        # Leave this behavior once the robot has stopped walking
        if not self.blackboard.blackboard.is_currently_walking():
            self.pop()
Example #2
    def pose2d_to_pose(self, pose2d):
        pose = PoseStamped()
        pose.header.stamp = rospy.Time.now()
        pose.header.frame_id = 'base_footprint'
        pose.pose.position.x = pose2d.x
        pose.pose.position.y = pose2d.y
        # Euler -> Quaternion (a lot simpler without roll and pitch)
        pose.pose.orientation.z = math.sin(0.5 * pose2d.theta)
        pose.pose.orientation.w = math.cos(0.5 * pose2d.theta)
        return pose
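For reference, the yaw can be recovered from such a yaw-only quaternion with 2 * atan2(z, w). A minimal sketch (the helper name is ours, for illustration only):

import math

def pose_to_yaw(pose):
    # Inverse of the conversion above; valid when roll = pitch = 0,
    # i.e. the quaternion is (0, 0, sin(theta/2), cos(theta/2))
    return 2.0 * math.atan2(pose.pose.orientation.z, pose.pose.orientation.w)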
Example #3
def add_crosses():
    global tf_buffer
    global cam_info

    crosses = ["crossL", "crossR", "crossTM", "crosMM", "crossBM"]

    for cross in crosses:
        try:
            trans = tf_buffer.lookup_transform("camera_optical_frame",
                                                str(cross), rospy.Time(0))

            corner_stamped = PoseStamped()
            corner_stamped.header.stamp = rospy.Time.now()
            corner_stamped.header.frame_id = "camera_optical_frame"
            corner_stamped.pose.position.x = trans.transform.translation.x
            corner_stamped.pose.position.y = trans.transform.translation.y
            corner_stamped.pose.position.z = trans.transform.translation.z
            corner_stamped.pose.orientation = trans.transform.rotation

            p = [
                corner_stamped.pose.position.x, corner_stamped.pose.position.y,
                corner_stamped.pose.position.z
            ]
            k = np.reshape(cam_info.K, (3, 3))
            p_pixel = np.matmul(k, p)

            if p_pixel[2] > 0:
                p_pixel = p_pixel * (1 / p_pixel[2])

                # Only keep crosses that project inside the image
                if 0 < p_pixel[0] <= cam_info.width and 0 < p_pixel[1] <= cam_info.height:
                    # 'or True' currently disables the simulated detection dropout
                    if np.random.uniform(0, 10) < 8 or True:
                        pose = tf_buffer.transform(
                            corner_stamped,
                            "base_footprint",
                            timeout=rospy.Duration(0.5))

                        point_msg = Point()
                        point_msg.x = add_gaussian_noise(pose.pose.position.x)
                        point_msg.y = add_gaussian_noise(pose.pose.position.y)
                        point_msg.z = pose.pose.position.z  #todo set to 0?

                        pixel_msg = LineIntersectionRelative()
                        pixel_msg.pose.pose.pose.position = point_msg
                        pixel_msg.type = pixel_msg.L

                        message_container.intersections.append(pixel_msg)

        except (tf2_ros.LookupException, tf2_ros.ConnectivityException,
                tf2_ros.ExtrapolationException):
            continue

    message_container.header.stamp = rospy.Time.now()
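The projection above is the standard pinhole model: multiply the camera-frame point by the intrinsic matrix K and normalise by the depth. A standalone sketch of that check (the helper name point_in_image is ours, for illustration only):

import numpy as np

def point_in_image(point_xyz, cam_info):
    # Pinhole projection: pixel ~ K @ [x, y, z], normalised by z
    k = np.reshape(cam_info.K, (3, 3))
    u, v, z = np.matmul(k, point_xyz)
    if z <= 0:  # point is behind the camera
        return None
    u, v = u / z, v / z
    if 0 < u <= cam_info.width and 0 < v <= cam_info.height:
        return (u, v)
    return None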
Example #4
    def ScanMap(self):
        if self.odom is None:
            return

        # Transform: odom frame -> map frame
        poseStamped = PoseStamped()
        poseStamped.header = self.odom.header
        poseStamped.pose = self.odom.pose.pose
        try:
            poseStamped = self.tfBuffer.transform(poseStamped, self.map_frame, timeout=rospy.Duration(1.0 / self.rate))
        except (tf2_ros.LookupException, tf2_ros.ConnectivityException,
                tf2_ros.ExtrapolationException):
            # rospy.logerr('Transform failed')
            return
        
        # Scan
        yaw = tf.transformations.euler_from_quaternion([poseStamped.pose.orientation.x, poseStamped.pose.orientation.y, 
                                                    poseStamped.pose.orientation.z, poseStamped.pose.orientation.w])[2]
        R = np.array([[np.cos(yaw), -np.sin(yaw)], [np.sin(yaw), np.cos(yaw)]])      
        T = np.array([poseStamped.pose.position.x, poseStamped.pose.position.y])
        
        # Rotate/translate the precomputed scan pattern into the map frame,
        # then convert it to image (pixel) coordinates and clamp to the map bounds
        scan_buffer = (np.matmul(self.scan_buffer, R.T) + T)
        scan_buffer = self.occGridParam.map2ImageTransform(scan_buffer)
        scan_buffer = np.int32(np.round(scan_buffer))
        scan_buffer[:, :, 0] = np.clip(scan_buffer[:, :, 0], 0, self.width - 1)
        scan_buffer[:, :, 1] = np.clip(scan_buffer[:, :, 1], 0, self.height - 1)
        # For each simulated ray, index of the first occupied cell (-1 if none is hit)
        pixel_values = self.occMap[scan_buffer[:, :, 1], scan_buffer[:, :, 0]]
        mask = pixel_values > 0
        indices = np.where(mask.any(axis=1), mask.argmax(axis=1), -1)

        pointclouds = []
        for index, scan_points in zip(indices, scan_buffer):
            if index == -1:
                pointcloud = [np.inf, np.inf, np.inf]
            else:
                pointcloud = [scan_points[index, 0], scan_points[index, 1], 0.0]
            pointclouds.append(pointcloud)
        pointclouds = np.asarray(pointclouds)
        pointclouds[:, :2] = self.occGridParam.image2MapTransform(pointclouds[:, :2])

        self.map_pointclouds = pointclouds
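Note that the scan points are stored as row vectors, so the rotation is applied by right-multiplying with R.T, which is equivalent to multiplying each column-vector point by R. A quick numpy sanity check of that identity:

import numpy as np

yaw = 0.3
R = np.array([[np.cos(yaw), -np.sin(yaw)], [np.sin(yaw), np.cos(yaw)]])
pts = np.array([[1.0, 0.0], [0.0, 2.0]])      # points as rows
assert np.allclose(pts @ R.T, (R @ pts.T).T)  # row form == column form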
Example #5
    def ObstacleCallback(self, msg):
        if self.odom is None:
            return

        # Transform
        poseStamped = PoseStamped()
        poseStamped.header = msg.header
        poseStamped.pose = msg.pose.pose
        try:
            poseStamped = self.tfBuffer.transform(poseStamped, self.odom_frame, timeout=rospy.Duration(1.0 / self.rate))
        except (tf2_ros.LookupException, tf2_ros.ConnectivityException,
                tf2_ros.ExtrapolationException):
            # rospy.logerr('Transform failed')
            return

        x, y, z = poseStamped.pose.position.x, poseStamped.pose.position.y, poseStamped.pose.position.z
        dx, dy = x - self.odom.pose.pose.position.x, y - self.odom.pose.pose.position.y
        dist = np.sqrt(dx ** 2 + dy ** 2)
        if dist < self.min_range or dist > self.max_range:
            self.obstacle_pointclouds = None
            return
        pointclouds = np.array([[x, y, z]])

        self.obstacle_pointclouds = pointclouds
Example #6
def send_goal(checkpoint):
    goal = PoseStamped()
    goal.header.stamp = rospy.Time.now()
    goal.header.frame_id = 'cf1/base_link'
    goal.pose.position.x = checkpoint[0]
    goal.pose.position.y = checkpoint[1]
    goal.pose.position.z = checkpoint[2]

    x, y, z, w = quaternion_from_euler(0, 0, checkpoint[3])

    goal.pose.orientation.x = x
    goal.pose.orientation.y = y
    goal.pose.orientation.z = z
    goal.pose.orientation.w = w

    pose_pub.publish(goal)
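A minimal usage sketch, assuming pose_pub is a rospy.Publisher of PoseStamped messages (the node and topic names below are illustrative, not taken from the original snippet):

import rospy
from geometry_msgs.msg import PoseStamped

rospy.init_node('checkpoint_sender')                                 # node name assumed
pose_pub = rospy.Publisher('/cf1/goal', PoseStamped, queue_size=1)   # topic name assumed
rospy.sleep(1.0)                 # give the publisher time to connect
send_goal([1.0, 0.5, 0.4, 0.0])  # x, y, z, yaw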
Example #7
def yaw_towards_frame(pose, target_frame):
    """Return the yaw towards target_frame, encoded as a quaternion [x, y, z, w]."""
    # TODO: send in transform tf_buf as argument instead of calculating here
    if not tf_buf.can_transform(target_frame, 'map', rospy.Time(0)):
        rospy.logwarn_throttle(5.0,
                               'No transform from %s to map' % target_frame)
        return
    #rospy.loginfo(['-'*20,pose])
    if pose.header.frame_id != "map":
        pose = tf_buf.transform(pose, 'map')
    #rospy.loginfo(euler_from_quaternion([pose.pose.orientation.x,pose.pose.orientation.y,pose.pose.orientation.z,pose.pose.orientation.w]))

    frame_pose = PoseStamped()  # [0, 0, 0]
    frame_pose.header.stamp = rospy.Time(0)
    frame_pose.header.frame_id = target_frame
    frame_pose.pose.orientation.w = 1

    #rospy.loginfo(tf_buf.registration.print_me())

    p = tf_buf.transform(frame_pose, 'map')

    frame_pos = np.array(
        [p.pose.position.x, p.pose.position.y, p.pose.position.z])
    curr = np.array(
        [pose.pose.position.x, pose.pose.position.y, pose.pose.position.z])

    rospy.loginfo(curr)
    rospy.loginfo(frame_pos)

    diff = frame_pos - curr
    rospy.loginfo(diff)
    theta = np.arctan2(diff[1], diff[0])
    rospy.loginfo(theta)

    q_rot = quaternion_from_euler(0, 0, theta, "sxyz")

    return q_rot
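A short usage sketch; the target frame name is illustrative, and pose stands for any PoseStamped that can be transformed to the map frame. Since the result is a yaw-only quaternion, the angle can be read back with 2 * atan2(z, w):

q = yaw_towards_frame(pose, 'aruco/marker1')   # frame name assumed for illustration
if q is not None:                              # None if the transform was unavailable
    yaw = 2.0 * np.arctan2(q[2], q[3])         # recover the yaw angle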
Example #8
    def perform(self, reevaluate=False):
        # get ball pos
        ball_pos = self.blackboard.world_model.get_ball_position_xy()
        our_pose = self.blackboard.world_model.get_current_position()

        # decide on side
        if our_pose[1] < ball_pos[1]:
            side_sign = -1
        else:
            side_sign = 1

        # compute goal
        goal_x = ball_pos[0]
        if self.accept:
            goal_x += self.pass_pos_x

        # Limit the x position, so that we are not running into the enemy goal
        goal_x = min(self.max_x, goal_x)

        goal_y = ball_pos[1] + side_sign * self.pass_pos_y
        goal_yaw = 0

        pose_msg = PoseStamped()
        pose_msg.header.stamp = rospy.Time.now()
        pose_msg.header.frame_id = self.blackboard.map_frame
        pose_msg.pose.position.x = goal_x
        pose_msg.pose.position.y = goal_y
        quaternion = quaternion_from_euler(0, 0, goal_yaw)
        pose_msg.pose.orientation.x = quaternion[0]
        pose_msg.pose.orientation.y = quaternion[1]
        pose_msg.pose.orientation.z = quaternion[2]
        pose_msg.pose.orientation.w = quaternion[3]
        self.blackboard.pathfinding.publish(pose_msg)

        if self.blackboard.pathfinding.status in [GoalStatus.SUCCEEDED, GoalStatus.ABORTED]:
            self.pop()
Example #9
    def get_bbox_pose(self, bbox, cloud):
        # Initialize variables
        n = 0
        cog = [0] * 3
        min_dim = [None] * 3
        max_dim = [None] * 3
        cloud_dim = [0] * 3

        # Get the width and height of the bbox
        width = bbox[2] - bbox[0]
        height = bbox[3] - bbox[1]

        # Shrink the bbox by 10% to minimize background points
        xmin = bbox[0] + int(0.1 * width)
        xmax = bbox[2] - int(0.1 * width)
        ymin = bbox[1] + int(0.1 * height)
        ymax = bbox[3] - int(0.1 * height)

        # Place all the box points in an array to be used by the read_points function below
        bbox_points = [[x, y] for x in range(xmin, xmax)
                       for y in range(ymin, ymax)]

        # Clear the current list
        person_points = list()

        # Read in the x, y, z coordinates of all bbox points in the cloud
        for point in point_cloud2.read_points(cloud,
                                              skip_nans=True,
                                              uvs=bbox_points):
            try:
                for i in range(3):
                    if min_dim[i] is None or point[i] < min_dim[i]:
                        min_dim[i] = point[i]

                    if max_dim[i] is None or point[i] > max_dim[i]:
                        max_dim[i] = point[i]

                    cog[i] += point[i]

                n += 1

                #person_points.append(point)
            except Exception:
                # Skip malformed points rather than aborting the whole bbox
                pass

        # Publish the pointcloud for this person
        #person_cloud = point_cloud2.create_cloud(cloud.header, cloud.fields, person_points)
        #self.people_cloud_pub.publish(person_cloud)

        # If we don't have any points, just return empty handed
        if n == 0:
            return (None, None)

        # Compute the COG and dimensions of the bounding box
        for i in range(3):
            cog[i] /= n
            cloud_dim[i] = max_dim[i] - min_dim[i]

        # Fill in the person pose message
        person_cog = PoseStamped()
        person_cog.header.frame_id = self.depth_frame
        person_cog.header.stamp = rospy.Time.now()

        person_cog.pose.position.x = cog[0]
        person_cog.pose.position.y = cog[1]
        person_cog.pose.position.z = cog[2]

        # Project the COG onto the base frame
        try:
            person_in_base_frame = self.tfBuffer.transform(
                person_cog, self.base_link)
        except (tf2_ros.LookupException, tf2_ros.ConnectivityException,
                tf2_ros.ExtrapolationException) as e:
            return (None, None)

        # Set the person on the ground
        person_in_base_frame.pose.position.z = 0.0

        person_in_base_frame.pose.orientation.x = 0.707
        person_in_base_frame.pose.orientation.y = 0.0
        person_in_base_frame.pose.orientation.z = 0.707
        person_in_base_frame.pose.orientation.w = 0.0

        # Fill in the BoundBox message
        person_bounding_box = BoundingBox()
        person_bounding_box.header.frame_id = self.depth_frame
        person_bounding_box.header.stamp = rospy.Time.now()
        person_bounding_box.pose = person_in_base_frame.pose
        person_bounding_box.dimensions.x = cloud_dim[0]
        person_bounding_box.dimensions.y = cloud_dim[1]
        person_bounding_box.dimensions.z = cloud_dim[2]

        return (person_in_base_frame, person_bounding_box)
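The accumulation loop above computes the centroid (cog) and per-axis extents point by point; the same quantities can be obtained with numpy, assuming the points are first collected into an (N, 3) array (for example via the commented-out person_points list):

import numpy as np

pts = np.asarray(person_points)                  # (N, 3) array of bbox points
cog = pts.mean(axis=0)                           # centre of gravity
cloud_dim = pts.max(axis=0) - pts.min(axis=0)    # bounding-box dimensions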
Example #10
def followperson(data):
    global detect
    global time_last_detection
    global lock
    global navigator

    lock.acquire(blocking=False)

    #print("data",data)
    dictionary = message_converter.convert_ros_message_to_dictionary(data)
    pixel_yolo = dictionary['data']
    pixel_yolo = eval(pixel_yolo)
    #print("pixel yolo",pixel_yolo)

    if pixel_yolo == (-1, -1):
        now = rospy.Time.now()
        #print(now)
        if (detect) and ((now - time_last_detection).to_nsec() < 1000000000):
            print("Personne perdue de vue")
            msg = Twist()
            msg.angular.z = 0.2
            cmd_pub.publish(msg)
        else:
            print("retour à la ronde")
            if (lock.locked()):
                lock.release()
        '''    
        try :
           # print("le robot doit s'arrêter'")
            #cancel_msg = GoalID()
            #cancel_pub.publish(cancel_msg)
           #p_stop = PoseStamped()
           # p_stop.header.frame_id = 'base_link'
           # p_stop.pose.position.x = 0
           # p_stop.pose.position.y = 0
           # p_stop.pose.position.z = 0
           # p_stop.pose.orientation.w = 0
           # p_stop_map = tf_listener.transformPose('map', p_stop)

        except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
            print("erreur")
            pass
        

        print("publie STOP")
        #yolo_goal.publish(p_stop_map)
        navigator.shutdown()
        fin = rospy.get_rostime()
        #print("fin", fin,"debut", debut)
        pass
        '''

    elif pixel_yolo == (-2, -2):
        if id_check.verif is None:
            pass

        elif id_check.verif == "OK":
            if (lock.locked()):
                lock.release()
            detect = False

        elif id_check.verif == "Probleme":
            print("IDENTIFICATION ERROR")
            if (lock.locked()):
                lock.release()

    if (pixel_yolo != (-1, -1)):
        detect = True
        time_last_detection = rospy.Time.now()
        try:
            point_camera = PointFromPixel(pixel_yolo, model)
            p_camera = PoseStamped()
            p_camera.header.frame_id = 'camera_rgb_optical_frame'
            p_camera.pose.position.x = point_camera[0]
            p_camera.pose.position.y = point_camera[1]
            p_camera.pose.position.z = point_camera[2]
            p_map = tf_listener.transformPose('map', p_camera)
            p_map.pose.orientation.x = 0
            p_map.pose.orientation.y = 0
            p_map.pose.orientation.z = 0
            p_map.pose.orientation.w = 1
            print(p_map)
            pos = {'x': p_map.pose.position.x, 'y': p_map.pose.position.y}
            quat = {
                'r1': p_map.pose.orientation.x,
                'r2': p_map.pose.orientation.y,
                'r3': p_map.pose.orientation.z,
                'r4': p_map.pose.orientation.w
            }

        except (tf.LookupException, tf.ConnectivityException,
                tf.ExtrapolationException):
            # Bail out on a failed transform; pos/quat would be undefined below
            print("error")
            if (lock.locked()):
                lock.release()
            return

        print("publishing goal")
        # yolo_goal.publish(p_map)
        success = navigator.goto(pos, quat)
    '''
    else:
        #cancel_msg = GoalID()
        #cancel_pub.publish(cancel_msg)

        # even if it's called shutdown, should just cancel actual goal without shuting down the system
        navigator.shutdown()
    '''

    # when the robot reaches the person...
    # send information via the server to run security4.py

    if (lock.locked()):
        lock.release()
Example #11
def publish_goals():
    global tf_buffer
    global cam_info
    global pubGoals  #todo really necessary?!
    detected_goals = []

    goals_msg = GoalRelative()
    goals = ["goalLB", "goalLT", "goalRB", "goalRT"]

    for goal in goals:
        try:
            trans = tf_buffer.lookup_transform("camera_optical_frame",
                                                str(goal), rospy.Time(0))

            corner_stamped = PoseStamped()
            corner_stamped.header.frame_id = "camera_optical_frame"
            corner_stamped.pose.position.x = trans.transform.translation.x
            corner_stamped.pose.position.y = trans.transform.translation.y
            corner_stamped.pose.position.z = trans.transform.translation.z
            corner_stamped.pose.orientation = trans.transform.rotation

            p = [
                corner_stamped.pose.position.x, corner_stamped.pose.position.y,
                corner_stamped.pose.position.z
            ]
            k = np.reshape(cam_info.K, (3, 3))
            p_pixel = np.matmul(k, p)

            if p_pixel[2] > 0:
                p_pixel = p_pixel * (1 / p_pixel[2])

                # Only keep posts that project inside the image
                if 0 < p_pixel[0] <= cam_info.width and 0 < p_pixel[1] <= cam_info.height:
                    # 'or True' currently disables the simulated detection dropout
                    if np.random.uniform(0, 10) < 8 or True:
                        pose = tf_buffer.transform(
                            corner_stamped,
                            "base_footprint",
                            timeout=rospy.Duration(0.5))

                        point_msg = Point()
                        point_msg.x = add_gaussian_noise(pose.pose.position.x)
                        point_msg.y = add_gaussian_noise(pose.pose.position.y)
                        point_msg.z = pose.pose.position.z  #todo set to 0?

                        detected_goals.append(point_msg)

        except (tf2_ros.LookupException, tf2_ros.ConnectivityException,
                tf2_ros.ExtrapolationException):
            continue

    if len(detected_goals) == 1:
        goals_msg.left_post = detected_goals[0]
        goals_msg.right_post = detected_goals[0]

    elif len(detected_goals) == 2:
        goals_msg.left_post = detected_goals[0]
        goals_msg.right_post = detected_goals[1]

    goals_msg.header.stamp = rospy.Time.now()

    pubGoals.publish(goals_msg)