class DroneCommunication:

    # Initialization Function
    def __init__(self):

        # Parameters from YAML File
        self.drone_base_footprint_frame = rospy.get_param('drone_base_footprint')
        self.drone_base_link_frame = rospy.get_param('drone_base_link')
        self.map_frame = rospy.get_param('map_frame')
        self.control_drone_srv_name = rospy.get_param('drone_control_srv')

        # TF Listener for Getting Drone Position
        self.tf = Buffer()
        self.tf_listener = TransformListener(self.tf)
        # String with Num. of Command that will be sent to Drone Service
        self.drone_command = ''
        # Control Drone Service
        self.control_drone = rospy.ServiceProxy(self.control_drone_srv_name, ControlDrone)

    # Get the Yaw Between Map and Drone Function
    def get_drone_yaw(self):
        # Read TF and Compute Yaw
        try:
            tf_ = self.tf.lookup_transform(self.drone_base_link_frame, self.map_frame,
                                           rospy.Time(0), rospy.Duration(1.0))
            (_, _, yaw) = euler_from_quaternion([tf_.transform.rotation.x, tf_.transform.rotation.y,
                                                 tf_.transform.rotation.z, tf_.transform.rotation.w])
            return 180 - np.rad2deg(yaw)
        except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
            return -1

    # Get the Position in Map of Drone Function
    def get_drone_pose(self):

        # Read TF and Compute Position in Map
        try:
            tf_ = self.tf.lookup_transform(self.map_frame, self.drone_base_link_frame,
                                           rospy.Time(0), rospy.Duration(1.0))
            (r1, r2, r3, _) = quaternion_matrix([tf_.transform.rotation.x, tf_.transform.rotation.y,
                                                 tf_.transform.rotation.z, tf_.transform.rotation.w])
            pose = np.asmatrix([r1[0:3], r2[0:3], r3[0:3]]) * \
                   np.asmatrix([[tf_.transform.translation.x],[tf_.transform.translation.y],[tf_.transform.translation.z]])
            (x, y, z) = (pose.item(0), pose.item(1), pose.item(2))
        except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
            return -1

        return x, y, z

    # Send SIGNAL to Control Drone Service to Execute Command
    def send_control_command(self, command):

        rospy.wait_for_service(self.control_drone_srv_name)
        try:
            drone_execute_result = self.control_drone(command)
        except rospy.ServiceException as e:
            rospy.logerr("Service call failed: %s", e)
            return -1
        return drone_execute_result.status
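A minimal usage sketch (hypothetical node name; assumes the YAML parameters above are on the parameter server and the ControlDrone service is running):

if __name__ == '__main__':
    rospy.init_node('drone_communication_demo')
    drone = DroneCommunication()
    yaw = drone.get_drone_yaw()
    if yaw != -1:
        rospy.loginfo("Drone yaw: %.1f deg", yaw)
    drone.send_control_command('1')  # command numbers are defined by the drone service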
class SetObjectTarget(smach.State):
    def __init__(self):
        smach.State.__init__(self, outcomes=['target_set', 'no_new_object'],
                             input_keys=['sot_object_array',
                                         'sot_ork_frame',
                                         'sot_picked_objects',
                                         'sot_target_object',
                                         'sot_grasp_target_pose'],
                             output_keys=['sot_target_object',
                                          'sot_grasp_target_pose'])
        self.ork_srv = rospy.ServiceProxy('get_object_info', GetObjectInformation)
        self.tf_buffer = Buffer()
        self.tf_listener = TransformListener(self.tf_buffer)

    def execute(self, ud):
        rospy.logdebug("Entered 'SET_OBJECT_TARGET' state.")

        new_target = ''
        is_new_obj = False

        # loop through all found objects
        for i in range(len(ud.sot_object_array)):

            # get object info, mainly its name
            try:
                req = GetObjectInformationRequest()
                req.type = ud.sot_object_array[i].type
                res = self.ork_srv(req)
                # if the object hasn't been grasped yet, make the object the current grasp target
                is_new_obj = True
                if any(res.information.name in s for s in ud.sot_picked_objects):
                    is_new_obj = False

                if is_new_obj:
                    new_target = res.information.name

            except rospy.ServiceException:
                rospy.logerr("GetObjectInformation service request failed.")

            # if a new object has been detected
            if new_target and is_new_obj:

                # get the transform from ork's frame to odom

                transform = self.tf_buffer.lookup_transform('odom', ud.sot_ork_frame, rospy.Time(0))
                # get the object's pose in odom frame
                ud.sot_grasp_target_pose = do_transform_pose(ud.sot_object_array[i].pose.pose, transform)
                ud.sot_target_object = new_target
                ud.sot_grasp_target_pose.pose.position.z += 0.08
                ud.sot_grasp_target_pose.pose.position.x -= 0.15

                print "OBJECT NAME : " + new_target
                print "POSE : "
                print ud.sot_grasp_target_pose
                check_call([PATH_TO_PDF_CREATOR], shell=True)
                check_call(['~/sara_ws/src/PDF_creator/create_pdf.sh'], shell=True)
                return 'target_set'
                # rospy.logerr("Could not get transform from " + ud.sot_ork_frame + " to 'odom'.")

        return 'no_new_object'
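A minimal sketch of wiring this state into a SMACH container; the userdata values below are illustrative placeholders that the original state machine would obtain from an object-detection state:

sm = smach.StateMachine(outcomes=['finished'])
sm.userdata.sot_object_array = []
sm.userdata.sot_ork_frame = 'camera_rgb_optical_frame'
sm.userdata.sot_picked_objects = []
sm.userdata.sot_target_object = ''
sm.userdata.sot_grasp_target_pose = PoseStamped()
with sm:
    smach.StateMachine.add('SET_OBJECT_TARGET', SetObjectTarget(),
                           transitions={'target_set': 'finished',
                                        'no_new_object': 'finished'})
outcome = sm.execute()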
Example #3
class TransformHandler():
    def __init__(self, gt_frame, est_frame, max_time_between=0.01):
        self.gt_frame = gt_frame
        self.est_frame = est_frame
        self.frames = [gt_frame, est_frame]

        self.tf_buffer = Buffer(cache_time=rospy.Duration(max_time_between))
        self.__tf_listener = TransformListener(self.tf_buffer)

        self.warn_timer = rospy.Timer(rospy.Duration(5), self.__warn_timer_cb)

    def __warn_timer_cb(self, evt):

        available_frames = self.tf_buffer.all_frames_as_string()
        avail = True
        for frame in self.frames:
            if frame not in available_frames:
                rospy.logwarn('Frame {} has not been seen yet'.format(frame))
                avail = False
        if avail:
            self.warn_timer.shutdown()

    def get_transform(self, fixed_frame, target_frame):
        # caller should handle the exceptions
        return self.tf_buffer.lookup_transform(target_frame, fixed_frame,
                                               rospy.Time(0))
Example #4
class SimpleMapper(Node):
    def __init__(self, name):
        super().__init__(name)

        self.tf_buffer = Buffer()
        self.tf_listener = TransformListener(self.tf_buffer, self)
        self.create_timer(SAMPLE_PERIOD, self.log_pose)
        self.time_since_start = 0
        self.file = open('odom_log.csv', 'w')

    def log_pose(self):
        rotation = None
        translation = None
        try:
            tf = self.tf_buffer.lookup_transform('odom', 'base_link',
                                                 Time(sec=0, nanosec=0))
            rotation = quaternion_to_euler(tf.transform.rotation)[0]
            translation = [
                tf.transform.translation.x, tf.transform.translation.y
            ]
        except (LookupException, ConnectivityException,
                ExtrapolationException) as e:
            print('No required transformation found: `{}`'.format(str(e)))
            return

        if translation[0] or translation[1] or rotation or self.time_since_start:
            self.time_since_start += SAMPLE_PERIOD
            output = f'{self.time_since_start:.1f},{translation[0]:.3f},{translation[1]:.3f},{rotation:.2f}\n'
            print(output)
            self.file.write(output)
Example #5
class OfflineTfBuffer():
    def __init__(self, transforms):
        self.t_front = transforms[0].header.stamp
        self.t_back = transforms[-1].header.stamp
        self.buff = Buffer(cache_time=(self.t_back - self.t_front),
                           debug=False)
        for transform in transforms:
            self.buff.set_transform(transform, '/auth')

    def LookupTransform(self, target_frame, source_frame, times):
        return [
            self.buff.lookup_transform(target_frame, source_frame, t)
            for t in times
        ]

    def AvailableTimeRange(self, target_frame, source_frame, times):
        if times[0] > self.t_back or times[-1] < self.t_front:
            return []
        idx_front = 0
        idx_back = len(times) - 1
        while times[idx_front] < self.t_front:
            idx_front += 1
        while times[idx_back] > self.t_back:
            idx_back -= 1
        return (idx_front, idx_back + 1)

    def CanTransform(self, target_frame, source_frame, times):
        return self.buff.can_transform(
            target_frame, source_frame, times[0]) and self.buff.can_transform(
                target_frame, source_frame, times[-1])
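A minimal usage sketch, assuming transforms is a time-ordered list of TransformStamped messages (e.g. read out of a bag file) and the frame names are illustrative:

offline = OfflineTfBuffer(transforms)
stamps = [tr.header.stamp for tr in transforms]
if offline.CanTransform('map', 'base_link', stamps):
    lookups = offline.LookupTransform('map', 'base_link', stamps)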
def align_cb(userdata, default_goal):
    rospy.logdebug("Entered align base callback.")
    align_goal = MoveBaseGoal()
    tf_buffer = Buffer()
    tf_listener = TransformListener(tf_buffer)

    tf_stamped = tf_buffer.lookup_transform('map', 'base_link', rospy.Time(0), rospy.Duration(10))

    # we don't want the robot to translate, only to rotate
    align_goal.target_pose.pose.position.x = tf_stamped.transform.translation.x
    align_goal.target_pose.pose.position.y = tf_stamped.transform.translation.y

    # get orientation to face waypoint 2
    desired_yaw = atan2(userdata.al_cb_waypoints[1].pose.position.y - align_goal.target_pose.pose.position.y,
                        userdata.al_cb_waypoints[1].pose.position.x - align_goal.target_pose.pose.position.x)
    q = tf_conversions.transformations.quaternion_from_euler(0.0, 0.0, desired_yaw)
    align_goal.target_pose.pose.orientation.x = q[0]
    align_goal.target_pose.pose.orientation.y = q[1]
    align_goal.target_pose.pose.orientation.z = q[2]
    align_goal.target_pose.pose.orientation.w = q[3]

    align_goal.target_pose.header.frame_id = 'map'
    align_goal.target_pose.header.stamp = rospy.Time.now()

    return align_goal
class AttemptMonitor(smach.State):
    def __init__(self):
        smach.State.__init__(self, outcomes=['monitoring_done', 'wp2_case', 'monitor_failed'],
                             input_keys=['ma_current_attempt', 'ma_attempt_limit', 'ma_target_wp', 'ma_waypoints',
                                         'ma_wp_str'],
                             output_keys=['ma_current_attempt', 'ma_target_wp'])

        self.tf_buffer = Buffer()
        self.tf_listener = TransformListener(self.tf_buffer)

        self.tts_pub = rospy.Publisher('sara_tts', String, queue_size=1, latch=True)
        self.face_cmd = rospy.Publisher('/face_mode', UInt8, queue_size=1, latch=True)

        self.grasp_distance = 1.6

    def execute(self, ud):
        rospy.logdebug("Entered 'MONITOR_ATTEMPTS' state.")

        # attempt limit not reached, try to reach target again
        if ud.ma_current_attempt < ud.ma_attempt_limit:

            if ud.ma_target_wp == 2:
                """
                # wp2 is a special case
                we know that wp2 is initially blocked by an obstacle and cannot be reached until the obstacle move or is moved
                we consider the robot has reached the target if it is within grasp distance
                """

                tf_stamped = self.tf_buffer.lookup_transform('map', 'base_link', rospy.Time(0))

                if sqrt((ud.ma_waypoints[1].pose.position.x - tf_stamped.transform.translation.x) ** 2 +
                        (ud.ma_waypoints[1].pose.position.y - tf_stamped.transform.translation.y) ** 2) < self.grasp_distance:
                    return 'wp2_case'
                else:
                    if ud.ma_current_attempt < ud.ma_attempt_limit:
                        ud.ma_current_attempt += 1

                    else:
                        ud.ma_current_attempt = 1
                        tts_msg = String()
                        tts_msg.data = "I can not reach" + ud.ma_wp_str[ud.ma_target_wp - 1] + "." + \
                                       "I am moving toward the next waypoint."
                        self.tts_pub.publish(tts_msg)
                        ud.ma_target_wp += 1

                    return 'monitoring_done'
            else:

                ud.ma_current_attempt += 1
                return 'monitoring_done'
        else:
            # attempt limit reached, skip to the next goal
            ud.ma_current_attempt = 1
            self.face_cmd.publish(YELLOW_FACE)
            tts_msg = String()
            tts_msg.data = "I can not reach" + ud.ma_wp_str[ud.ma_target_wp - 1] + "." + "I am moving toward the next waypoint."
            self.tts_pub.publish(tts_msg)
            ud.ma_target_wp += 1
            return 'monitoring_done'
class GetDropPose(smach.State):
    def __init__(self):
        smach.State.__init__(self, outcomes=['drop_pose_acquired'],
                             output_keys=['drop_pose'])

        self.tabletop_sub = rospy.Subscriber('/object_recognition/table_array', TableArray, self.tabletop_cb, queue_size=10)
        self.tf_buffer = Buffer()
        self.tf_listener = TransformListener(self.tf_buffer)

        self.msg_received = False

        self.mutex = threading.Lock()

        self.tables = TableArray()

    def tabletop_cb(self, table_array):

        self.mutex.acquire()
        self.msg_received = True
        self.tables = table_array
        self.mutex.release()

    def execute(self, ud):

        loop_again = True

        while loop_again:
            self.mutex.acquire()
            if self.msg_received:
                loop_again = False
            self.mutex.release()
            rospy.sleep(rospy.Duration(1))

        transform = self.tf_buffer.lookup_transform('odom', self.tables.header.frame_id, rospy.Time(0))

        drop_pose_z = 1.5
        tmp = PoseStamped()
        lst = []

        for t in range(len(self.tables.tables)):
            tmp.pose = self.tables.tables[t].pose
            tmp.header.frame_id = self.tables.header.frame_id
            tmp.header.stamp = rospy.Time.now()

            eval_pose = do_transform_pose(tmp, transform)

            rpy = tf_conversions.transformations.euler_from_quaternion(
                [eval_pose.pose.orientation.x, eval_pose.pose.orientation.y,
                 eval_pose.pose.orientation.z, eval_pose.pose.orientation.w])

            if rpy[2]**2 > 0.90:
                lst.append([(eval_pose.pose.position.z - drop_pose_z)**2, eval_pose])

        lst.sort()

        ud.drop_pose = lst[0][1]

        print(ud.drop_pose)

        return 'drop_pose_acquired'
Example #9
class TfBridge(object):
    """ Utility class to interface with /tf2 """
    def __init__(self):
        """ """
        self.tf_buffer = Buffer()
        self.tf_listener = TransformListener(self.tf_buffer)
        self.tf_broadcaster = TransformBroadcaster()

    def publish_pose_to_tf(self, pose, source_frame, target_frame, time=None):
        """ Publish the given pose to /tf2 """
        msg = pose.to_msg()
        transform = TransformStamped()
        transform.child_frame_id = target_frame
        transform.header.frame_id = source_frame
        if time is not None:
            transform.header.stamp = time
        else:
            transform.header.stamp = rospy.Time.now()
        transform.transform.translation = msg.position
        transform.transform.rotation = msg.orientation
        self.tf_broadcaster.sendTransform(transform)

    def get_pose_from_tf(self, source_frame, target_frame, time=None):
        """ Get the pose from /tf2 """
        try:
            if time is not None:
                trans = self.tf_buffer.lookup_transform(
                    source_frame, target_frame, time)
            else:
                trans = self.tf_buffer.lookup_transform(
                    source_frame, target_frame, rospy.Time(0))
            x = trans.transform.translation.x
            y = trans.transform.translation.y
            z = trans.transform.translation.z

            rx = trans.transform.rotation.x
            ry = trans.transform.rotation.y
            rz = trans.transform.rotation.z
            rw = trans.transform.rotation.w
            pose = Vector6D(x=x, y=y, z=z).from_quaternion(rx, ry, rz, rw)
            return True, pose
        except (tf2_ros.LookupException, tf2_ros.ConnectivityException,
                tf2_ros.ExtrapolationException) as e:
            rospy.logwarn("[perception] Exception occured: {}".format(e))
            return False, None
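A minimal usage sketch; Vector6D comes from the surrounding package, and the frame names are illustrative:

bridge = TfBridge()
success, pose = bridge.get_pose_from_tf('map', 'base_link')
if success:
    bridge.publish_pose_to_tf(pose, 'map', 'base_link_mirror')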
class TfWrapper(object):
    def __init__(self, buffer_size=2):
        self.tfBuffer = Buffer(rospy.Duration(buffer_size))
        self.tf_listener = TransformListener(self.tfBuffer)
        self.tf_static = StaticTransformBroadcaster()
        self.tf_frequency = rospy.Duration(1.0)
        self.broadcasting_frames = []
        self.broadcasting_frames_lock = Lock()
        rospy.sleep(0.1)

    def transform_pose(self, target_frame, pose):
        try:
            transform = self.tfBuffer.lookup_transform(
                target_frame,
                pose.header.frame_id,  # source frame
                pose.header.stamp,
                rospy.Duration(2.0))
            new_pose = do_transform_pose(pose, transform)
            return new_pose
        except ExtrapolationException as e:
            rospy.logwarn(e)

    def lookup_transform(self, target_frame, source_frame):
        p = PoseStamped()
        p.header.frame_id = source_frame
        p.pose.orientation.w = 1.0
        return self.transform_pose(target_frame, p)

    def add_frame_from_pose(self, name, pose_stamped):
        with self.broadcasting_frames_lock:
            frame = TransformStamped()
            frame.header = pose_stamped.header
            frame.child_frame_id = name
            frame.transform.translation = pose_stamped.pose.position
            frame.transform.rotation = pose_stamped.pose.orientation
            self.broadcasting_frames.append(frame)

    def start_frame_broadcasting(self):
        self.tf_broadcaster = tf.TransformBroadcaster()
        self.tf_timer = rospy.Timer(self.tf_frequency, self.broadcasting_cb)

    def broadcasting_cb(self, data):
        with self.broadcasting_frames_lock:
            for frame in self.broadcasting_frames:
                frame.header.stamp = rospy.get_rostime()
                self.tf_broadcaster.sendTransformMessage(frame)

    def broadcast_static_frame(self, name, pose_stamped):
        frame = TransformStamped()
        frame.header = pose_stamped.header
        frame.child_frame_id = name
        frame.transform.translation = pose_stamped.pose.position
        frame.transform.rotation = pose_stamped.pose.orientation
        self.tf_static.sendTransform(frame)
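A minimal sketch of the frame-broadcasting side, assuming a node has already been initialized (frame names are illustrative):

wrapper = TfWrapper()
anchor = PoseStamped()
anchor.header.frame_id = 'map'
anchor.pose.position.x = 1.0
anchor.pose.orientation.w = 1.0
wrapper.add_frame_from_pose('anchor_frame', anchor)
wrapper.start_frame_broadcasting()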
Example #11
class ProductDistanceServer(Node):
    def __init__(self):
        super().__init__('product_distance_server')
        self.srv = self.create_service(ProductDistance, 'product_distance',
                                       self.product_distance_callback)
        self.tfBuffer = Buffer()
        self.listener = TransformListener(self.tfBuffer, self)
        self.get_logger().info("Product Distance Server is ready")

    def distance(self, P1, P2):
        dist = math.sqrt(
            math.pow(P2.x - P1.x, 2) + math.pow(P2.y - P1.y, 2) +
            math.pow(P2.z - P1.z, 2) * 1.0)
        return dist

    def product_distance_callback(self, request, response):
        try:
            trans = self.tfBuffer.lookup_transform('base_link',
                                                   request.source_frame,
                                                   Time())

            #Doing this because TransformStamped return Vector3 instead of Point
            frame_position = Point()
            frame_position.x = trans.transform.translation.x
            frame_position.y = trans.transform.translation.y
            frame_position.z = trans.transform.translation.z

            distance = self.distance(request.product_position, frame_position)

            #Find 3D distance -> https://stackoverflow.com/questions/20184992/finding-3d-distances-using-an-inbuilt-function-in-python
            # p0 = np.array([request.product_position.x,request.product_position.y,0.0])
            # p1 = np.array([trans.transform.translation.x,trans.transform.translation.y,0.0])
            # distance = np.linalg.norm(p0 - p1)

            # distance=math.dist([request.product_position.x,request.product_position.y],[trans.transform.translation.x,trans.transform.translation])
            #distance=math.dist([1,2],[2,3])

            #self.get_logger().info("Distance: %s -> product=(%s,%s,%s) target=(%s,%s,%s)"%(distance,p0[0],p0[1],p0[2],p1[0],p1[1],p1[2]))

            response = ProductDistance.Response()
            response.transform = trans
            response.distance = distance
            response.frame_position = frame_position

            return response

        except Exception as ex:
            template = "Error while computing transform. An exception of type {0} occurred: {1!r}"
            message = template.format(type(ex).__name__, ex.args)
            self.get_logger().error(message)

            response = ProductDistance.Response()

            return response
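A minimal client-side sketch for the service above, assuming the custom ProductDistance service type is importable (the frame name is illustrative):

rclpy.init()
node = rclpy.create_node('product_distance_client')
client = node.create_client(ProductDistance, 'product_distance')
client.wait_for_service()
request = ProductDistance.Request()
request.source_frame = 'product_link'
future = client.call_async(request)
rclpy.spin_until_future_complete(node, future)
node.get_logger().info('distance: {}'.format(future.result().distance))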
    def __init__(self):

        _node = Node("tf2twist")

        qos_profile = QoSProfile(depth=10)
        # loop_rate = _node.create_rate(100)

        tfBuffer = Buffer()
        listener = TransformListener(tfBuffer, _node, qos=qos_profile)

        twist_data = geometry_msgs.msg.Twist()

        pub_cmd_vel = _node.create_publisher(geometry_msgs.msg.Twist,
                                             "cmd_vel", qos_profile)

        # self.transfromstamped =
        try:
            while rclpy.ok():
                rclpy.spin_once(_node)

                now = _node.get_clock().now() - rclpy.duration.Duration(
                    seconds=0, nanoseconds=1000000)
                try:
                    trans = tfBuffer.lookup_transform(
                        'odom_frame', 'base_link', now,
                        rclpy.duration.Duration(seconds=0, nanoseconds=0))
                    # print(trans)
                    # print(trans.transform.rotation.x)

                    roll, pitch, yaw = self.quaternion_to_euler(
                        trans.transform.rotation.x, trans.transform.rotation.y,
                        trans.transform.rotation.z, trans.transform.rotation.w)

                    # print(yaw)
                    if (yaw > 0.3):
                        twist_data.angular.z = 70.0  #yaw*125*1.3
                    elif (yaw < -0.3):
                        twist_data.angular.z = -70.0  #yaw*125*1.3
                    else:
                        twist_data.angular.z = 0.0
                    pub_cmd_vel.publish(twist_data)

                except (LookupException, LookupError, ConnectionAbortedError,
                        ConnectionError, ConnectionRefusedError,
                        ConnectionResetError, ExtrapolationException,
                        ConnectivityException) as e:
                    # print(e)
                    pass

        except (KeyboardInterrupt):
            pass
class StartGuiding(smach.State):
    def __init__(self):
        smach.State.__init__(self,
                             outcomes=['go_back'],
                             input_keys=['sg_waypoints'],
                             output_keys=['sg_target_wp'])
        self.tts_pub = rospy.Publisher('sara_tts',
                                       String,
                                       queue_size=1,
                                       latch=True)
        self.tf_buffer = Buffer()
        self.tf_listener = TransformListener(self.tf_buffer)
        self.face_cmd = rospy.Publisher('/face_mode',
                                        UInt8,
                                        queue_size=1,
                                        latch=True)

    def execute(self, ud):

        tts_msg = String()

        rospy.sleep(rospy.Duration(5))

        self.face_cmd.publish(YELLOW_FACE)
        tts_msg.data = "I can not guide you back safely. I will go back to the stating location on my own."
        self.tts_pub.publish(tts_msg)

        tf_stamped = self.tf_buffer.lookup_transform('map', 'base_link',
                                                     rospy.Time(0))

        lst = []

        for i in range(len(ud.sg_waypoints)):
            distance = sqrt((tf_stamped.transform.translation.x -
                             ud.sg_waypoints[i].pose.position.x)**2 +
                            (tf_stamped.transform.translation.y -
                             ud.sg_waypoints[i].pose.position.y)**2)
            lst.append([distance, i])

        lst.sort()

        if lst[0][1] == 0:
            ud.sg_target_wp = 0
        else:
            ud.sg_target_wp = lst[0][1] - 1

        return 'go_back'
Example #14
class TfWrapper(object):
    def __init__(self, buffer_size=2):
        self.tfBuffer = Buffer(rospy.Duration(buffer_size))
        self.tf_listener = TransformListener(self.tfBuffer)
        rospy.sleep(0.1)

    def transformPose(self, target_frame, pose):
        try:
            transform = self.tfBuffer.lookup_transform(
                target_frame,
                pose.header.frame_id,  # source frame
                pose.header.stamp,
                rospy.Duration(1.0))
            new_pose = do_transform_pose(pose, transform)
            return new_pose
        except ExtrapolationException as e:
            rospy.logwarn(e)
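A minimal usage sketch, assuming a transform between 'base_link' and 'map' is being published (frame names are illustrative):

wrapper = TfWrapper(buffer_size=10)
ps = PoseStamped()
ps.header.frame_id = 'base_link'
ps.header.stamp = rospy.Time(0)
ps.pose.orientation.w = 1.0
pose_in_map = wrapper.transformPose('map', ps)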
Example #15
class Environment:
    def __init__(self):
        self.buffer = Buffer()
        self.listener = TransformListener(self.buffer)

    def get_transform(self, source_frame, target_frame, timeout=3):
        frames = '"{}" w.r.t. "{}"'.format(target_frame, source_frame)
        if rospy.get_param('verbose'):
            rospy.loginfo('Acquiring transform: ' + frames)
        start = rospy.get_time()
        while not rospy.is_shutdown() and rospy.get_time() - start < timeout:
            try:
                return self.buffer.lookup_transform(source_frame, target_frame,
                                                    rospy.Time())
            except TransformException:
                pass
        if rospy.get_param('verbose'):
            rospy.logwarn('Failed to find transform: ' + frames)
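A minimal usage sketch; get_transform reads the 'verbose' parameter internally, so it must exist on the parameter server:

rospy.init_node('environment_demo')
rospy.set_param('verbose', True)
env = Environment()
trans = env.get_transform('map', 'base_link', timeout=5)
if trans is not None:
    print(trans.transform.translation)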
class WaitForObstacle(smach.State):
    def __init__(self):
        smach.State.__init__(self, outcomes=['wait_over'],
                             input_keys=['wait_target_wp', 'wait_waypoints'],
                             output_keys=['wait_target_wp'])
        self.move_base_srv = rospy.ServiceProxy('/move_base/make_plan', GetPlan)
        self.tts_pub = rospy.Publisher('sara_tts', String, queue_size=1, latch=True)
        self.tf_buffer = Buffer()
        self.tf_listener = TransformListener(self.tf_buffer)

    def execute(self, ud):
        rospy.logdebug("Entered 'WAIT_FOR_OBSTACLE' state.")

        nb_loop = 0
        max_nb_loop = 5

        start_pose = PoseStamped()
        tf_stamped = self.tf_buffer.lookup_transform('map', 'base_link', rospy.Time(0))
        start_pose = do_transform_pose(start_pose, tf_stamped)

        goal_pose = ud.wait_waypoints[1]  # wp2

        goal_tolerance = 0.10

        while nb_loop < max_nb_loop:
            try:
                res = self.move_base_srv(start=start_pose, goal=goal_pose, tolerance=goal_tolerance)
                if res.plan.poses:
                    break
            except rospy.ServiceException:
                pass

            nb_loop += 1
            rospy.sleep(rospy.Duration(5))

        if nb_loop >= max_nb_loop:
            ud.wait_target_wp += 1
            tts_msg = String()
            tts_msg.data = "I detect that the path to the waypoint is still obstructed."
            self.tts_pub.publish(tts_msg)
            tts_msg.data = "I have waited long enough. I am moving toward the next waypoint."
            self.tts_pub.publish(tts_msg)

        return 'wait_over'
Example #18
def odom_callback(data):
    global camera_odom, camera_pose
    camera_odom = data
    camera_odom.pose.pose.position = camera_pose.pose.position
    camera_odom.twist.twist.linear.x = -camera_odom.twist.twist.linear.x
    camera_odom.twist.twist.linear.y = -camera_odom.twist.twist.linear.y
    
camera_pose = PoseStamped()  # globals published in the loop below
camera_odom = Odometry()
rospy.init_node(vehicle_type+"_"+vehicle_id+'_ego_transfer')
rospy.Subscriber("/t265/odom/sample", Odometry, odom_callback, queue_size=1)
pose_pub = rospy.Publisher("/camera_pose", PoseStamped, queue_size=1)
odom_pub = rospy.Publisher("/camera_odom", Odometry, queue_size=1)
rate = rospy.Rate(20) 
tfBuffer = Buffer()
tflistener = TransformListener(tfBuffer)

while not rospy.is_shutdown():
    try:
        tfstamped = tfBuffer.lookup_transform('map', 'camera', rospy.Time(0))
        camera_pose.header.stamp = rospy.Time.now()
        camera_pose.pose.position.x = tfstamped.transform.translation.x
        camera_pose.pose.position.y = tfstamped.transform.translation.y
        camera_pose.pose.position.z = tfstamped.transform.translation.z
        camera_pose.pose.orientation =  tfstamped.transform.rotation
        pose_pub.publish(camera_pose) 
        odom_pub.publish(camera_odom)
        rate.sleep()
    except tf2_ros.TransformException:
        rate.sleep()

  
class UnderworldsSimulation(object):
    def __init__(self):
        self.tf_buffer = Buffer()
        self.tf_listener = TransformListener(self.tf_buffer)
        self.tf_broadcaster = TransformBroadcaster()

        self.base_frame_id = rospy.get_param("~base_frame_id",
                                             "base_footprint")
        self.global_frame_id = rospy.get_param("~global_frame_id", "odom")

        self.env_urdf_file_path = rospy.get_param("~env_urdf_file_path", "")
        self.cad_models_additional_search_path = rospy.get_param(
            "~cad_models_additional_search_path", "")

        self.static_entities_config_filename = rospy.get_param(
            "~static_entities_config_filename", "")

        self.bridge = CvBridge()

        self.entity_id_map = {}
        self.reverse_entity_id_map = {}

        self.joint_id_map = {}
        self.reverse_joint_id_map = {}

        self.constraint_id_map = {}

        self.markers_id_map = {}

        self.use_gui = rospy.get_param("~use_gui", True)
        if self.use_gui is True:
            self.client_simulator_id = p.connect(p.GUI)
        else:
            self.client_simulator_id = p.connect(p.DIRECT)

        if self.cad_models_additional_search_path != "":
            p.setAdditionalSearchPath(self.cad_models_additional_search_path)

        if self.env_urdf_file_path != "":
            self.load_urdf(self.global_frame_id, self.env_urdf_file_path,
                           [0, 0, 0], [0, 0, 0, 1])

        if self.static_entities_config_filename != "":
            with open(self.static_entities_config_filename, 'r') as stream:
                static_entities = yaml.safe_load(stream)
                for entity in static_entities:
                    start_position = [
                        entity["position"]["x"], entity["position"]["y"],
                        entity["position"]["z"]
                    ]
                    start_orientation = quaternion_from_euler(
                        entity["orientation"]["x"], entity["orientation"]["y"],
                        entity["orientation"]["z"], 'rxyz')
                    self.load_urdf(entity["id"], entity["file"],
                                   start_position, start_orientation)

        p.setGravity(0, 0, -10)
        p.setRealTimeSimulation(0)

        self.rgb_image_topic = rospy.get_param("~rgb_image_topic",
                                               "/camera/rgb/image_raw")
        self.rgb_camera_info_topic = rospy.get_param(
            "~rgb_camera_info_topic", "/camera/rgb/camera_info")

        self.depth_image_topic = rospy.get_param("~depth_image_topic",
                                                 "/camera/depth/image_raw")
        self.depth_camera_info_topic = rospy.get_param(
            "~depth_camera_info_topic", "/camera/depth/camera_info")

        self.tracks_topic = rospy.get_param("~tracks_topic", "tracks")

        self.position_tolerance = rospy.get_param("~position_tolerance", 0.001)
        self.orientation_tolerance = rospy.get_param("~orientation_tolerance",
                                                     0.001)

        self.robot_urdf_file_path = rospy.get_param("~robot_urdf_file_path",
                                                    "r2d2.urdf")

        self.joint_states_listener = JointStatesListener(self)

        self.perspective_estimator = PerspectiveEstimator(self)

        self.use_depth = rospy.get_param("~use_depth", False)

        self.publish_markers = rospy.get_param("publish_markers", True)

        self.publish_perspectives = rospy.get_param("publish_perspectives",
                                                    True)
        if self.publish_perspectives is True:
            self.perspective_publisher = rospy.Publisher("perspective_viz",
                                                         Image,
                                                         queue_size=1)

        if self.publish_markers is True:
            self.marker_publisher = rospy.Publisher("internal_simulation_viz",
                                                    MarkerArray,
                                                    queue_size=1)

        self.track_sub = rospy.Subscriber(self.tracks_topic,
                                          SceneChangesStamped,
                                          self.observation_callback)

        if self.publish_markers is True:
            self.simulation_timer = rospy.Timer(rospy.Duration(1.0 / 24.0),
                                                self.visualization_callback)

    def load_urdf(self, id, filename, t, q, remove_friction=False):
        try:
            base_link_sim_id = p.loadURDF(
                filename,
                t,
                q,
                flags=p.URDF_ENABLE_SLEEPING
                | p.URDF_ENABLE_CACHED_GRAPHICS_SHAPES)
        except Exception as e:
            rospy.logwarn("[simulation] Error loading '{}': {}".format(
                filename, e))
            return
        # If file successfully loaded
        if base_link_sim_id >= 0:
            self.entity_id_map[id] = base_link_sim_id
            # Create a joint map to ease exploration
            self.reverse_entity_id_map[base_link_sim_id] = id
            self.joint_id_map[base_link_sim_id] = {}
            self.reverse_joint_id_map[base_link_sim_id] = {}
            for i in range(
                    0,
                    p.getNumJoints(base_link_sim_id,
                                   physicsClientId=self.client_simulator_id)):
                info = p.getJointInfo(base_link_sim_id,
                                      i,
                                      physicsClientId=self.client_simulator_id)
                self.joint_id_map[base_link_sim_id][info[1]] = info[0]
                self.reverse_joint_id_map[base_link_sim_id][info[0]] = info[1]
            rospy.loginfo(
                "[simulation] '{}' File successfully loaded".format(filename))
        else:
            raise ValueError(
                "Invalid URDF file provided: '{}' ".format(filename))

    def get_transform_from_tf2(self, source_frame, target_frame, time=None):
        try:
            if time is not None:
                trans = self.tf_buffer.lookup_transform(
                    source_frame, target_frame, time)
            else:
                trans = self.tf_buffer.lookup_transform(
                    source_frame, target_frame, rospy.Time(0))
            x = trans.transform.translation.x
            y = trans.transform.translation.y
            z = trans.transform.translation.z

            rx = trans.transform.rotation.x
            ry = trans.transform.rotation.y
            rz = trans.transform.rotation.z
            rw = trans.transform.rotation.w

            return True, [x, y, z], [rx, ry, rz, rw]
        except (tf2_ros.LookupException, tf2_ros.ConnectivityException,
                tf2_ros.ExtrapolationException) as e:
            rospy.logwarn("[simulation] Exception occured: {}".format(e))
            return False, [0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0]

    def update_constraint(self, id, t, q):
        if id not in self.entity_id_map:
            raise ValueError(
                "Entity <{}> is not loaded into the simulator".format(id))
        if id in self.constraint_id_map:
            p.changeConstraint(self.constraint_id_map[id],
                               t,
                               jointChildFrameOrientation=q,
                               maxForce=50)
        else:
            self.constraint_id_map[id] = p.createConstraint(
                self.entity_id_map[id],
                -1,
                -1,
                -1,
                p.JOINT_FIXED, [0, 0, 0], [0, 0, 0],
                t,
                childFrameOrientation=q)

    def remove_constraint(self, id):
        if id not in self.constraint_id_map:
            raise ValueError(
                "Constraint for entity <{}> not created".format(id))
        p.removeConstraint(self.constraint_id_map[id])
        del self.constraint_id_map[id]

    def add_shape(self, track):
        if track.has_shape is True:
            if track.shape.type == PrimitiveShape.CYLINDER:
                # placed at the origin; the track's real pose is applied later via update_entity
                position = [0, 0, 0]
                orientation = [0, 0, 0, 1]
                visual_shape_id = p.createVisualShape(
                    p.GEOM_CYLINDER,
                    radius=track.shape.dimensions[0],
                    length=track.shape.dimensions[1])
                collision_shape_id = p.createCollisionShape(
                    p.GEOM_CYLINDER,
                    radius=track.shape.dimensions[0],
                    length=track.shape.dimensions[1])
                entity_id = p.createMultiBody(
                    baseCollisionShapeIndex=collision_shape_id,
                    baseVisualShapeIndex=visual_shape_id,
                    basePosition=position,
                    baseOrientation=orientation,
                    flags=p.URDF_ENABLE_SLEEPING
                    | p.URDF_ENABLE_CACHED_GRAPHICS_SHAPES)
                if entity_id >= 0:
                    self.entity_id_map[track.id] = entity_id
                    self.reverse_entity_id_map[entity_id] = track.id
            elif track.shape.type == PrimitiveShape.MESH:
                pass
            else:
                raise NotImplementedError(
                    "Only cylinder shapes for unknown objects supported at the moment."
                )

    def remove_shape(self, track):
        pass

    def update_entity(self, id, t, q):
        if id not in self.entity_id_map:
            raise ValueError(
                "Entity <{}> is not loaded into the simulator".format(id))
        base_link_sim_id = self.entity_id_map[id]
        t_current, q_current = p.getBasePositionAndOrientation(
            base_link_sim_id)
        update_position = not np.allclose(
            np.array(t_current), t, atol=self.position_tolerance)
        update_orientation = not np.allclose(
            np.array(q_current), q, atol=self.orientation_tolerance)
        if update_position is True or update_orientation is True:
            p.resetBasePositionAndOrientation(
                base_link_sim_id,
                t,
                q,
                physicsClientId=self.client_simulator_id)

    def publish_marker_array(self):
        marker_array = MarkerArray()
        marker_array.markers = []
        now = rospy.Time()
        for sim_id in range(0, p.getNumBodies()):
            visual_shapes = p.getVisualShapeData(sim_id)

            for shape in visual_shapes:
                marker = Marker()
                entity_id = shape[0]
                link_id = shape[1]
                type = shape[2]
                dimensions = shape[3]
                mesh_file_path = shape[4]
                position = shape[5]
                orientation = shape[6]
                rgba_color = shape[7]

                if link_id != -1:
                    link_state = p.getLinkState(sim_id, link_id)
                    t_link = link_state[0]
                    q_link = link_state[1]
                    t_inertial = link_state[2]
                    q_inertial = link_state[3]

                    tf_world_link = np.dot(translation_matrix(t_link),
                                           quaternion_matrix(q_link))
                    tf_inertial_link = np.dot(translation_matrix(t_inertial),
                                              quaternion_matrix(q_inertial))
                    world_transform = np.dot(tf_world_link,
                                             np.linalg.inv(tf_inertial_link))
                else:
                    t_link, q_link = p.getBasePositionAndOrientation(sim_id)
                    world_transform = np.dot(translation_matrix(t_link),
                                             quaternion_matrix(q_link))

                marker.header.frame_id = self.global_frame_id
                marker.header.stamp = now
                marker.id = entity_id + ((link_id + 1) << 24)  # same id convention as bullet
                marker.action = Marker.MODIFY
                marker.ns = self.reverse_entity_id_map[sim_id]

                trans_shape = np.dot(translation_matrix(position),
                                     quaternion_matrix(orientation))
                shape_transform = np.dot(world_transform, trans_shape)
                position = translation_from_matrix(shape_transform)
                orientation = quaternion_from_matrix(shape_transform)

                marker.pose.position.x = position[0]
                marker.pose.position.y = position[1]
                marker.pose.position.z = position[2]

                marker.pose.orientation.x = orientation[0]
                marker.pose.orientation.y = orientation[1]
                marker.pose.orientation.z = orientation[2]
                marker.pose.orientation.w = orientation[3]

                if len(rgba_color) > 0:
                    marker.color.r = rgba_color[0]
                    marker.color.g = rgba_color[1]
                    marker.color.b = rgba_color[2]
                    marker.color.a = rgba_color[3]

                if type == p.GEOM_SPHERE:
                    marker.type = Marker.SPHERE
                    marker.scale.x = dimensions[0] * 2.0
                    marker.scale.y = dimensions[0] * 2.0
                    marker.scale.z = dimensions[0] * 2.0
                elif type == p.GEOM_BOX:
                    marker.type = Marker.CUBE
                    marker.scale.x = dimensions[0]
                    marker.scale.y = dimensions[1]
                    marker.scale.z = dimensions[2]
                elif type == p.GEOM_CAPSULE:
                    marker.type = Marker.SPHERE
                elif type == p.GEOM_CYLINDER:
                    marker.type = Marker.CYLINDER
                    marker.scale.x = dimensions[1] * 2.0
                    marker.scale.y = dimensions[1] * 2.0
                    marker.scale.z = dimensions[0]
                elif type == p.GEOM_PLANE:
                    marker.type = Marker.CUBE
                    marker.scale.x = dimensions[0]
                    marker.scale.y = dimensions[1]
                    marker.scale.z = 0.0001
                elif type == p.GEOM_MESH:
                    marker.type = Marker.MESH_RESOURCE
                    marker.mesh_resource = "file://" + mesh_file_path
                    marker.scale.x = dimensions[0]
                    marker.scale.y = dimensions[1]
                    marker.scale.z = dimensions[2]
                    marker.mesh_use_embedded_materials = True
                else:
                    raise NotImplementedError

                marker.lifetime = rospy.Duration(1.0)

                marker_array.markers.append(marker)
        self.marker_publisher.publish(marker_array)

    def observation_callback(self, tracks_msg):
        p.stepSimulation()
        for track in tracks_msg.changes.nodes:
            if track.has_camera is True and track.is_located is True:
                position = track.pose_stamped.pose.pose.position
                orientation = track.pose_stamped.pose.pose.orientation
                t = [position.x, position.y, position.z]
                q = [
                    orientation.x, orientation.y, orientation.z, orientation.w
                ]
                rgb_image, depth_image, detections, viz_frame = self.perspective_estimator.estimate(
                    t, q, track.camera)
                viz_img_msg = self.bridge.cv2_to_imgmsg(viz_frame)
                self.perspective_publisher.publish(viz_img_msg)

    def visualization_callback(self, event):
        self.publish_marker_array()
Example #20
    rospy.init_node('turtle1_listener')
    tfBuffer = Buffer()
    listener = TransformListener(tfBuffer)

    rospy.wait_for_service('spawn')
    spawner = rospy.ServiceProxy('spawn', turtlesim.srv.Spawn)
    turtle_name = rospy.get_param('turtle', 'turtle2')
    spawner(4, 2, 0, turtle_name)

    turtle_vel = rospy.Publisher('%s/cmd_vel' % turtle_name,
                                 Twist,
                                 queue_size=1)
    rate = rospy.Rate(10.0)

    while not rospy.is_shutdown():
        try:
            trans = tfBuffer.lookup_transform(turtle_name, 'turtle1',
                                              rospy.Time())
        except (tf.LookupException, tf.ConnectivityException,
                tf.ExtrapolationException):
            rate.sleep()
            continue

        msg = Twist()
        msg.linear.x = 0.5 * math.sqrt(trans.transform.translation.x**2 +
                                       trans.transform.translation.y**2)
        msg.angular.z = 4 * math.atan2(trans.transform.translation.y,
                                       trans.transform.translation.x)

        turtle_vel.publish(msg)
        rate.sleep()
Example #21
class SimpleMapper(Node):
    def __init__(self, name):
        super().__init__(name)

        fill_map_param = self.declare_parameter('fill_map', True)

        # Init map related elements
        self.map = [-1] * MAP_WIDTH * MAP_HEIGHT
        self.map_publisher = self.create_publisher(
            OccupancyGrid,
            '/map',
            qos_profile=QoSProfile(
                depth=1,
                durability=DurabilityPolicy.TRANSIENT_LOCAL,
                history=HistoryPolicy.KEEP_LAST,
            ))
        self.tf_publisher = StaticTransformBroadcaster(self)
        tf = TransformStamped()
        tf.header.stamp = self.get_clock().now().to_msg()
        tf.header.frame_id = 'map'
        tf.child_frame_id = 'odom'
        tf.transform.translation.x = 0.0
        tf.transform.translation.y = 0.0
        tf.transform.translation.z = 0.0
        self.tf_publisher.sendTransform(tf)

        # Init laser related elements
        if fill_map_param.value:
            self.tf_buffer = Buffer()
            self.tf_listener = TransformListener(self.tf_buffer, self)
            self.scanner_subscriber = self.create_subscription(
                LaserScan, '/scan', self.update_map, 1)

        # Start publishing the map
        self.publish_map()
        self.create_timer(1, self.publish_map)

    def publish_map(self):
        now = self.get_clock().now()

        msg = OccupancyGrid()
        msg.header.stamp = now.to_msg()
        msg.header.frame_id = 'map'
        msg.info.resolution = RESOLUTION
        msg.info.width = MAP_WIDTH
        msg.info.height = MAP_HEIGHT
        msg.info.origin.position.x = WORLD_ORIGIN_X
        msg.info.origin.position.y = WORLD_ORIGIN_Y
        msg.data = self.map
        self.map_publisher.publish(msg)

    def update_map(self, msg):
        # Determine transformation of laser and robot in respect to odometry
        laser_rotation = None
        laser_translation = None
        try:
            tf = self.tf_buffer.lookup_transform('odom', msg.header.frame_id,
                                                 Time(sec=0, nanosec=0))
            laser_rotation = quaternion_to_euler(tf.transform.rotation)[0]
            laser_translation = tf.transform.translation
        except (LookupException, ConnectivityException,
                ExtrapolationException) as e:
            print('No required transformation found: `{}`'.format(str(e)))
            return

        # Determine position of robot and laser
        world_robot_x = laser_translation.x + WORLD_ORIGIN_X
        world_robot_y = laser_translation.y + WORLD_ORIGIN_Y
        world_laser_xs = []
        world_laser_ys = []
        laser_range_angle = msg.angle_min + laser_rotation
        for laser_range in msg.ranges:
            if laser_range < msg.range_max and laser_range > msg.range_min:
                laser_x = world_robot_x + laser_range * cos(laser_range_angle)
                laser_y = world_robot_y + laser_range * sin(laser_range_angle)
                world_laser_xs.append(laser_x)
                world_laser_ys.append(laser_y)
            laser_range_angle += msg.angle_increment

        # Determine position on map (from world coordinates)
        robot_x = int(world_robot_x / RESOLUTION)
        robot_y = int(world_robot_y / RESOLUTION)
        laser_xs = []
        laser_ys = []
        for world_laser_x, world_laser_y in zip(world_laser_xs,
                                                world_laser_ys):
            laser_x = int(world_laser_x / RESOLUTION)
            laser_y = int(world_laser_y / RESOLUTION)
            laser_xs.append(laser_x)
            laser_ys.append(laser_y)

        # Fill the map based on known readings
        for laser_x, laser_y in zip(laser_xs, laser_ys):
            self.plot_bresenham_line(robot_x, laser_x, robot_y, laser_y)
            self.map[laser_y * MAP_WIDTH + laser_x] = 100

    def plot_bresenham_line(self, x0, x1, y0, y1):
        # Bresenham's line algorithm (https://en.wikipedia.org/wiki/Bresenham%27s_line_algorithm)
        dx = abs(x1 - x0)
        sx = 1 if x0 < x1 else -1
        dy = -abs(y1 - y0)
        sy = 1 if y0 < y1 else -1
        err = dx + dy
        while True:
            self.map[y0 * MAP_WIDTH + x0] = 0
            if x0 == x1 and y0 == y1:
                break
            e2 = 2 * err
            if e2 >= dy:
                err += dy
                x0 += sx
            if e2 <= dx:
                err += dx
                y0 += sy
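A standalone sketch of the same Bresenham traversal used by plot_bresenham_line above, collecting the ray's cells instead of writing them into the map (the endpoints are arbitrary):

def bresenham_cells(x0, y0, x1, y1):
    # returns every grid cell visited on the way from (x0, y0) to (x1, y1)
    cells = []
    dx, sx = abs(x1 - x0), 1 if x0 < x1 else -1
    dy, sy = -abs(y1 - y0), 1 if y0 < y1 else -1
    err = dx + dy
    while True:
        cells.append((x0, y0))
        if x0 == x1 and y0 == y1:
            break
        e2 = 2 * err
        if e2 >= dy:
            err += dy
            x0 += sx
        if e2 <= dx:
            err += dx
            y0 += sy
    return cells

print(bresenham_cells(0, 0, 4, 2))  # [(0, 0), (1, 1), (2, 1), (3, 2), (4, 2)]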
Example #22
class HandeyeSampler(object):
    """
    Manages the samples acquired from tf.
    """
    def __init__(self, handeye_parameters):
        self.handeye_parameters = handeye_parameters

        # tf structures
        self.tfBuffer = Buffer()
        """
        used to get transforms to build each sample

        :type: tf2_ros.Buffer
        """
        self.tfListener = TransformListener(self.tfBuffer)
        """
        used to get transforms to build each sample

        :type: tf2_ros.TransformListener
        """
        self.tfBroadcaster = TransformBroadcaster()
        """
        used to publish the calibration after saving it

        :type: tf.TransformBroadcaster
        """

        # internal input data
        self.samples = []
        """
        list of acquired samples

        Each sample is a dictionary going from 'rob' and 'opt' to the relative sampled transform in tf tuple format.

        :type: list[dict[str, ((float, float, float), (float, float, float, float))]]
        """

    def _wait_for_tf_init(self):
        """
        Waits until all needed frames are present in tf.

        :rtype: None
        """
        self.tfBuffer.lookup_transform(
            self.handeye_parameters.robot_base_frame,
            self.handeye_parameters.robot_effector_frame, Time(0),
            Duration(20))
        self.tfBuffer.lookup_transform(
            self.handeye_parameters.tracking_base_frame,
            self.handeye_parameters.tracking_marker_frame, Time(0),
            Duration(60))

    def _get_transforms(self, time=None):
        """
        Samples the transforms at the given time.

        :param time: sampling time (now if None)
        :type time: None|Time
        :rtype: dict[str, ((float, float, float), (float, float, float, float))]
        """
        if time is None:
            time = Time.now()

        # here we trick the library (it is actually made for eye_on_hand only). Trust me, I'm an engineer
        if self.handeye_parameters.eye_on_hand:
            rob = self.tfBuffer.lookup_transform(
                self.handeye_parameters.robot_base_frame,
                self.handeye_parameters.robot_effector_frame, time,
                Duration(10))
        else:
            rob = self.tfBuffer.lookup_transform(
                self.handeye_parameters.robot_effector_frame,
                self.handeye_parameters.robot_base_frame, time, Duration(10))
        opt = self.tfBuffer.lookup_transform(
            self.handeye_parameters.tracking_base_frame,
            self.handeye_parameters.tracking_marker_frame, time, Duration(10))
        return {'robot': rob, 'optical': opt}

    def take_sample(self):
        """
        Samples the transformations and appends the sample to the list.

        :rtype: None
        """
        loginfo("Taking a sample...")
        transforms = self._get_transforms()
        loginfo("Got a sample")
        self.samples.append(transforms)

    def remove_sample(self, index):
        """
        Removes a sample from the list.

        :type index: int
        :rtype: None
        """
        if 0 <= index < len(self.samples):
            del self.samples[index]

    def get_samples(self):
        """
        Returns the samples accumulated so far.
        :rtype: [dict[str, ((float, float, float), (float, float, float, float))]]
        :return: A list of tuples containing the tracking and the robot transform pairs
        """
        return self.samples
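A minimal usage sketch, assuming params is a hand-eye parameters object exposing the frame names used above (robot_base_frame, robot_effector_frame, tracking_base_frame, tracking_marker_frame, eye_on_hand):

sampler = HandeyeSampler(params)
sampler._wait_for_tf_init()
for _ in range(3):
    sampler.take_sample()  # assumes the robot/marker moved between calls
print(len(sampler.get_samples()))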
Example #23
class RobotListener(object):
    def __init__(self, robot_urdf):
        self.global_frame_id = 'map'
        self.bullet_node_id_map = {}
        # Initialize tf2 buffer
        self.tf_buffer = Buffer()
        self.tf_listener = TransformListener(self.tf_buffer)
        self.robot_urdf = robot_urdf
        self.base_frame_id = "base_footprint"
        sleep(1.)
        _, t, q = self.get_transform_from_tf2(self.global_frame_id,
                                              self.base_frame_id)
        self.bullet_node_id_map[self.base_frame_id] = p.loadURDF(
            self.robot_urdf, t, q)
        self.init_robot_joint_map()
        self.joint_state_subscriber = rospy.Subscriber(
            "/joint_states", JointState, self.update_robot_joints)

    def init_robot_joint_map(self):
        self.simulator_joint_id_map = {}
        for i in range(
                0,
                p.getNumJoints(self.bullet_node_id_map[self.base_frame_id])):
            info = p.getJointInfo(self.bullet_node_id_map[self.base_frame_id],
                                  i)
            joint_name = info[1]
            self.simulator_joint_id_map[joint_name] = i

    def get_transform_from_tf2(self, source_frame, target_frame):
        try:
            trans = self.tf_buffer.lookup_transform(source_frame, target_frame,
                                                    rospy.Time(0))
            x = trans.transform.translation.x
            y = trans.transform.translation.y
            z = trans.transform.translation.z

            rx = trans.transform.rotation.x
            ry = trans.transform.rotation.y
            rz = trans.transform.rotation.z
            rw = trans.transform.rotation.w

            return True, [x, y, z], [rx, ry, rz, rw]
        except (tf2_ros.LookupException, tf2_ros.ConnectivityException,
                tf2_ros.ExtrapolationException):
            return False, [0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0]

    def update_robot_base(self):
        base_link_sim_id = self.bullet_node_id_map[self.base_frame_id]
        success, pos, rot = self.get_transform_from_tf2(
            self.global_frame_id, self.base_frame_id)
        if success:
            p.resetBasePositionAndOrientation(base_link_sim_id, pos, rot)

    def update_robot_joints(self, joint_states_msg):
        self.update_robot_base()
        base_link_sim_id = self.bullet_node_id_map[self.base_frame_id]
        for joint_index in range(0, len(joint_states_msg.name)):
            joint_sim_id = self.simulator_joint_id_map[
                joint_states_msg.name[joint_index]]
            p.resetJointState(base_link_sim_id, joint_sim_id,
                              joint_states_msg.position[joint_index], 0.0)
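
# A plausible entry point for RobotListener (sketch). It assumes the module
# imports pybullet as `p` and rospy, as the class itself does; the node name,
# GUI mode and URDF path below are placeholders.
if __name__ == '__main__':
    rospy.init_node('robot_listener_sketch')
    p.connect(p.GUI)  # use p.DIRECT for headless operation
    RobotListener('/path/to/robot.urdf')  # hypothetical URDF path
    rospy.spin()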
Example #24
class LandmarkMethodROS(LandmarkMethodBase):
    def __init__(self, img_proc=None):
        super(LandmarkMethodROS,
              self).__init__(device_id_facedetection=rospy.get_param(
                  "~device_id_facedetection", default="cuda:0"))
        self.subject_tracker = FaceEncodingTracker() if rospy.get_param(
            "~use_face_encoding_tracker",
            default=True) else SequentialTracker()
        self.bridge = CvBridge()
        self.__subject_bridge = SubjectListBridge()

        self.camera_frame = rospy.get_param("~camera_frame", "kinect2_link")
        self.ros_tf_frame = rospy.get_param("~ros_tf_frame",
                                            "kinect2_ros_frame")

        self.tf2_broadcaster = TransformBroadcaster()
        self.tf2_buffer = Buffer()
        self.tf2_listener = TransformListener(self.tf2_buffer)
        self.tf_prefix = rospy.get_param("~tf_prefix", default="gaze")
        self.visualise_headpose = rospy.get_param("~visualise_headpose",
                                                  default=True)

        self.pose_stabilizers = {}  # Introduce scalar stabilizers for pose.

        try:
            if img_proc is None:
                tqdm.write("Wait for camera message")
                cam_info = rospy.wait_for_message("/camera_info",
                                                  CameraInfo,
                                                  timeout=None)
                self.img_proc = PinholeCameraModel()
                # noinspection PyTypeChecker
                self.img_proc.fromCameraInfo(cam_info)
            else:
                self.img_proc = img_proc

            if np.array_equal(
                    self.img_proc.intrinsicMatrix().A,
                    np.array([[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]])):
                raise Exception(
                    'Camera matrix is zero-matrix. Did you calibrate '
                    'the camera and link the yaml file in the launch file?'
                )
            tqdm.write("Camera message received")
        except rospy.ROSException:
            raise Exception("Could not get camera info")

        # multiple person images publication
        self.subject_pub = rospy.Publisher("/subjects/images",
                                           MSG_SubjectImagesList,
                                           queue_size=3)
        # multiple person faces publication for visualisation
        self.subject_faces_pub = rospy.Publisher("/subjects/faces",
                                                 Image,
                                                 queue_size=3)

        Server(ModelSizeConfig, self._dyn_reconfig_callback)

        # have the subscriber the last thing that's run to avoid conflicts
        self.color_sub = rospy.Subscriber("/image",
                                          Image,
                                          self.process_image,
                                          buff_size=2**24,
                                          queue_size=3)

    def _dyn_reconfig_callback(self, config, _):
        self.model_points /= (self.model_size_rescale *
                              self.interpupillary_distance)
        self.model_size_rescale = config["model_size"]
        self.interpupillary_distance = config["interpupillary_distance"]
        self.model_points *= (self.model_size_rescale *
                              self.interpupillary_distance)
        self.head_pitch = config["head_pitch"]
        return config

    def process_image(self, color_msg):
        color_img = gaze_tools.convert_image(color_msg, "bgr8")
        timestamp = color_msg.header.stamp

        self.update_subject_tracker(color_img)

        if not self.subject_tracker.get_tracked_elements():
            tqdm.write("\033[2K\033[1;31mNo face found\033[0m", end="\r")
            return

        self.subject_tracker.update_eye_images(self.eye_image_size)

        final_head_pose_images = []
        for subject_id, subject in self.subject_tracker.get_tracked_elements(
        ).items():
            if subject.left_eye_color is None or subject.right_eye_color is None:
                continue
            if subject_id not in self.pose_stabilizers:
                self.pose_stabilizers[subject_id] = [
                    Stabilizer(state_num=2,
                               measure_num=1,
                               cov_process=0.1,
                               cov_measure=0.1) for _ in range(6)
                ]

            success, head_rpy, translation_vector = self.get_head_pose(
                subject.marks, subject_id)

            if success:
                # Publish all the data
                self.publish_pose(timestamp, translation_vector, head_rpy,
                                  subject_id)

                if self.visualise_headpose:
                    # pitch roll yaw
                    trans_msg = self.tf2_buffer.lookup_transform(
                        self.ros_tf_frame, self.tf_prefix +
                        "/head_pose_estimated" + str(subject_id), timestamp)
                    rotation = trans_msg.transform.rotation
                    euler_angles_head = list(
                        transformations.euler_from_quaternion(
                            [rotation.x, rotation.y, rotation.z, rotation.w]))
                    euler_angles_head = gaze_tools.limit_yaw(euler_angles_head)

                    face_image_resized = cv2.resize(
                        subject.face_color,
                        dsize=(224, 224),
                        interpolation=cv2.INTER_CUBIC)

                    final_head_pose_images.append(
                        LandmarkMethodROS.visualize_headpose_result(
                            face_image_resized,
                            gaze_tools.get_phi_theta_from_euler(
                                euler_angles_head)))

        if len(self.subject_tracker.get_tracked_elements().items()) > 0:
            self.publish_subject_list(
                timestamp, self.subject_tracker.get_tracked_elements())

        if len(final_head_pose_images) > 0:
            headpose_image_ros = self.bridge.cv2_to_imgmsg(
                np.hstack(final_head_pose_images), "bgr8")
            headpose_image_ros.header.stamp = timestamp
            self.subject_faces_pub.publish(headpose_image_ros)

    def get_head_pose(self, landmarks, subject_id):
        """
        We are given a set of 2D points in the form of landmarks. The basic idea is that we assume a generic 3D head
        model, and try to fit the 2D points to the 3D model based on the Levenberg-Marquardt optimization. We can use
        OpenCV's implementation of SolvePnP for this.
        This method is inspired by http://www.learnopencv.com/head-pose-estimation-using-opencv-and-dlib/
        We are planning to replace this with our own head pose estimator.
        :param landmarks: Landmark positions to be used to determine head pose
        :param subject_id: ID of the subject that corresponds to the given landmarks
        :return: success - whether the pose was successfully extracted
                 head_rpy - roll/pitch/yaw of the head, derived from the Kalman-filtered rotation vector that,
                 together with the translation vector, brings points from the model coordinate system into the
                 camera coordinate system
                 translation_vector - the corresponding Kalman-filtered translation vector
        """

        camera_matrix = self.img_proc.intrinsicMatrix()
        dist_coeffs = self.img_proc.distortionCoeffs()

        try:
            success, rodrigues_rotation, translation_vector, _ = cv2.solvePnPRansac(
                self.model_points,
                landmarks.reshape(len(self.model_points), 1, 2),
                cameraMatrix=camera_matrix,
                distCoeffs=dist_coeffs,
                flags=cv2.SOLVEPNP_DLS)

        except cv2.error as e:
            tqdm.write(
                '\033[2K\033[1;31mCould not estimate head pose: {}\033[0m'.
                format(e),
                end="\r")
            return False, None, None

        if not success:
            tqdm.write(
                '\033[2K\033[1;31msolvePnPRansac was unsuccessful\033[0m',
                end="\r")
            return False, None, None

        # this is generic point stabiliser, the underlying representation doesn't matter
        rotation_vector, translation_vector = self.apply_kalman_filter_head_pose(
            subject_id, rodrigues_rotation, translation_vector / 1000.0)

        rotation_vector[0] += self.head_pitch

        _rotation_matrix, _ = cv2.Rodrigues(rotation_vector)
        _rotation_matrix = np.matmul(
            _rotation_matrix, np.array([[0, 1, 0], [0, 0, -1], [-1, 0, 0]]))
        _m = np.zeros((4, 4))
        _m[:3, :3] = _rotation_matrix
        _m[3, 3] = 1
        _rpy_rotation = np.array(transformations.euler_from_matrix(_m))

        return success, _rpy_rotation, translation_vector

    def apply_kalman_filter_head_pose(self, subject_id,
                                      rotation_vector_unstable,
                                      translation_vector_unstable):
        stable_pose = []
        pose_np = np.array(
            (rotation_vector_unstable, translation_vector_unstable)).flatten()
        for value, ps_stb in zip(pose_np, self.pose_stabilizers[subject_id]):
            ps_stb.update([value])
            stable_pose.append(ps_stb.state[0])
        stable_pose = np.reshape(stable_pose, (-1, 3))
        rotation_vector = stable_pose[0]
        translation_vector = stable_pose[1]
        return rotation_vector, translation_vector

    def publish_subject_list(self, timestamp, subjects):
        assert (subjects is not None)

        subject_list_message = self.__subject_bridge.images_to_msg(
            subjects, timestamp)

        self.subject_pub.publish(subject_list_message)

    def publish_pose(self, timestamp, nose_center_3d_tf, head_rpy, subject_id):
        t = TransformStamped()
        t.header.frame_id = self.camera_frame
        t.header.stamp = timestamp
        t.child_frame_id = self.tf_prefix + "/head_pose_estimated" + str(
            subject_id)
        t.transform.translation.x = nose_center_3d_tf[0]
        t.transform.translation.y = nose_center_3d_tf[1]
        t.transform.translation.z = nose_center_3d_tf[2]

        rotation = transformations.quaternion_from_euler(*head_rpy)
        t.transform.rotation.x = rotation[0]
        t.transform.rotation.y = rotation[1]
        t.transform.rotation.z = rotation[2]
        t.transform.rotation.w = rotation[3]

        try:
            self.tf2_broadcaster.sendTransform([t])
        except rospy.ROSException as exc:
            if str(exc) == "publish() to a closed topic":
                pass
            else:
                raise exc

        self.tf2_buffer.set_transform(t, 'extract_landmarks')

    def update_subject_tracker(self, color_img):
        faceboxes = self.get_face_bb(color_img)
        if len(faceboxes) == 0:
            self.subject_tracker.clear_elements()
            return

        tracked_subjects = self.get_subjects_from_faceboxes(
            color_img, faceboxes)

        # track the new faceboxes according to the previous ones
        self.subject_tracker.track(tracked_subjects)
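
# A likely entry point for the node above (sketch; the node name is an
# assumption). The constructor blocks on wait_for_message until a CameraInfo
# message arrives and raises if it cannot be obtained, hence the try/except.
if __name__ == '__main__':
    rospy.init_node('landmark_method')
    try:
        LandmarkMethodROS()
        rospy.spin()
    except Exception as e:
        rospy.logerr(e)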
class ArmTCPTransformHandler:
    """
    This class uses a TransformListener to handle transforms related to the TCP.
    """
    def __init__(self):
        self.__tf_buffer = Buffer()
        self.__tf_listener = TransformListener(self.__tf_buffer)
        self.__static_broadcaster = StaticTransformBroadcaster()

    def ee_link_to_tcp_pose_target(self, pose, ee_link):
        try:
            transform_tcp_to_ee_link = self.__tf_buffer.lookup_transform(
                ee_link, "TCP", rospy.Time(0))
            transform_tcp_to_ee_link.header.frame_id = "ee_link_target"
            transform_tcp_to_ee_link.child_frame_id = "tcp_target"

            stamp = transform_tcp_to_ee_link.header.stamp
            transform_world_to_tcp_target = self.transform_from_pose(
                pose, "base_link", "ee_link_target", stamp)

            self.__tf_buffer.set_transform(transform_world_to_tcp_target,
                                           "default_authority")
            self.__tf_buffer.set_transform(transform_tcp_to_ee_link,
                                           "default_authority")

            ee_link_target_transform = self.__tf_buffer.lookup_transform(
                "base_link", "tcp_target", stamp)

            return self.pose_from_transform(ee_link_target_transform.transform)
        except ArmTCPTransformHandlerException:
            self.set_empty_tcp_to_ee_link_transform(ee_link)
            return pose

    def tcp_to_ee_link_pose_target(self, pose, ee_link):
        try:
            transform_tcp_to_ee_link = self.__tf_buffer.lookup_transform(
                "TCP", ee_link, rospy.Time(0))
            transform_tcp_to_ee_link.header.frame_id = "tcp_target"
            transform_tcp_to_ee_link.child_frame_id = "ee_link_target"

            stamp = transform_tcp_to_ee_link.header.stamp
            transform_world_to_tcp_target = self.transform_from_pose(
                pose, "base_link", "tcp_target", stamp)

            self.__tf_buffer.set_transform(transform_world_to_tcp_target,
                                           "default_authority")
            self.__tf_buffer.set_transform(transform_tcp_to_ee_link,
                                           "default_authority")

            ee_link_target_transform = self.__tf_buffer.lookup_transform(
                "base_link", "ee_link_target", stamp)

            return self.pose_from_transform(ee_link_target_transform.transform)
        except ArmTCPTransformHandlerException:
            self.set_empty_tcp_to_ee_link_transform(ee_link)
            return pose

    def tcp_to_ee_link_position_target(self, position, ee_link):
        pose = Pose(position, Quaternion(0, 0, 0, 1))
        return self.tcp_to_ee_link_pose_target(pose, ee_link).position

    def tcp_to_ee_link_quaternion_target(self, quaternion, ee_link):
        pose = Pose(Point(0, 0, 0), quaternion)
        return self.tcp_to_ee_link_pose_target(pose, ee_link).orientation

    def tcp_to_ee_link_rpy_target(self, roll, pitch, yaw, ee_link):
        qx, qy, qz, qw = quaternion_from_euler(roll, pitch, yaw)
        pose = Pose(Point(0, 0, 0), Quaternion(qx, qy, qz, qw))
        pose = self.tcp_to_ee_link_pose_target(pose, ee_link)
        # a geometry_msgs Quaternion is not iterable, so unpack its fields explicitly
        orientation = pose.orientation
        new_roll, new_pitch, new_yaw = euler_from_quaternion(
            [orientation.x, orientation.y, orientation.z, orientation.w])
        return new_roll, new_pitch, new_yaw

    def set_empty_tcp_to_ee_link_transform(self, ee_link):
        ee_link_to_tcp_transform = self.transform_from_pose(
            Pose(Point(0, 0, 0), Quaternion(0, 0, 0, 1)), ee_link, "TCP")
        self.__static_broadcaster.sendTransform(ee_link_to_tcp_transform)
        return ee_link_to_tcp_transform

    def lookup_transform(self, target_frame, source_frame, stamp=None):
        if stamp:
            return self.__tf_buffer.lookup_transform(target_frame,
                                                     source_frame, stamp)

        return self.__tf_buffer.lookup_transform(target_frame, source_frame,
                                                 rospy.Time(0))

    def display_target_pose(self, pose, name="target_pose"):
        t = self.transform_from_pose(pose, "base_link", name)
        self.__static_broadcaster.sendTransform(t)

    @staticmethod
    def transform_from_pose(pose, parent_frame, child_frame, stamp=None):
        t = TransformStamped()
        t.transform.translation.x = pose.position.x
        t.transform.translation.y = pose.position.y
        t.transform.translation.z = pose.position.z

        t.transform.rotation = pose.orientation

        t.header.frame_id = parent_frame
        t.child_frame_id = child_frame

        if stamp:
            t.header.stamp = stamp

        return t

    @staticmethod
    def pose_from_transform(transform):
        pose = Pose()
        pose.position.x = transform.translation.x
        pose.position.y = transform.translation.y
        pose.position.z = transform.translation.z
        pose.orientation = transform.rotation
        return pose
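
# Round-trip sketch for the two static helpers above; the pose values and
# frame names are arbitrary. transform_from_pose and pose_from_transform
# invert each other on the pose fields, so the positions must agree.
from geometry_msgs.msg import Pose, Point, Quaternion

_pose_in = Pose(Point(0.3, 0.0, 0.2), Quaternion(0, 0, 0, 1))
_t = ArmTCPTransformHandler.transform_from_pose(_pose_in, "base_link",
                                                "tcp_target")
_pose_out = ArmTCPTransformHandler.pose_from_transform(_t.transform)
assert _pose_out.position.x == _pose_in.position.x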
Example #26
class HealthMonitor():
    # main health monitor class

    heartbeats = [
        'lidar', 'seek', 'realsense_rgb', 'realsense_depth', 'transformed_imu',
        'localize', 'detection', 'detection_filtering', 'slam'
    ]
    totalBeats = len(heartbeats)

    def __init__(self):

        # status array
        self.healthLastTime = np.ones(self.totalBeats, dtype=int)
        self.healthPrevTime = np.zeros(self.totalBeats, dtype=int)
        self.healthTimeDiff = np.zeros(self.totalBeats, dtype=int)
        # fault dictionary
        self.healthDict = {}

        # how long before error is raised in seconds
        self.errorThreshold = float(rospy.get_param('~error_thres', '2.5'))
        # how long before node is restarted
        self.restartThreshold = float(rospy.get_param('~restart_thres', '5'))
        # whether or not to restart
        self.restartNodesFlag = bool(rospy.get_param('~restart_flag', False))  # note: a string default like 'False' is always truthy
        # health monitor rate
        self.timerFreq_ = float(rospy.get_param('~monitor_rate', '20'))

        # run the node
        self.rosInterface()

        # initialize the last received health time to the current time
        curTime = rospy.Time.now()
        self.healthLastTime *= curTime.to_nsec()

        while not rospy.is_shutdown():
            self.run()
            rospy.sleep(1.0 / self.timerFreq_)

    def run(self):
        # subtract current times from last times
        curTime = rospy.Time.now()
        self.healthTimeDiff = curTime.to_nsec() - self.healthLastTime
        self.slamCheck()
        # rospy.loginfo(self.healthTimeDiff)

        # convert thresholds to nanoseconds
        errorThres = int(self.errorThreshold * 1e9)

        # mark each system healthy if its last heartbeat is within the error threshold
        self.checkError = self.healthTimeDiff < errorThres

        # publish health status
        self.outputMsg.status_time = rospy.Time.now()
        self.outputMsg.status_array = self.checkError
        self.healthPub_.publish(self.outputMsg)

    def rosInterface(self):
        # global sensor heartbeat topics
        self.lidarTopic = str(rospy.get_param('lidar_beat', '/scan'))
        self.seekTopic = str(
            rospy.get_param('seek_beat', '/seek_camera/filteredImage'))
        self.realSenseRGBTopic = str(
            rospy.get_param('realsense_rgb_beat', '/camera/color/image_raw'))
        self.realSenseDepthTopic = str(
            rospy.get_param('realsense_depth_beat',
                            '/camera/depth/image_rect_raw'))
        self.transformedIMUTopic = str(
            rospy.get_param('transform_imu_beat', '/imu'))

        # global real time status update topics
        self.humanLocalizeTopic = str(
            rospy.get_param('human_beat', '/RelLocHeartbeat'))
        self.humanDetectionTopic = str(
            rospy.get_param('detection_beat', '/darknet_ros/detection_image'))
        self.humanFilterTopic = str(
            rospy.get_param('filter_beat', '/world_state'))

        # subscribers for the heartbeat topics
        self.lidarSub_ = rospy.Subscriber(self.lidarTopic, LaserScan,
                                          self.lidarBeatCallback)
        self.seekSub_ = rospy.Subscriber(self.seekTopic, Image,
                                         self.seekCallback)
        self.realSenseRGBSub_ = rospy.Subscriber(self.realSenseRGBTopic, Image,
                                                 self.realSenseRGBCallback)
        self.realSenseDepthSub_ = rospy.Subscriber(self.realSenseDepthTopic,
                                                   Image,
                                                   self.realSenseDepthCallback)
        self.transformedIMUSub_ = rospy.Subscriber(self.transformedIMUTopic,
                                                   Imu, self.imuCallback)
        self.humanLocalizeSub_ = rospy.Subscriber(self.humanLocalizeTopic,
                                                  Empty,
                                                  self.humanLocalizeCallback)
        self.humanDetectionSub_ = rospy.Subscriber(self.humanDetectionTopic,
                                                   Image,
                                                   self.humanDetectionCallback)
        self.humanFilterSub_ = rospy.Subscriber(self.humanFilterTopic,
                                                WorldState,
                                                self.humanFilterCallback)

        # status publisher
        self.statusTopic_ = str(
            rospy.get_param("health_status", "/health_status"))
        self.outputMsg = watchStatus()
        self.healthPub_ = rospy.Publisher(self.statusTopic_,
                                          watchStatus,
                                          queue_size=10)

        # tf listener
        self.tfBuffer = Buffer()
        self.slamListener = TransformListener(self.tfBuffer)

    def slamCheck(self):
        try:
            transform = self.tfBuffer.lookup_transform("map", "base_link",
                                                       rospy.Time())
            self.callbackHandle(transform, 'slam')
        except (LookupException, ConnectivityException,
                ExtrapolationException):
            pass

    def callbackHandle(self, msg, system):
        # updates the given system's array entry with the time the last message was received
        sys_index = self.heartbeats.index(system)
        self.healthLastTime[sys_index] = rospy.Time.now().to_nsec()

        # if want to ignore the time
        # self.checkError[sys_index] = True

    # TODO: see if I can use just one callback...
    def lidarBeatCallback(self, msg):
        self.callbackHandle(msg, 'lidar')

    def seekCallback(self, msg):
        self.callbackHandle(msg, 'seek')

    def realSenseRGBCallback(self, msg):
        self.callbackHandle(msg, 'realsense_rgb')

    def realSenseDepthCallback(self, msg):
        self.callbackHandle(msg, 'realsense_depth')

    def imuCallback(self, msg):
        self.callbackHandle(msg, 'transformed_imu')

    def humanLocalizeCallback(self, msg):
        self.callbackHandle(msg, 'localize')

    def humanDetectionCallback(self, msg):
        self.callbackHandle(msg, 'detection')

    def humanFilterCallback(self, msg):
        self.callbackHandle(msg, 'detection_filtering')
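
# HealthMonitor runs its own loop inside __init__, so a likely entry point
# (sketch; the node name is an assumption) only needs to construct it:
if __name__ == '__main__':
    rospy.init_node('health_monitor')
    try:
        HealthMonitor()
    except rospy.ROSInterruptException:
        pass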
class Eval(object):
    def __init__(self):
        self.tfBuffer = Buffer(rospy.Duration(10))
        self.tf_listener = TransformListener(self.tfBuffer)
        self.barcode_pose_sub = rospy.Subscriber('barcode/pose',
                                                 Barcode,
                                                 self.barcode_sub,
                                                 queue_size=100)
        self.done = rospy.Service('barcode_evaluation/done', Trigger,
                                  self.done_cb)
        self.marker_pub = rospy.Publisher('wrong_codes', Marker, queue_size=10)
        self.barcodes = {}
        self.tp = {}
        self.fp = {}
        self.data = Data()
        self.load_ground_truth()
        rospy.loginfo('ready to evaluate barcode detection')
        rospy.sleep(1)

    def make_marker(self, barcode_msg):
        ean = barcode_msg.barcode
        m = Marker()
        m.header = barcode_msg.barcode_pose.header
        m.pose = barcode_msg.barcode_pose.pose
        m.action = Marker.ADD
        m.type = Marker.CUBE
        m.scale = Vector3(.06, .06, .06)
        m.color = ColorRGBA(1, 0, 0, 1.0)

        text = Marker()
        text.header = barcode_msg.barcode_pose.header
        text.action = Marker.ADD
        text.type = Marker.TEXT_VIEW_FACING
        text.text = ean
        text.scale = Vector3(0, 0, .06)
        text.color = ColorRGBA(1, 0, 0, 1)
        text.pose.position.x = barcode_msg.barcode_pose.pose.position.x
        text.pose.position.y = barcode_msg.barcode_pose.pose.position.y
        text.pose.position.z = barcode_msg.barcode_pose.pose.position.z + 0.05

        m.text = ean
        m.ns = ean
        text.ns = ean
        m.id = 2
        self.marker_pub.publish(m)
        self.marker_pub.publish(text)

    def transformPose(self, target_frame, pose):
        transform = self.tfBuffer.lookup_transform(
            target_frame,
            pose.header.frame_id,  # source frame
            pose.header.stamp,  # get the tf at first available time
            rospy.Duration(1.0))
        new_pose = do_transform_pose(pose, transform)
        return new_pose

    def done_cb(self, req):
        self.data.save_to_file()
        resp = TriggerResponse()
        resp.success = True
        self.final_results()
        return resp

    def load_ground_truth(self):
        # data = json.load(open('../data/iai_shelfs_barcodes.json'))
        data = json.load(open('../data/ground_truth.json'))
        for shelf in data.values():
            for row in shelf:
                for barcode in row['barcode']:
                    self.barcodes[str(barcode['code'])[:-1]] = np.array([
                        -barcode['pose']['position']['x'],
                        0.515 - barcode['pose']['position']['y'],
                        0.16 + barcode['pose']['position']['z']
                    ])

    def final_results(self):
        tp = len(self.tp)
        fp = len(self.fp)
        fn = len(self.barcodes) - tp
        rospy.loginfo('tp: {}, fp: {}, fn: {}'.format(tp, fp, fn))

        self.tp = {}
        self.fp = {}

    def barcode_sub(self, data):
        inner_barcode = data.barcode[1:-1]
        transformed_pose = self.tfBuffer.lookup_transform(
            'map', data.barcode, rospy.Time(), timeout=rospy.Duration(4))
        data.barcode_pose = self.transformPose('map', data.barcode_pose)
        p_map = transformed_pose.transform.translation
        p = np.array([p_map.x, p_map.y, p_map.z])
        self.data.add_barcode(data)
        try:
            p2 = self.barcodes[inner_barcode]

            if inner_barcode not in self.tp:
                self.tp[inner_barcode] = p
                print('ground truth {}; predicted {}'.format(p2, p))
                rospy.loginfo('detected {} position diff: {}'.format(
                    inner_barcode, np.linalg.norm(p - p2)))
                print('-----')
        except KeyError as e:
            if inner_barcode not in self.fp:
                rospy.logwarn('unknown barcode {} at {} {}'.format(
                    inner_barcode, p, data))
                self.fp[inner_barcode] = p
                self.make_marker(data)
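
# The transformPose helper above wraps tf2_geometry_msgs.do_transform_pose;
# a standalone equivalent looks like this (the frame names are placeholders):
import rospy
from tf2_ros import Buffer, TransformListener
from tf2_geometry_msgs import do_transform_pose
from geometry_msgs.msg import PoseStamped

if __name__ == '__main__':
    rospy.init_node('transform_pose_sketch')
    buf = Buffer()
    TransformListener(buf)
    ps = PoseStamped()
    ps.header.frame_id = 'base_link'
    ps.pose.orientation.w = 1.0
    transform = buf.lookup_transform('map', ps.header.frame_id,
                                     rospy.Time(0), rospy.Duration(4.0))
    rospy.loginfo(do_transform_pose(ps, transform))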
Example #28
class ARObjectsPerception(object):
    def __init__(self):
        self.tf_buffer = Buffer()
        self.tf_listener = TransformListener(self.tf_buffer)
        self.tf_broadcaster = TransformBroadcaster()

        self.camera_info_topic = rospy.get_param("~camera_info_topic", "")
        self.global_frame_id = rospy.get_param("~global_frame_id", "odom")

        self.resource_directory = rospy.get_param("~resource_directory", "")
        if self.resource_directory == "":
            raise ValueError(
                "Need to specify the '~resource_directory' parameter")

        self.ar_tags_config = rospy.get_param("~ar_tags_config", "")
        if self.ar_tags_config == "":
            raise ValueError("Need to specify the '~ar_tags_config' parameter")

        self.load_config(self.ar_tags_config)

        self.tracks = {}

        self.camera_info = None

        for entity_id, entity in self.config.items():
            track = uwds3_msgs.msg.SceneNode()
            track.label = self.config[entity_id]["label"]
            track.description = self.config[entity_id]["description"]
            track.is_located = True
            track.has_shape = True
            shape = uwds3_msgs.msg.PrimitiveShape()
            shape.type = uwds3_msgs.msg.PrimitiveShape.MESH
            position = self.config[entity_id]["position"]
            orientation = self.config[entity_id]["orientation"]
            q = quaternion_from_euler(orientation["x"], orientation["y"],
                                      orientation["z"], "rxyz")
            shape.pose.position.x = position["x"]
            shape.pose.position.y = position["y"]
            shape.pose.position.z = position["z"]
            shape.pose.orientation.x = q[0]
            shape.pose.orientation.y = q[1]
            shape.pose.orientation.z = q[2]
            shape.pose.orientation.w = q[3]
            shape.mesh_resource = "file://" + self.resource_directory + self.config[
                entity_id]["mesh_resource"]
            track.shapes = [shape]
            self.tracks[entity_id] = track

        self.camera_info_subscriber = rospy.Subscriber(
            self.camera_info_topic, sensor_msgs.msg.CameraInfo,
            self.camera_info_callback)

        self.ar_track_subscriber = rospy.Subscriber(
            "ar_pose_marker", ar_track_alvar_msgs.msg.AlvarMarkers,
            self.observation_callback)
        self.tracks_publisher = rospy.Publisher(
            "tracks", uwds3_msgs.msg.SceneChangesStamped, queue_size=1)

    def load_config(self, config_file_path):
        with open(config_file_path, "r") as file:
            self.config = yaml.safe_load(file)  # safe_load: the config needs no custom YAML tags

    def camera_info_callback(self, msg):
        if self.camera_info is None:
            rospy.loginfo("[ar_perception] Camera info received !")
        self.camera_info = msg
        self.camera_frame_id = msg.header.frame_id
        self.camera_matrix = np.array(msg.K).reshape((3, 3))
        self.dist_coeffs = np.array(msg.D)

    def observation_callback(self, ar_track_msg):
        scene_changes = uwds3_msgs.msg.SceneChangesStamped()
        scene_changes.world = "tracks"
        scene_changes.header = ar_track_msg.header
        scene_changes.header.stamp = rospy.Time().now()
        scene_changes.header.frame_id = self.global_frame_id
        if self.camera_info is not None:
            if len(ar_track_msg.markers) > 0:
                scene_changes.header.stamp = ar_track_msg.header.stamp
            for marker in ar_track_msg.markers:
                success, view_pose = self.get_pose_from_tf2(
                    self.global_frame_id,
                    marker.header.frame_id)  # , ar_track_msg.header.stamp)
                if success is True:
                    position = marker.pose.pose.position
                    orientation = marker.pose.pose.orientation
                    obj_sensor_pose = Vector6D(x=position.x,
                                               y=position.y,
                                               z=position.z).from_quaternion(
                                                   orientation.x,
                                                   orientation.y,
                                                   orientation.z,
                                                   orientation.w)
                    world_pose = view_pose + obj_sensor_pose
                    track = self.tracks[marker.id]
                    track.pose_stamped.header = scene_changes.header
                    track.pose_stamped.pose.pose.position.x = world_pose.position().x
                    track.pose_stamped.pose.pose.position.y = world_pose.position().y
                    track.pose_stamped.pose.pose.position.z = world_pose.position().z
                    q = world_pose.quaternion()
                    track.pose_stamped.pose.pose.orientation.x = q[0]
                    track.pose_stamped.pose.pose.orientation.y = q[1]
                    track.pose_stamped.pose.pose.orientation.z = q[2]
                    track.pose_stamped.pose.pose.orientation.w = q[3]
                    scene_changes.changes.nodes.append(track)
        self.tracks_publisher.publish(scene_changes)

    def get_pose_from_tf2(self, source_frame, target_frame, time=None):
        try:
            if time is not None:
                trans = self.tf_buffer.lookup_transform(
                    source_frame, target_frame, time)
            else:
                trans = self.tf_buffer.lookup_transform(
                    source_frame, target_frame, rospy.Time(0))
            x = trans.transform.translation.x
            y = trans.transform.translation.y
            z = trans.transform.translation.z

            rx = trans.transform.rotation.x
            ry = trans.transform.rotation.y
            rz = trans.transform.rotation.z
            rw = trans.transform.rotation.w
            pose = Vector6D(x=x, y=y, z=z).from_quaternion(rx, ry, rz, rw)
            return True, pose
        except (tf2_ros.LookupException, tf2_ros.ConnectivityException,
                tf2_ros.ExtrapolationException) as e:
            rospy.logwarn("[perception] Exception occured: {}".format(e))
            return False, None
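
# A plausible entry point (sketch; the node name is an assumption). Markers
# are only processed once a CameraInfo message has been received.
if __name__ == '__main__':
    rospy.init_node('ar_objects_perception')
    ARObjectsPerception()
    rospy.spin()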
Example #29
class TfBridge(object):
    """ Utility class to interface with tf2 """
    def __init__(self, prefix=""):
        """ """
        self.tf_buffer = Buffer()
        self.prefix = prefix
        self.tf_listener = TransformListener(self.tf_buffer)
        self.tf_broadcaster = TransformBroadcaster()

    def publish_pose_to_tf(self,
                           pose,
                           source_frame,
                           target_frame,
                           header=None):
        """ Publish the given pose to /tf2 """
        msg = pose.to_msg()
        transform = TransformStamped()
        transform.child_frame_id = target_frame
        transform.header.frame_id = source_frame
        if header is not None:
            transform.header.stamp = header.stamp
        else:
            transform.header.stamp = rospy.Time().now()
        transform.transform.translation = msg.position
        transform.transform.rotation = msg.orientation
        self.tf_broadcaster.sendTransform(transform)

    def get_pose_from_tf(self, source_frame, target_frame, time=None):
        """ Get the pose from /tf2 """
        try:
            if time is not None:
                trans = self.tf_buffer.lookup_transform(
                    source_frame, target_frame, time)
            else:
                trans = self.tf_buffer.lookup_transform(
                    source_frame, target_frame, rospy.Time(0))
            x = trans.transform.translation.x
            y = trans.transform.translation.y
            z = trans.transform.translation.z

            rx = trans.transform.rotation.x
            ry = trans.transform.rotation.y
            rz = trans.transform.rotation.z
            rw = trans.transform.rotation.w
            pose = Vector6D(x=x, y=y, z=z).from_quaternion(rx, ry, rz, rw)
            return True, pose
        except (tf2_ros.LookupException, tf2_ros.ConnectivityException,
                tf2_ros.ExtrapolationException) as e:
            rospy.logwarn("[tf_bridge] Exception occured: {}".format(e))
            return False, None

    def publish_tf_frames(self, tracks, events, header):
        for track in tracks:
            if track.is_located() is True and track.is_confirmed() is True:
                self.publish_pose_to_tf(track.pose,
                                        header.frame_id,
                                        self.prefix + track.id,
                                        header=header)
        for event in events:
            if event.is_located() is True:
                frame = event.subject + event.description + event.object
                pose = Vector6D(x=event.point.x,
                                y=event.point.y,
                                z=event.point.z)
                self.publish_pose_to_tf(pose,
                                        header.frame_id,
                                        self.prefix + frame,
                                        header=header)
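
# Usage sketch for TfBridge: publish a pose under a child frame, then read it
# back. Vector6D is the same 6-DoF pose type used above; its import and the
# frame names here are assumptions.
if __name__ == '__main__':
    rospy.init_node('tf_bridge_sketch')
    bridge = TfBridge()
    pose = Vector6D(x=1.0, y=0.0, z=0.5)
    rate = rospy.Rate(10)
    while not rospy.is_shutdown():
        bridge.publish_pose_to_tf(pose, 'map', 'demo_frame')
        success, read_back = bridge.get_pose_from_tf('map', 'demo_frame')
        if success:
            rospy.loginfo(read_back)
        rate.sleep()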
 rospy.Subscriber('/land_param',
                  Float32MultiArray,
                  land_param_callback,
                  queue_size=1)
 cmd_vel_pub = rospy.Publisher('/xtdrone/' + vehicle_type + '_' +
                               vehicle_id + '/cmd_vel_enu',
                               Twist,
                               queue_size=1)
 cmd_pub = rospy.Publisher('/xtdrone/' + vehicle_type + '_' + vehicle_id +
                           '/cmd',
                           String,
                           queue_size=1)
 rate = rospy.Rate(20)
 while not rospy.is_shutdown():
     try:
         tfstamped = tfBuffer.lookup_transform('map', 'tag_' + vehicle_id,
                                               rospy.Time(0))
         get_time = False
         cmd_vel_enu.linear.x = Kp_xy * (tfstamped.transform.translation.x +
                                         x_bias -
                                         local_pose.pose.position.x)
         cmd_vel_enu.linear.y = Kp_xy * (tfstamped.transform.translation.y +
                                         y_bias -
                                         local_pose.pose.position.y)
         if land_flag:
             cmd_vel_enu.linear.z = -land_vel
         else:
             cmd_vel_enu.linear.z = Kp_z * (
                 tfstamped.transform.translation.z + z_bias -
                 local_pose.pose.position.z)
         print(cmd_vel_enu)
     except (tf2_ros.LookupException, tf2_ros.ConnectivityException,
             tf2_ros.ExtrapolationException):
         # catch only tf errors; a bare except would also swallow shutdown exceptions
         pass
     cmd_vel_pub.publish(cmd_vel_enu)
     rate.sleep()
class RqtCalibrationEvaluator(Plugin):
    def __init__(self, context):
        super(RqtCalibrationEvaluator, self).__init__(context)
        # Give QObjects reasonable names
        self.setObjectName('CalibrationEvaluator')

        # Process standalone plugin command-line arguments
        from argparse import ArgumentParser
        parser = ArgumentParser()
        # Add argument(s) to the parser.
        parser.add_argument("-q",
                            "--quiet",
                            action="store_true",
                            dest="quiet",
                            help="Put plugin in silent mode")
        args, unknowns = parser.parse_known_args(context.argv())
        if not args.quiet:
            print('arguments: ', args)
            print('unknowns: ', unknowns)

        # Create QWidget
        self._widget = QWidget()
        self._infoWidget = QWidget()
        # Get path to UI file which should be in the "resource" folder of this package
        ui_file = os.path.join(rospkg.RosPack().get_path('rqt_easy_handeye'),
                               'resource', 'rqt_handeye_evaluator.ui')
        ui_info_file = os.path.join(
            rospkg.RosPack().get_path('rqt_easy_handeye'), 'resource',
            'rqt_handeye_info.ui')
        # Extend the widget with all attributes and children from UI file
        loadUi(ui_file, self._widget)
        loadUi(ui_info_file, self._infoWidget)
        self._widget.layout().insertWidget(0, self._infoWidget)
        # Give QObjects reasonable names
        self._widget.setObjectName('RqtHandeyeCalibrationUI')
        # Show _widget.windowTitle on left-top of each plugin (when
        # it's set in _widget). This is useful when you open multiple
        # plugins at once. Also if you open multiple instances of your
        # plugin at once, these lines add number to make it easy to
        # tell from pane to pane.
        if context.serial_number() > 1:
            self._widget.setWindowTitle(self._widget.windowTitle() +
                                        (' (%d)' % context.serial_number()))
        # Add widget to the user interface
        context.add_widget(self._widget)

        self._infoWidget.calibNameLineEdit.setText(rospy.get_namespace())
        if rospy.get_param('eye_on_hand', False):
            self._infoWidget.calibTypeLineEdit.setText("eye on hand")
        else:
            self._infoWidget.calibTypeLineEdit.setText("eye on base")
        self._infoWidget.trackingBaseFrameLineEdit.setText(
            rospy.get_param('tracking_base_frame', 'optical_origin'))
        self._infoWidget.trackingMarkerFrameLineEdit.setText(
            rospy.get_param('tracking_marker_frame', 'optical_target'))
        self._infoWidget.robotBaseFrameLineEdit.setText(
            rospy.get_param('robot_base_frame', 'base_link'))
        self._infoWidget.robotEffectorFrameLineEdit.setText(
            rospy.get_param('robot_effector_frame', 'tool0'))

        self.output_label = self._widget.label_message
        self.output_label.setText('Waiting for samples...')

        self._widget.pushButton_reset.clicked.connect(self.reset)

        self.is_eye_on_hand = rospy.get_param('~eye_on_hand')
        self.robot_base_frame = rospy.get_param('~robot_base_frame')
        self.robot_effector_frame = rospy.get_param('~robot_effector_frame')

        self.tracking_measurement_frame = rospy.get_param(
            '~tracking_marker_frame')
        if self.is_eye_on_hand:
            self.robot_measurement_frame = self.robot_base_frame
        else:
            self.robot_measurement_frame = self.robot_effector_frame

        self.tf_buffer = Buffer()
        self.tf_listener = TransformListener(self.tf_buffer)

        self.update_timer = QTimer(self)
        self.update_timer.timeout.connect(self.tick)
        self.update_timer.start(500)

        self.last_robot_transform = None  # used to see if we are in a steady state

        # used to measure the quality of the calibration: the entries should all have the same value
        self.measurement_transforms = []
        # used to determine when to sample: we wait for a steady state that has not been sampled yet
        self.robot_transforms = []

        self._widget.show()

    def tick(self):
        # wait for a steady state to avoid problems with lag
        # if the pose is next to one already in the dataset, ask the user to move
        # if the transform could not be sampled, report how old the last one is
        # show average error; show dataset in 3D?
        try:
            new_robot_transform = self.tf_buffer.lookup_transform(
                self.robot_base_frame, self.robot_effector_frame,
                rospy.Time.now(), rospy.Duration.from_sec(0.2))
            new_measurement_transform = self.tf_buffer.lookup_transform(
                self.robot_measurement_frame, self.tracking_measurement_frame,
                rospy.Time.now(), rospy.Duration.from_sec(0.2))
            if new_robot_transform is None:
                rospy.logwarn(
                    'Could not sample transform between {} and {}'.format(
                        self.robot_base_frame, self.robot_effector_frame))
            if new_measurement_transform is None:
                rospy.logwarn(
                    'Could not sample transform between {} and {}'.format(
                        self.robot_measurement_frame,
                        self.tracking_measurement_frame))
            if self.last_robot_transform is None:
                self.last_robot_transform = new_robot_transform
                self.updateUI()
                rospy.loginfo('Sampled first transform')
                return
            if RqtCalibrationEvaluator.transform_too_far(
                    new_robot_transform,
                    self.last_robot_transform,
                    absolute_tolerance=0.001):
                self.last_robot_transform = new_robot_transform
                msg = 'Waiting for steady state'
                rospy.loginfo(msg)
                self.output_label.setText(msg)
                return

            if self.robot_transform_is_too_close_to_previous_sample(
                    new_robot_transform, absolute_tolerance=0.003):
                self.updateUI()
                rospy.loginfo(
                    'Now we have {} samples\ntoo close to an old pose, move around!'
                    .format(len(self.measurement_transforms)))
                self.output_label.setText(
                    'Too close to an old pose, move around!')
                return

            self.robot_transforms.append(new_robot_transform)
            self.measurement_transforms.append(new_measurement_transform)
            rospy.loginfo('Appending transform; we got {} now'.format(
                len(self.measurement_transforms)))
            self.updateUI()
            # TODO: provide feedback if the data is sufficient
        except (LookupException, ExtrapolationException,
                ConnectivityException) as e:
            msg = 'Could not sample pose!'
            rospy.loginfo(msg)
            rospy.logerr(e)
            self.output_label.setText(msg)

    def reset(self):
        self.robot_transforms = []
        self.measurement_transforms = []
        self.updateUI()

    def updateUI(self):
        self._widget.spinBox_samples.setValue(len(self.measurement_transforms))
        if len(self.measurement_transforms) > 2:

            def translation_from_msg(msg):
                t = msg.transform.translation
                return t.x, t.y, t.z

            translations = [
                translation_from_msg(t) for t in self.measurement_transforms
            ]
            translations_np = np.array(translations)
            translations_avg = translations_np.mean(axis=0)
            translations_from_avg = translations_np - translations_avg
            translations_max_divergence = np.max(translations_from_avg)
            rospy.loginfo(
                "Maximum divergence: {}".format(translations_max_divergence))

            self._widget.doubleSpinBox_error.setEnabled(True)
            self._widget.doubleSpinBox_error.setValue(
                translations_max_divergence.max())
        else:
            self._widget.doubleSpinBox_error.setValue(0)
            self._widget.doubleSpinBox_error.setEnabled(False)

    @staticmethod
    def transform_to_concatenated_translation_quaternion(transform):
        tr = transform.transform.translation
        quat = transform.transform.rotation
        return np.array([tr.x, tr.y, tr.z, quat.x, quat.y, quat.z, quat.w])

    def robot_transform_is_too_close_to_previous_sample(
            self, new_robot_transform, absolute_tolerance):
        # TODO: use a meaningful metric
        posevec = RqtCalibrationEvaluator.transform_to_concatenated_translation_quaternion(
            new_robot_transform)
        for t in reversed(self.robot_transforms):
            old_posevec = RqtCalibrationEvaluator.transform_to_concatenated_translation_quaternion(
                t)
            if np.allclose(posevec, old_posevec, atol=absolute_tolerance):
                return True
        return False

    @staticmethod
    def transform_too_far(t1, t2, absolute_tolerance):
        # TODO: use a meaningful metric
        return not np.allclose(
            RqtCalibrationEvaluator.
            transform_to_concatenated_translation_quaternion(t1),
            RqtCalibrationEvaluator.
            transform_to_concatenated_translation_quaternion(t2),
            atol=absolute_tolerance)

    def shutdown_plugin(self):
        # TODO unregister all publishers here
        pass

    def save_settings(self, plugin_settings, instance_settings):
        # TODO save intrinsic configuration, usually using:
        # instance_settings.set_value(k, v)
        pass

    def restore_settings(self, plugin_settings, instance_settings):
        # TODO restore intrinsic configuration, usually using:
        # v = instance_settings.value(k)
        pass
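
# The error figure computed in updateUI above, as a standalone numpy sketch
# (the sample translations are made up): the reported value is the largest
# per-axis deviation of any measurement from the mean translation.
import numpy as np

translations = np.array([[0.50, 0.10, 0.30],
                         [0.51, 0.09, 0.30],
                         [0.50, 0.11, 0.31]])
divergence = np.max(translations - translations.mean(axis=0))
print('maximum divergence: {:.4f} m'.format(divergence))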
Example #32
class ImcInterface(object):
    def __init__(self):
        self._estimated_state_pub = rospy.Publisher("imc/estimated_state",
                                                    EstimatedState,
                                                    queue_size=5)
        self._estimated_state_msg = EstimatedState()
        self._geo_converter = rospy.ServiceProxy("convert_points",
                                                 ConvertGeoPoints)
        self._plan_db = dict()
        self._current_plan = PlanDB()
        self._tf_buffer = Buffer()
        TransformListener(self._tf_buffer)
        rospy.Subscriber("gps", NavSatFix, self._handle_gps)
        rospy.Subscriber("odometry", Odometry, self._handle_pose)
        rospy.Subscriber("imc/goto_waypoint", Pose, self._handle_goto)
        rospy.Subscriber("imc/plan_control", PlanControl,
                         self._handle_plan_control)
        rospy.Subscriber("imc/abort", Empty, self._handle_abort)
        rospy.Subscriber("imc/imc_heartbeat", Empty,
                         self._handle_imc_heartbeat)
        rospy.Subscriber("imc/plan_db", PlanDB, self._handle_plan_db)
        rospy.Timer(rospy.Duration(1, 0), self._send_estimated_state)
        self._waypoint_serv = rospy.Service("load_waypoints", InitWaypointSet,
                                            self._send_goal)
        self._action_client = actionlib.SimpleActionClient(
            "follow_waypoints", FollowWaypointsAction)
        self._mode_client = rospy.ServiceProxy("controller/set_control_mode",
                                               SetControlMode)
        self._plan_db_pub = rospy.Publisher("imc/plan_db",
                                            PlanDB,
                                            queue_size=10)
        self._marker_pub = rospy.Publisher("current_waypoints",
                                           WaypointSet,
                                           queue_size=1)

    def _send_goal(self, req):
        res = InitWaypointSetResponse()
        res.success = False
        try:
            self._mode_client.wait_for_service(rospy.Duration(5))
            mode_req = SetControlModeRequest()
            mode_req.mode.mode = mode_req.mode.LOSGUIDANCE
            self._mode_client(mode_req)
            self._action_client.wait_for_server(rospy.Duration(5))
            goal = FollowWaypointsGoal()
            goal.waypoints.header = Header(0, rospy.Time.now(), "utm")
            goal.waypoints.start_time = Time(goal.waypoints.header.stamp)
            goal.waypoints.waypoints = req.waypoints
            self._marker_pub.publish(goal.waypoints)
            self._action_client.send_goal(goal, self._handle_done,
                                          self._handle_active,
                                          self._handle_feedback)
            res.success = True
            return res
        except Exception as e:
            rospy.logerr("{} | {}".format(rospy.get_name(), e.message))
            return res

    def _handle_done(self, status, result):
        rospy.loginfo(
            "{} | Action completed, status: {}\n Waypoints completed: {}".
            format(rospy.get_name(), status, result))

    def _handle_active(self):
        rospy.loginfo("{} | Waypoints set, Front Seat Driver active.".format(
            rospy.get_name()))

    def _handle_feedback(self, feedback):
        rospy.loginfo_throttle(10, "{} | {}".format(rospy.get_name(),
                                                    feedback))

    def _handle_goto(self, msg):
        """Parse if a goto message is received"""
        # TODO Not implemented in imc_ros_bridge yet
        rospy.loginfo(msg)
        pass

    def _parse_plan(self):
        # TODO this parser only accepts a set of goto commands
        """A plan has been requested by neptus to start. Parse the plan here and send to controller."""
        geopoints = []
        speeds = []
        is_depths = []
        for maneuver in self._current_plan.plan_spec.maneuvers:
            geopoints.append(
                GeoPoint(maneuver.maneuver.lat * 180.0 / np.pi,
                         maneuver.maneuver.lon * 180.0 / np.pi,
                         maneuver.maneuver.z))
            speeds.append(maneuver.maneuver.speed)
            is_depths.append(maneuver.maneuver.z_units == 1)
        req = ConvertGeoPointsRequest()
        req.geopoints = geopoints
        points = self._geo_converter.call(req).utmpoints
        wps = []
        # If datum is available, then reference frame is ENU World
        for point, speed, is_depth in zip(points, speeds, is_depths):
            wp = Waypoint()
            wp.point.x = point.x
            wp.point.y = point.y
            wp.point.z = -abs(point.z) if is_depth else abs(point.z)
            wp.header.stamp = rospy.Time.now()
            wp.header.frame_id = "utm"
            wp.radius_of_acceptance = 1.0
            wp.max_forward_speed = speed
            wp.use_fixed_heading = False
            wp.heading_offset = 0.0
            wps.append(wp)
        req = InitWaypointSetRequest()
        req.start_time = Time(rospy.Time.from_sec(0))
        req.start_now = True
        req.waypoints = wps
        req.max_forward_speed = max(speeds)
        req.interpolator.data = "lipb"
        req.heading_offset = 0
        self._send_goal(req)

    def _handle_plan_control(self, msg):
        """Parse requests to start or stop a plan."""
        typee = msg.type
        op = msg.op
        # request to start a mission!
        if typee == imc_enums.PLANDB_TYPE_REQUEST and op == imc_enums.PLANDB_OP_SET:
            self._current_plan = self._plan_db[msg.plan_id]
            self._parse_plan()
            req = SetControlModeRequest()
            req.mode.mode = req.mode.LOSGUIDANCE
            self._mode_client(req)
        # request to stop mission
        elif typee == imc_enums.PLANDB_TYPE_REQUEST and op == imc_enums.PLANDB_OP_DEL:
            self._current_plan = PlanDB()
            req = SetControlModeRequest()
            req.mode.mode = req.mode.HOLDPOSITION
            self._mode_client(req)

    def _handle_plan_db(self, msg):
        """Parse requests for Plan Database messages"""
        typee = msg.type
        op = msg.op
        # request for plan info
        if typee == imc_enums.PLANDB_TYPE_REQUEST and op == imc_enums.PLANDB_OP_GET_INFO:
            # we need to respond to this with some info... but what?
            rospy.loginfo_throttle_identical(
                30, "Got REQUEST GET_INFO planDB msg from Neptus for plan: {}".
                format(str(msg.plan_id)))
            response = PlanDB()
            response.plan_id = self._plan_db[msg.plan_id].plan_id
            response.type = imc_enums.PLANDB_TYPE_SUCCESS
            response.op = imc_enums.PLANDB_OP_GET_INFO
            response.plandb_information = self._plan_db[
                msg.plan_id].plandb_information
            self._plan_db_pub.publish(response)
            rospy.loginfo_throttle_identical(
                30, "Answered GET_INFO for plan:" + str(response.plan_id))
        # request for plan state
        elif typee == imc_enums.PLANDB_TYPE_REQUEST and op == imc_enums.PLANDB_OP_GET_STATE:
            rospy.loginfo_throttle_identical(
                30, "Got REQUEST GET_STATE planDB msg from Neptus")
            response = PlanDB()
            response.plan_id = self._current_plan.plan_id
            response.type = imc_enums.PLANDB_TYPE_SUCCESS
            response.op = imc_enums.PLANDB_OP_GET_STATE
            response.plandb_state = PlanDBState()
            response.plandb_state.plans_info = []
            total_size = 0
            latest = None
            # TODO Neptus needs an md5 calculation on the plans_info message to be stored in plandb_state.md5 in order
            #  to sync, but we cannot seem to get that right for now.
            #  https://github.com/LSTS/imcjava/blob/d95fddeab4c439e603cf5e30a32979ad7ace5fbc/src/java/pt/lsts/imc/adapter/PlanDbManager.java#L160
            #  See above for an example.
            #  It seems like we need to keep a planDB ourselves on this side, collect all the plans we
            #  received and answer this get_state with data from them all to get a properly synchronized plan.
            # This buffer method for calculating the md5 sum of a ROS message is not ideal.
            buffer = StringIO()
            md = hashlib.md5()
            for p in self._plan_db.values():
                pdi = p.plandb_information
                total_size += pdi.plan_size
                response.plandb_state.plans_info.append(pdi)
                # track the most recent change time seen so far
                latest = pdi.change_time if latest is None else max(
                    latest, pdi.change_time)
                pdi.serialize(buffer)
                for chunk in buffer.buflist:
                    md.update(chunk)
            response.plandb_state.plan_count = len(
                response.plandb_state.plans_info)
            response.plandb_state.plan_size = total_size
            response.plandb_state.md5 = md.digest()
            self._plan_db_pub.publish(response)
            rospy.loginfo_throttle_identical(30,
                                             "Answered GET_STATE for plans.")
        # ack for plan set success
        elif typee == imc_enums.PLANDB_TYPE_SUCCESS and op == imc_enums.PLANDB_OP_SET:
            rospy.loginfo_throttle_identical(
                20, "Received SUCCESS for plandb set")
        # ack for plan get info
        elif typee == imc_enums.PLANDB_TYPE_SUCCESS and op == imc_enums.PLANDB_OP_GET_INFO:
            rospy.loginfo_throttle_identical(
                20, "Received SUCCESS for plandb get info")
        # ack for plan get state
        elif typee == imc_enums.PLANDB_TYPE_SUCCESS and op == imc_enums.PLANDB_OP_GET_STATE:
            rospy.loginfo_throttle_identical(
                20, "Received SUCCESS for plandb get state")
        # request to set plan
        elif op == imc_enums.PLANDB_OP_SET:
            # update the plan_info and plan_state attributes
            msg.plandb_information.plan_id = msg.plan_id
            msg.plandb_information.plan_size = len(msg.plan_spec.maneuvers)
            msg.plandb_information.change_time = time.time()
            msg.plandb_information.md5 = msg.plan_spec_md5
            # place the plan in our dictionary of plans
            self._plan_db[msg.plan_spec.plan_id] = msg
            # send a success response
            response = PlanDB()
            response.type = imc_enums.PLANDB_TYPE_SUCCESS
            response.op = imc_enums.PLANDB_OP_SET
            response.plan_id = msg.plan_spec.plan_id
            self._plan_db_pub.publish(response)
        # handle other requests
        else:
            rospy.loginfo_throttle_identical(
                5, "Received some unhandled planDB message:\n" + str(msg))

    def _handle_abort(self, msg):
        """Listen for the abort message, do something if it comes :)"""
        self._action_client.wait_for_server()
        if self._action_client.get_state() == GoalStatus.ACTIVE:
            self._action_client.cancel_goal()
        self._mode_client.wait_for_service()
        req = SetControlModeRequest()
        req.mode.mode = req.mode.ABORT
        self._mode_client(req)

    def _handle_imc_heartbeat(self, msg):
        """Here in case you want to do something on the heartbeat of the IMC"""
        pass

    def _handle_gps(self, msg):
        # Neptus expects latitude and longitude in radians
        self._estimated_state_msg.lat = np.radians(msg.latitude)
        self._estimated_state_msg.lon = np.radians(msg.longitude)
        self._estimated_state_msg.alt = msg.altitude

    def _handle_pose(self, msg):
        # Query the transform at a single timestamp; calling can_transform and
        # lookup_transform with two different "now" values can race.
        now = rospy.Time.now()
        if self._tf_buffer.can_transform("bluerov2/base_link", "map_ned", now,
                                         rospy.Duration.from_sec(0.5)):
            tf = self._tf_buffer.lookup_transform("bluerov2/base_link",
                                                  "map_ned", now,
                                                  rospy.Duration.from_sec(0.5))
        else:
            return
        # TODO neptus gets confused if you try to give all available estimates, there is probably a transformation problem
        # self._estimated_state_msg.x = tf.transform.translation.x
        # self._estimated_state_msg.y = tf.transform.translation.y
        # self._estimated_state_msg.z = tf.transform.translation.z
        # self._estimated_state_msg.depth = tf.transform.translation.z
        # self._estimated_state_msg.height = msg.pose.pose.position.z
        # scipy Rotation: build from the quaternion, then read out roll/pitch/yaw
        R = Rotation.from_quat([
            tf.transform.rotation.x, tf.transform.rotation.y,
            tf.transform.rotation.z, tf.transform.rotation.w
        ])
        (self._estimated_state_msg.phi, self._estimated_state_msg.theta,
         self._estimated_state_msg.psi) = R.as_euler("xyz", degrees=False)
        # self._estimated_state_msg.u = msg.twist.twist.linear.x
        # self._estimated_state_msg.v = -msg.twist.twist.linear.y
        # self._estimated_state_msg.w = -msg.twist.twist.linear.z
        # self._estimated_state_msg.p = msg.twist.twist.angular.x
        # self._estimated_state_msg.q = -msg.twist.twist.angular.y
        # self._estimated_state_msg.r = -msg.twist.twist.angular.z
        # u = np.array([msg.twist.twist.linear.x,msg.twist.twist.linear.y,msg.twist.twist.linear.z])
        # self._estimated_state_msg.vx, self._estimated_state_msg.vy, self._estimated_state_msg.vz = R.apply(u, True)

    def _send_estimated_state(self, event):
        self._estimated_state_pub.publish(self._estimated_state_msg)
Example #33
class TabletopPerception(object):
    def __init__(self):
        self.tf_buffer = Buffer()
        self.tf_listener = TransformListener(self.tf_buffer)
        self.tf_broadcaster = TransformBroadcaster()

        self.rgb_image_topic = rospy.get_param("~rgb_image_topic",
                                               "/camera/rgb/image_raw")
        self.rgb_camera_info_topic = rospy.get_param(
            "~rgb_camera_info_topic", "/camera/rgb/camera_info")

        self.depth_image_topic = rospy.get_param("~depth_image_topic",
                                                 "/camera/depth/image_raw")
        self.depth_camera_info_topic = rospy.get_param(
            "~depth_camera_info_topic", "/camera/depth/camera_info")

        self.base_frame_id = rospy.get_param("~base_frame_id", "base_link")
        self.global_frame_id = rospy.get_param("~global_frame_id", "odom")

        self.bridge = CvBridge()

        rospy.loginfo(
            "[tabletop_perception] Subscribing to '{}' topic...".format(
                self.rgb_camera_info_topic))
        self.camera_info = None
        self.camera_frame_id = None
        self.camera_info_subscriber = rospy.Subscriber(
            self.rgb_camera_info_topic, CameraInfo, self.camera_info_callback)

        detector_model_filename = rospy.get_param("~detector_model_filename",
                                                  "")
        detector_weights_filename = rospy.get_param(
            "~detector_weights_filename", "")
        detector_config_filename = rospy.get_param("~detector_config_filename",
                                                   "")

        face_detector_model_filename = rospy.get_param(
            "~face_detector_model_filename", "")
        face_detector_weights_filename = rospy.get_param(
            "~face_detector_weights_filename", "")
        face_detector_config_filename = rospy.get_param(
            "~face_detector_config_filename", "")

        self.detector = SSDDetector(detector_model_filename,
                                    detector_weights_filename,
                                    detector_config_filename, 300)

        self.face_detector = SSDDetector(face_detector_model_filename,
                                         face_detector_weights_filename,
                                         face_detector_config_filename, 300)

        self.foreground_detector = ForegroundDetector(interactive_mode=False)

        shape_predictor_config_filename = rospy.get_param(
            "~shape_predictor_config_filename", "")
        self.facial_landmarks_estimator = FacialLandmarksEstimator(
            shape_predictor_config_filename)
        face_3d_model_filename = rospy.get_param("~face_3d_model_filename", "")
        self.head_pose_estimator = HeadPoseEstimator(face_3d_model_filename)

        facial_features_model_filename = rospy.get_param(
            "~facial_features_model_filename", "")
        self.facial_features_estimator = FacialFeaturesEstimator(
            face_3d_model_filename, facial_features_model_filename)

        self.color_features_estimator = ColorFeaturesEstimator()

        self.n_frame = rospy.get_param("~n_frame", 4)
        self.frame_count = 0

        self.publish_tf = rospy.get_param("~publish_tf", True)
        self.publish_visualization_image = rospy.get_param(
            "~publish_visualization_image", True)

        self.n_init = rospy.get_param("~n_init", 1)
        self.max_iou_distance = rospy.get_param("~max_iou_distance", 0.8)
        self.max_color_distance = rospy.get_param("~max_color_distance", 0.8)
        self.max_face_distance = rospy.get_param("~max_face_distance", 0.8)
        self.max_centroid_distance = rospy.get_param("~max_centroid_distance",
                                                     0.8)
        self.max_disappeared = rospy.get_param("~max_disappeared", 7)
        self.max_age = rospy.get_param("~max_age", 10)

        self.object_tracker = MultiObjectTracker(iou_cost,
                                                 color_cost,
                                                 0.7,  # hard-coded max IoU distance
                                                 self.max_color_distance,
                                                 10,  # hard-coded n_init
                                                 5,  # hard-coded max_disappeared
                                                 self.max_age,
                                                 use_appearance_tracker=True)

        self.face_tracker = MultiObjectTracker(iou_cost,
                                               centroid_cost,
                                               self.max_iou_distance,
                                               None,
                                               self.n_init,
                                               self.max_disappeared,
                                               self.max_age,
                                               use_appearance_tracker=False)

        self.person_tracker = MultiObjectTracker(iou_cost,
                                                 color_cost,
                                                 self.max_iou_distance,
                                                 self.max_color_distance,
                                                 self.n_init,
                                                 self.max_disappeared,
                                                 self.max_age,
                                                 use_appearance_tracker=False)
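
        # Three trackers run side by side: faces are matched by IoU and
        # centroid cost, objects by IoU and color histogram cost (with the
        # appearance tracker enabled), and persons by IoU and color cost;
        # observation_callback dispatches frames to them round-robin.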

        self.shape_estimator = ShapeEstimator()

        self.object_pose_estimator = ObjectPoseEstimator()

        self.tracks_publisher = rospy.Publisher("tracks",
                                                SceneChangesStamped,
                                                queue_size=1)

        self.visualization_publisher = rospy.Publisher("tracks_image",
                                                       Image,
                                                       queue_size=1)

        self.use_depth = rospy.get_param("~use_depth", False)
        if self.use_depth is True:
            rospy.loginfo(
                "[tabletop_perception] Subscribing to '{}' topic...".format(
                    self.rgb_image_topic))
            self.rgb_image_sub = message_filters.Subscriber(
                self.rgb_image_topic, Image)
            rospy.loginfo(
                "[tabletop_perception] Subscribing to '{}' topic...".format(
                    self.depth_image_topic))
            self.depth_image_sub = message_filters.Subscriber(
                self.depth_image_topic, Image)

            self.sync = message_filters.ApproximateTimeSynchronizer(
                [self.rgb_image_sub, self.depth_image_sub],
                10,
                0.2,
                allow_headerless=True)
            self.sync.registerCallback(self.observation_callback)
        else:
            rospy.loginfo(
                "[tabletop_perception] Subscribing to '{}' topic...".format(
                    self.rgb_image_topic))
            self.rgb_image_sub = rospy.Subscriber(self.rgb_image_topic,
                                                  Image,
                                                  self.observation_callback,
                                                  queue_size=1)

    def camera_info_callback(self, msg):
        if self.camera_info is None:
            rospy.loginfo("[tabletop_perception] Camera info received !")
        self.camera_info = msg
        self.camera_frame_id = msg.header.frame_id
        self.camera_matrix = np.array(msg.K).reshape((3, 3))
        self.dist_coeffs = np.array(msg.D)
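
    # A minimal sketch (hypothetical helper, not part of the original node) of
    # how the intrinsics cached above are typically used: projecting a 3D
    # point, expressed in the camera frame, onto the image plane with OpenCV.
    def project_point(self, point_3d):
        """Project one camera-frame 3D point to pixel coordinates (illustrative only)."""
        image_points, _ = cv2.projectPoints(
            np.array([point_3d], dtype=np.float64),
            np.zeros(3), np.zeros(3),  # zero rvec/tvec: point is already in the camera frame
            self.camera_matrix, self.dist_coeffs)
        return image_points[0][0]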

    def observation_callback(self, bgr_image_msg, depth_image_msg=None):
        if self.camera_info is not None:
            perception_timer = cv2.getTickCount()
            bgr_image = self.bridge.imgmsg_to_cv2(bgr_image_msg, "bgr8")
            rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
            if depth_image_msg is not None:
                depth_image = self.bridge.imgmsg_to_cv2(depth_image_msg)
            else:
                depth_image = None
            if self.publish_visualization_image is True:
                viz_frame = bgr_image

            # OpenCV images are (height, width, channels)
            image_height, image_width, _ = bgr_image.shape

            success, view_pose = self.get_pose_from_tf2(
                self.global_frame_id, self.camera_frame_id)

            if success is not True:
                rospy.logwarn(
                    "[tabletop_perception] The camera sensor is not localized in world space (frame '{}'), please check if the sensor frame is published in /tf"
                    .format(self.global_frame_id))
            else:
                view_matrix = view_pose.transform()

                # round-robin frame scheduling: frame 0 runs object detection,
                # frame 1 runs face detection, remaining frames only track
                self.frame_count %= self.n_frame
                ######################################################
                # Detection
                ######################################################

                detections = []
                if self.frame_count == 0:
                    detections = self.detector.detect(rgb_image,
                                                      depth_image=depth_image)
                    object_detections = self.foreground_detector.detect(
                        rgb_image,
                        depth_image=depth_image,
                        prior_detections=detections)
                    detections += object_detections
                elif self.frame_count == 1:
                    detections = self.face_detector.detect(
                        rgb_image, depth_image=depth_image)
                else:
                    detections = []

                ####################################################################
                # Features estimation
                ####################################################################

                self.color_features_estimator.estimate(rgb_image, detections)

                ######################################################
                # Tracking
                ######################################################

                if self.frame_count == 0:
                    object_detections = [
                        d for d in detections if d.label != "person"
                    ]
                    person_detections = [
                        d for d in detections if d.label == "person"
                    ]
                    face_tracks = self.face_tracker.update(rgb_image, [])
                    object_tracks = self.object_tracker.update(
                        rgb_image, object_detections)
                    person_tracks = self.person_tracker.update(
                        rgb_image, person_detections)
                elif self.frame_count == 1:
                    face_tracks = self.face_tracker.update(
                        rgb_image, detections)
                    object_tracks = self.object_tracker.update(rgb_image, [])
                    person_tracks = self.person_tracker.update(rgb_image, [])
                else:
                    face_tracks = self.face_tracker.update(rgb_image, [])
                    object_tracks = self.object_tracker.update(rgb_image, [])
                    person_tracks = self.person_tracker.update(rgb_image, [])

                tracks = face_tracks + object_tracks + person_tracks

                ########################################################
                # Pose & Shape estimation
                ########################################################

                self.facial_landmarks_estimator.estimate(
                    rgb_image, face_tracks)

                self.head_pose_estimator.estimate(face_tracks, view_matrix,
                                                  self.camera_matrix,
                                                  self.dist_coeffs)
                self.object_pose_estimator.estimate(
                    object_tracks + person_tracks, view_matrix,
                    self.camera_matrix, self.dist_coeffs)

                self.shape_estimator.estimate(rgb_image, tracks,
                                              self.camera_matrix,
                                              self.dist_coeffs)

                ########################################################
                # Recognition
                ########################################################

                self.facial_features_estimator.estimate(rgb_image, face_tracks)

                self.frame_count += 1
                ######################################################
                # Visualization of debug image
                ######################################################

                perception_fps = cv2.getTickFrequency() / (cv2.getTickCount() -
                                                           perception_timer)

                # viz_frame only exists when visualization is enabled, so the
                # debug overlay must be guarded as well
                if self.publish_visualization_image is True:
                    cv2.rectangle(viz_frame, (0, 0), (250, 40),
                                  (200, 200, 200), -1)
                    perception_fps_str = "Perception fps: {:0.1f} Hz".format(
                        perception_fps)
                    cv2.putText(
                        viz_frame, "Nb detections/tracks: {}/{}".format(
                            len(detections), len(tracks)), (5, 15),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
                    cv2.putText(viz_frame, perception_fps_str, (5, 30),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)

                scene_changes = SceneChangesStamped()
                scene_changes.world = "tracks"

                header = bgr_image_msg.header
                header.frame_id = self.global_frame_id
                scene_changes.header = header

                for track in tracks:
                    if self.publish_visualization_image is True:
                        track.draw(viz_frame, (230, 0, 120, 125), 1,
                                   view_matrix, self.camera_matrix,
                                   self.dist_coeffs)
                    if track.is_confirmed():
                        scene_node = track.to_msg(header)
                        if self.publish_tf is True:
                            if track.is_located() is True:
                                self.publish_tf_pose(scene_node.pose_stamped,
                                                     self.global_frame_id,
                                                     scene_node.id)
                        scene_changes.changes.nodes.append(scene_node)
                if self.publish_visualization_image is True:
                    viz_img_msg = self.bridge.cv2_to_imgmsg(viz_frame)
                    self.visualization_publisher.publish(viz_img_msg)

                self.tracks_publisher.publish(scene_changes)

    def get_pose_from_tf2(self, source_frame, target_frame, time=None):
        try:
            # default to the latest available transform
            if time is None:
                time = rospy.Time(0)
            trans = self.tf_buffer.lookup_transform(source_frame, target_frame,
                                                    time)
            x = trans.transform.translation.x
            y = trans.transform.translation.y
            z = trans.transform.translation.z

            rx = trans.transform.rotation.x
            ry = trans.transform.rotation.y
            rz = trans.transform.rotation.z
            rw = trans.transform.rotation.w
            pose = Vector6D(x=x, y=y, z=z).from_quaternion(rx, ry, rz, rw)
            return True, pose
        except (tf2_ros.LookupException, tf2_ros.ConnectivityException,
                tf2_ros.ExtrapolationException) as e:
            rospy.logwarn("[perception] Exception occured: {}".format(e))
            return False, None

    def publish_tf_pose(self, pose_stamped, source_frame, target_frame):
        transform = TransformStamped()
        transform.child_frame_id = target_frame
        transform.header.frame_id = source_frame
        transform.header.stamp = pose_stamped.header.stamp
        transform.transform.translation = pose_stamped.pose.pose.position
        transform.transform.rotation = pose_stamped.pose.pose.orientation
        self.tf_broadcaster.sendTransform(transform)
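
# A minimal launch sketch (assumed, not part of the original file): the node
# is typically started by initializing rospy, constructing the class, and
# spinning until shutdown.
if __name__ == "__main__":
    rospy.init_node("tabletop_perception")
    TabletopPerception()
    rospy.spin()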