    def get_frame_offset(self, oframe1, oframe2):
        tfl = tf.TransformListener()

        # Block until the oframe1 -> oframe2 transform is available
        tf_done = False
        while not tf_done:
            try:
                (trans1, rot1) = tfl.lookupTransform(oframe1, oframe2, rospy.Time(0))
                tf_done = True
            except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
                rospy.sleep(0.05)  # back off briefly instead of spinning

        # Block until the /world -> self.frame transform is available
        tf_done = False
        while not tf_done:
            try:
                (trans2, rot2) = tfl.lookupTransform("/world", self.frame, rospy.Time(0))
                tf_done = True
            except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
                rospy.sleep(0.05)

        print((trans1, rot1))
        print((trans2, rot2))

        f1 = tfc.fromTf((trans1, rot1))
        f2 = tfc.fromTf((trans2, rot2))

        # Compose the two transforms into the offset frame
        frame = f2 * f1

        self.transform = tfc.toTf(frame)

        tfb = tf.TransformBroadcaster()
        (t, r) = self.transform
        tfb.sendTransform(t, r, rospy.Time.now(), "EX", "/world")
        self.frame = "EX"
    def servo_fn(self, val, *args):
        if self.driver_status != 'SERVO':
            rospy.logwarn('ROBOT NOT IN SERVO MODE')
            return
        else:
            rospy.wait_for_service('/simple_ur_msgs/ServoToPose')
            try:
                pose_servo_proxy = rospy.ServiceProxy('/simple_ur_msgs/ServoToPose', ServoToPose)

                F_target_world = tf_c.fromTf(self.listener_.lookupTransform('/world', '/target_frame', rospy.Time(0)))
                F_target_base = tf_c.fromTf(self.listener_.lookupTransform('/base_link', '/target_frame', rospy.Time(0)))
                F_base_world = tf_c.fromTf(self.listener_.lookupTransform('/world', '/base_link', rospy.Time(0)))
                # Express the target in the robot base frame
                F_command = F_base_world.Inverse() * F_target_world

                msg = ServoToPoseRequest()
                msg.target = tf_c.toMsg(F_command)
                msg.accel = .7
                msg.vel = .3
                # Send Servo Command
                rospy.logwarn('Single Servo Move Started')
                result = pose_servo_proxy(msg)
                rospy.logwarn('Single Servo Move Finished')
                rospy.logwarn(str(result.ack))
            except rospy.ServiceException as e:
                print(e)
    def update(self):
        if not self.calibrated:
            try:
                T_camera_to_marker = tf_c.fromTf(
                    self.tf_listen.lookupTransform('camera_link',
                                                   'ar_marker_0',
                                                   rospy.Time(0)))
                T_base_endpoint = tf_c.fromTf(
                    self.tf_listen.lookupTransform('base_link', 'endpoint',
                                                   rospy.Time(0)))
                T_base_camera_current = tf_c.fromTf(
                    self.tf_listen.lookupTransform('base_link', 'camera_link',
                                                   rospy.Time(0)))
                T_endpoint_marker = tf_c.fromTf(
                    self.tf_listen.lookupTransform('endpoint',
                                                   'endpoint_marker',
                                                   rospy.Time(0)))

                T_base_camera = T_base_endpoint * T_endpoint_marker * T_camera_to_marker.Inverse()

                # Extract position and rotation for the new transformation
                xyz = tf_c.toTf(T_base_camera)[0]
                rpy = T_base_camera.M.GetRPY()

                # Call to service ###########
                # First check to see if service exists
                try:
                    rospy.wait_for_service(
                        '/semi_static_transform_publisher/UpdateTransform')
                except rospy.ROSException as e:
                    rospy.logerr(
                        'Could not find semi-static transform service')
                    return
                # Make servo call to set pose
                try:
                    # Setup transform to send to the semi-static node
                    update_transform_proxy = rospy.ServiceProxy(
                        '/semi_static_transform_publisher/UpdateTransform',
                        UpdateTransform)
                    msg = UpdateTransformRequest()
                    msg.x, msg.y, msg.z = xyz
                    msg.roll, msg.pitch, msg.yaw = rpy

                    # Call to service
                    update_transform_proxy(msg)

                    self.calibrated = True
                except rospy.ServiceException as e:
                    rospy.logwarn('There was a problem with the service:')
                    rospy.logwarn(e)

            except (tf.LookupException, tf.ConnectivityException,
                    tf.ExtrapolationException) as e:
                rospy.logwarn(e)
                return
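
The calibration above is plain frame algebra: T_base_camera = T_base_endpoint * T_endpoint_marker * (T_camera_marker)^-1. A self-contained PyKDL sketch with made-up transforms shows the same composition and the position/RPY extraction:

import math
import PyKDL

# Hypothetical stand-ins for the TF lookups above
T_base_endpoint = PyKDL.Frame(PyKDL.Rotation.RPY(0, 0, math.pi / 2),
                              PyKDL.Vector(0.4, 0.0, 0.3))
T_endpoint_marker = PyKDL.Frame(PyKDL.Vector(0.0, 0.0, 0.05))
T_camera_marker = PyKDL.Frame(PyKDL.Rotation.RPY(0, math.pi, 0),
                              PyKDL.Vector(0.1, 0.2, 1.0))

# base->camera = base->endpoint * endpoint->marker * (camera->marker)^-1
T_base_camera = T_base_endpoint * T_endpoint_marker * T_camera_marker.Inverse()

xyz = (T_base_camera.p.x(), T_base_camera.p.y(), T_base_camera.p.z())
rpy = T_base_camera.M.GetRPY()
print(xyz)
print(rpy)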
    def add_waypoint(self):
        if self.new_waypoint_name:
            try:
                F_waypoint = tf_c.fromTf(
                    self.listener_.lookupTransform('/world', '/endpoint',
                                                   rospy.Time(0)))
            except (tf.LookupException, tf.ConnectivityException,
                    tf.ExtrapolationException) as e:
                rospy.logerr(
                    'Could not find the tf frame for the robot endpoint')
                return
            try:
                rospy.wait_for_service('/instructor_core/AddWaypoint', 2)
            except rospy.ROSException as e:
                rospy.logerr(e)
                return
            try:
                add_waypoint_proxy = rospy.ServiceProxy(
                    '/instructor_core/AddWaypoint', AddWaypoint)
                msg = AddWaypointRequest()
                msg.name = '/' + self.new_waypoint_name
                msg.world_pose = tf_c.toMsg(F_waypoint)
                rospy.loginfo(add_waypoint_proxy(msg))
                self.update_waypoints()
            except rospy.ServiceException as e:
                rospy.logerr(e)
Example #5
    def update(self):
        for name, waypoint in self.waypoints.items():
            F = tf_c.fromMsg(waypoint)
            self.broadcaster_.sendTransform(tuple(F.p),
                                            tuple(F.M.GetQuaternion()),
                                            rospy.Time.now(), name, '/world')

        for name, waypoint_data in self.relative_waypoints.items():
            try:
                F_relative_frame_waypoint = tf_c.fromMsg(waypoint_data[0])
                relative_frame_name = waypoint_data[1]
                F_world_relative_frame = tf_c.fromTf(
                    self.listener_.lookupTransform('/world',
                                                   relative_frame_name,
                                                   rospy.Time(0)))
                F_relative_waypoint = F_world_relative_frame * F_relative_frame_waypoint
                self.broadcaster_.sendTransform(
                    tuple(F_relative_waypoint.p),
                    tuple(F_relative_waypoint.M.GetQuaternion()),
                    rospy.Time.now(), name, '/world')
                self.F_relative_waypoint_last = F_relative_waypoint
            except (tf.LookupException, tf.ConnectivityException,
                    tf.ExtrapolationException) as e:
                if self.F_relative_waypoint_last is not None:
                    # Fall back to the last known pose while the lookup fails
                    self.broadcaster_.sendTransform(
                        tuple(self.F_relative_waypoint_last.p),
                        tuple(self.F_relative_waypoint_last.M.GetQuaternion()),
                        rospy.Time.now(), name, '/world')

        self.check_landmarks()
Example #6
def transform2pose(transform, frame_id=None, time=None):
    msg = PoseStampedMsg()
    T = tf_conversions.fromTf(transform)
    msg.pose = tf_conversions.toMsg(T)
    msg.header.frame_id = frame_id
    msg.header.stamp = time
    return msg.pose if time is None else msg
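
A brief usage sketch for transform2pose (frame names are illustrative); with time=None it returns a bare Pose, otherwise a PoseStamped:

listener = tf.TransformListener()
listener.waitForTransform('/world', '/endpoint', rospy.Time(0), rospy.Duration(4.0))
tf_tuple = listener.lookupTransform('/world', '/endpoint', rospy.Time(0))

pose = transform2pose(tf_tuple)  # geometry_msgs/Pose
stamped = transform2pose(tf_tuple, frame_id='/world', time=rospy.Time.now())  # PoseStamped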
    def add_frame_to_proto(self, frame_id, proto_msg):
        pose_msg = None
        try:
            self.listener.waitForTransform(self.graspit_frame, frame_id,
                                           rospy.Time(0), rospy.Duration(100))

            pose_msg = tf_conversions.toMsg(
                tf_conversions.fromTf(
                    self.listener.lookupTransform(self.graspit_frame, frame_id,
                                                  rospy.Time(0))))

        except Exception as e:
            rospy.logerr(
                self.__class__.__name__ + "::" +
                "Failed to lookup frame transform from %s to %s -- %s" %
                (self.graspit_frame, frame_id, e.message))
        if pose_msg:
            frame_proto = proto_msg.renderable.frame
            frame_proto.frame_id = frame_id
            frame_proto.orientation.w = pose_msg.orientation.w
            frame_proto.orientation.x = pose_msg.orientation.x
            frame_proto.orientation.y = pose_msg.orientation.y
            frame_proto.orientation.z = pose_msg.orientation.z

            frame_proto.translation.x = pose_msg.position.x
            frame_proto.translation.y = pose_msg.position.y
            frame_proto.translation.z = pose_msg.position.z
            frame_proto.units = 1.0

            proto_msg.renderable.renderableFrame = frame_id.strip('/')

        return proto_msg
    def make_pointcloud_proto(self, pointcloud_msg):
        """
        :param pointcloud_msg: input sensor_msgs.msg.PointCloud2
        :type pointcloud_msg: sensor_msgs.msg.PointCloud2
        :rtype: gen_proto.GraspitMessage_pb2.GraspitProtobufMessage
        """
        pointcloud_transform = tf_conversions.toMatrix(
            tf_conversions.fromTf(
                self.listener.lookupTransform(self.graspit_frame,
                                              pointcloud_msg.header.frame_id,
                                              rospy.Time(0))))
        rospy.loginfo(self.__class__.__name__ + " is relaying message")
        if self.downsample_factor != 1:
            self.create_uvs(pointcloud_msg)
        points = sensor_msgs.point_cloud2.read_points(pointcloud_msg, None,
                                                      False, self.uvs)
        #points = sensor_msgs.point_cloud2.read_points(pointcloud_msg)

        gm = gen_proto.GraspitMessage_pb2.GraspitProtobufMessage()
        #gm.renderable.pointCloud.points.add()
        gm.renderable.pointCloud.points.extend(
            [gen_proto.Renderable_pb2.Renderable.PointXYZRGB()] *
            (pointcloud_msg.width * pointcloud_msg.height /
             (self.downsample_factor**2)))

        #renderable = gen_proto.Renderable_pb2.Renderable()
        #pointcloud = gen_proto.Renderable_pb2.Renderable.PointCloudXYZRGB()
        for ind, point in enumerate(points):
            #pointXYZRGB = gen_proto.Renderable_pb2.Renderable.PointXYZRGB()

            color = self.get_color_converted_point(point)

            point_location = numpy.dot(
                pointcloud_transform,
                numpy.array([point[:3] + (1, )]).transpose())
            renderable = gm.renderable
            assert isinstance(renderable, gen_proto.Renderable_pb2.Renderable)
            pointCloud = renderable.pointCloud
            assert isinstance(
                pointCloud,
                gen_proto.Renderable_pb2.Renderable.PointCloudXYZRGB)
            points = pointCloud.points
            assert isinstance(points,
                              gen_proto.Renderable_pb2.Renderable.PointXYZRGB)
            pointCloud.points[ind].point.x = point_location[0, 0]
            pointCloud.points[ind].point.y = point_location[1, 0]
            pointCloud.points[ind].point.z = point_location[2, 0]

            pointCloud.points[ind].color.red = color[0]
            pointCloud.points[ind].color.green = color[1]
            pointCloud.points[ind].color.blue = color[2]

        renderable.pointCloud.units = 1.0

        rospy.loginfo(
            str(self.__class__) +
            "::GraspitProtobufSocket:: Finished building protobuf " +
            "pointcloud message")
        return gm
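
create_uvs is called above but not shown; a plausible implementation (an assumption, not the original) builds the (u, v) sample grid that read_points uses to skip points when downsampling:

    def create_uvs(self, pointcloud_msg):
        # Keep every downsample_factor-th column and row of the organized cloud.
        step = self.downsample_factor
        self.uvs = [(u, v)
                    for v in range(0, pointcloud_msg.height, step)
                    for u in range(0, pointcloud_msg.width, step)]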
    def make_service_call(self, request, *args):
        # Check to see if service exists
        try:
            rospy.wait_for_service('costar/ServoToPose')
        except rospy.ROSException as e:
            rospy.logerr('Could not find servo service')
            self.finished_with_success = False
            return
        # Make servo call to set pose
        try:
            pose_servo_proxy = rospy.ServiceProxy('costar/ServoToPose',
                                                  ServoToPose)

            F_command_world = tf_c.fromTf(
                self.listener_.lookupTransform(
                    '/world', '/' + self.command_waypoint_name, rospy.Time(0)))
            F_base_world = tf_c.fromTf(
                self.listener_.lookupTransform('/world', '/base_link',
                                               rospy.Time(0)))
            F_command = F_base_world.Inverse() * F_command_world

            msg = costar_robot_msgs.srv.ServoToPoseRequest()
            msg.target = tf_c.toMsg(F_command)
            msg.vel = self.command_vel
            msg.accel = self.command_acc

            # Send Servo Command
            rospy.loginfo('Single Servo Move Started')
            result = pose_servo_proxy(msg)
            if 'FAILURE' in str(result.ack):
                rospy.logwarn('Servo failed with reply: ' + str(result.ack))
                self.finished_with_success = False
                return
            else:
                rospy.loginfo('Single Servo Move Finished')
                rospy.loginfo('Robot driver reported: ' + str(result.ack))
                self.finished_with_success = True
                return

        except (tf.LookupException, tf.ConnectivityException,
                tf.ExtrapolationException, rospy.ServiceException) as e:
            rospy.logerr('There was a problem with the tf lookup or service:')
            rospy.logerr(e)
            self.finished_with_success = False
            return
    def update_table(self):
        try:
            trans = tf_conversions.toMatrix(tf_conversions.fromTf(
                self.transform_listener.lookupTransform('world', 'object', rospy.Time(0))))
            trans[0:3, 3] *= 1000  # convert translation from meters to millimeters
            trans[2, 3] -= 70      # empirical z offset (mm) for the table surface
            self.graspit_commander.set_body_trans('experiment_table', trans)

        except Exception:
            # Drop into the post-mortem debugger on any failure (development aid)
            pdb.post_mortem()
    def try_get_world_transform(self):
        try:
            self.transform_listener.waitForTransform("/camera_rgb_optical_frame", "/world",
                                                     rospy.Time(0), rospy.Duration(.1))
        except tf.Exception:
            return False
        self.world_transform = tf_conversions.toMatrix(tf_conversions.fromTf(
            self.transform_listener.lookupTransform(
                "/camera_rgb_optical_frame", '/world', rospy.Time(0))))
        return True
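
The 4x4 matrix stored in world_transform maps homogeneous world coordinates into the camera frame; a short usage sketch (node stands in for an instance of the class above):

import numpy

if node.try_get_world_transform():
    p_world = numpy.array([1.0, 0.5, 0.0, 1.0])  # homogeneous world point
    p_cam = numpy.dot(node.world_transform, p_world)
    print(p_cam[:3])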
Example #13
    def update(self):

        all_frames = self.listener_.getFrameStrings()
        ar_frames = [f for f in all_frames if f.find('ar_marker')>=0]
        un_filtered = [f for f in ar_frames if f.find('filtered')<0]

        # Decay each frame count at every timestep
        for f in self.frames.keys():
            self.frames[f]['no_frames'] -= 1
            if self.frames[f]['no_frames'] <= 0:
                short_name = 'landmark_' + f.split('_')[-1]
                rospy.delete_param('instructor_landmark/'+short_name)
                rospy.logwarn('Deleted:' +short_name)
                self.frames.pop(f)

        for frame in un_filtered:
            try:
                F = tf_c.fromTf(self.listener_.lookupTransform('/world',frame,rospy.Time(0)))
            except (tf.LookupException, tf.ConnectivityException) as e:
                rospy.logerr('Frame ['+frame+'] not found')
                return
            except tf.ExtrapolationException as e:
                # The frame we're seeing is old
                return

            if frame not in self.frames.keys():
                self.frames[frame] = {'no_frames':0, 'poses':[], 'average_pose':None}
                rospy.logwarn('New Frame:' + frame)

            # rospy.logwarn(self.frames[frame]['no_frames'])
            if self.frames[frame]['no_frames'] < self.buffer:
                self.frames[frame]['no_frames'] += 2
                self.frames[frame]['poses'].append(F)
            else:
                self.frames[frame]['poses'].pop(0)
                self.frames[frame]['poses'].pop(0)
                self.frames[frame]['poses'].append(F)

        for frame in self.frames.keys():
            # Get all stored frame positions/rotations
            sum_xyz = [tf_c.toTf(f)[0] for f in self.frames[frame]['poses']]
            sum_rpy = [f.M.GetRPY() for f in self.frames[frame]['poses']]

            if len(sum_xyz) > 2:
                xyz = np.mean(sum_xyz, 0)
                rpy = np.mean(sum_rpy, 0)

                F_avg = PyKDL.Frame()
                F_avg.p = PyKDL.Vector(*xyz)
                F_avg.M = PyKDL.Rotation.RPY(*rpy)

                self.broadcaster_.sendTransform(tuple(F_avg.p),tuple(F_avg.M.GetQuaternion()),rospy.Time.now(), '/filtered/'+frame, '/world')
                # rosparam_marker_name = "instructor_landmark/{}".format(str('filtered/'+frame))
                # rospy.set_param(rosparam_marker_name, str('filtered/'+frame))
                short_name = 'landmark_' + frame.split('_')[-1]
                rospy.set_param('instructor_landmark/'+short_name, str('filtered/'+frame))
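
Note that the filter above averages RPY angles componentwise, which is only safe while all samples stay away from the +/-pi wrap-around. The averaging step, factored out as a sketch:

import numpy as np
import PyKDL

def average_frames(frames):
    # Componentwise mean of positions and RPY angles, as in the filter above.
    # Caveat: RPY means are unreliable near the +/-pi wrap-around; averaging
    # quaternions is the more robust alternative.
    xyz = np.mean([tuple(f.p) for f in frames], 0)
    rpy = np.mean([f.M.GetRPY() for f in frames], 0)
    F_avg = PyKDL.Frame()
    F_avg.p = PyKDL.Vector(*xyz)
    F_avg.M = PyKDL.Rotation.RPY(*rpy)
    return F_avg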
    def update(self):
        if self.follow != 'DISABLED':
            try:
                if self.follow == 'MARKER':
                    F_target_world = tf_c.fromTf(self.listener_.lookupTransform('/world','/target_frame',rospy.Time(0)))
                    F_target_base = tf_c.fromTf(self.listener_.lookupTransform('/base_link','/target_frame',rospy.Time(0)))
                elif self.follow == 'INTERACT':
                    F_target_world = tf_c.fromTf(self.listener_.lookupTransform('/world','/endpoint_interact',rospy.Time(0)))
                    F_target_base = tf_c.fromTf(self.listener_.lookupTransform('/base_link','/endpoint_interact',rospy.Time(0)))

                F_base_world = tf_c.fromTf(self.listener_.lookupTransform('/world','/base_link',rospy.Time(0)))
                F_command = F_base_world.Inverse()*F_target_world

                cmd = PoseStamped()
                cmd.pose = tf_c.toMsg(F_command)
                self.target_pub.publish(cmd)

            except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException) as e:
                rospy.logwarn(str(e))
Example #15
    def update_from_marker(self):
        try:
            F_target_world = tf_c.fromTf(self.listener_.lookupTransform('/world','/endpoint_interact',rospy.Time(0)))
            F_target_base = tf_c.fromTf(self.listener_.lookupTransform('/base_link','/endpoint_interact',rospy.Time(0)))
            F_base_world = tf_c.fromTf(self.listener_.lookupTransform('/world','/base_link',rospy.Time(0)))
            self.F_command = F_base_world.Inverse()*F_target_world
            M_command = tf_c.toMatrix(self.F_command)

            joint_positions = self.kdl_kin.inverse(M_command, self.q) # inverse kinematics
            if joint_positions is not None:
                pose_sol = self.kdl_kin.forward(joint_positions) # should equal pose
                self.q = joint_positions
            else:
                rospy.logwarn('no solution found')

            # self.send_command()

        except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException) as e:
            rospy.logwarn(str(e))
    def _get_world_transform(self):
        try:
            self._transform_listener.waitForTransform("/camera_rgb_optical_frame", "/world",
                                                      rospy.Time(0), rospy.Duration(10))
        except tf.Exception:
            rospy.logwarn("Failed to get world transform for: " + self.__class__.__name__)
            return None
        world_transform = tf_conversions.toMatrix(tf_conversions.fromTf(
            self._transform_listener.lookupTransform(
                "/camera_rgb_optical_frame", '/world', rospy.Time(0))))
        return world_transform
Example #17
def get_world_transform():
    transform_listener = tf.TransformListener()
    try:
        transform_listener.waitForTransform("/camera_rgb_optical_frame", "/world", rospy.Time(0), rospy.Duration(10))
    except tf.Exception:
        rospy.logwarn("Failed to get world transform")
        return None
    world_transform = tf_conversions.toMatrix(tf_conversions.fromTf(
            transform_listener.lookupTransform(
            "/camera_rgb_optical_frame", '/world', rospy.Time(0))))
    return world_transform
    def make_service_call(self,request,*args):
        # Check to see if service exists
        try:
            rospy.wait_for_service('/simple_ur_msgs/ServoToPose')
        except rospy.ROSException as e:
            rospy.logerr('Could not find servo service')
            self.finished_with_success = False
            return
        # Make servo call to set pose
        try:
            pose_servo_proxy = rospy.ServiceProxy('/simple_ur_msgs/ServoToPose',ServoToPose)
            
            F_command_world = tf_c.fromTf(self.listener_.lookupTransform('/world', '/'+self.command_waypoint_name, rospy.Time(0)))
            F_base_world = tf_c.fromTf(self.listener_.lookupTransform('/world','/base_link',rospy.Time(0)))
            F_command = F_base_world.Inverse()*F_command_world
                
            msg = simple_ur_msgs.srv.ServoToPoseRequest()
            msg.target = tf_c.toMsg(F_command)
            msg.vel = self.command_vel
            msg.accel = self.command_acc
            # Send Servo Command
            rospy.loginfo('Single Servo Move Started')
            result = pose_servo_proxy(msg)
            if 'FAILURE' in str(result.ack):
                rospy.logwarn('Servo failed with reply: '+ str(result.ack))
                self.finished_with_success = False
                return
            else:
                rospy.loginfo('Single Servo Move Finished')
                rospy.logwarn('Robot driver reported: '+str(result.ack))
                self.finished_with_success = True
                return

        except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException, rospy.ServiceException) as e:
            rospy.logwarn('There was a problem with the tf lookup or service:')
            rospy.logerr(e)
            self.finished_with_success = False
            return
Example #19
    def _get_world_transform(self):
        try:
            self._transform_listener.waitForTransform(
                "/camera_rgb_optical_frame", "/world", rospy.Time(0),
                rospy.Duration(10))
        except tf.Exception:
            rospy.logwarn("Failed to get world transform for: " +
                          self.__class__.__name__)
            return None
        world_transform = tf_conversions.toMatrix(
            tf_conversions.fromTf(
                self._transform_listener.lookupTransform(
                    "/camera_rgb_optical_frame", '/world', rospy.Time(0))))
        return world_transform
Example #20
    def check_landmarks(self):
        params = rospy.get_param_names()
        landmarks = [p for p in params if p.find('instructor_landmark') >= 0]
        #rospy.logwarn("landmarks: " + str(landmarks))
        all_landmark_names = []

        for L in landmarks:
            all_landmark_names.append(L.replace('/instructor_landmark/', ''))

        for L in landmarks:
            landmark_name = L.replace('/instructor_landmark/', '')
            #rospy.logwarn(landmark_name)
            try:
                landmark_tf_name = rospy.get_param(L)
            except KeyError:
                rospy.logerr("Could not find landmark %s!" % (L))
                continue
            #rospy.logwarn(landmark_tf_name)

            try:
                self.listener_.waitForTransform('/world', landmark_tf_name,
                                                rospy.Time(),
                                                rospy.Duration(4.0))
                try:
                    F = tf_c.fromTf(
                        self.listener_.lookupTransform('/world',
                                                       landmark_tf_name,
                                                       rospy.Time(0)))
                    self.saved_landmark_frames[landmark_name] = F
                except (tf.LookupException, tf.ConnectivityException) as e:
                    rospy.logerr('Frame [' + landmark_tf_name + '] not found')
                    return

                if landmark_name not in self.landmarks.keys():
                    self.landmarks[landmark_name] = '/' + landmark_name
                    # short_name = 'landmark_' + landmark_name.split('_')[len(landmark_name.split('_'))-1:][0]
                    self.add_landmark_marker('/' + landmark_name,
                                             landmark_name)
                else:
                    if landmark_name in self.stale_landmarks.keys():
                        self.stale_landmarks.pop(landmark_name)
                        self.add_landmark_marker('/' + landmark_name,
                                                 landmark_name)
            except tf.Exception as e:
                rospy.logerr('Frame [' + landmark_tf_name +
                             '] not found (TIMEOUT)')
                if rospy.has_param(L):
                    rospy.delete_param(L)
    def make_pointcloud_proto(self, pointcloud_msg):
        """
        :param pointcloud_msg: input sensor_msgs.msg.PointCloud2
        :type pointcloud_msg: sensor_msgs.msg.PointCloud2
        :rtype: gen_proto.GraspitMessage_pb2.GraspitProtobufMessage
        """
        pointcloud_transform = tf_conversions.toMatrix(tf_conversions.fromTf(self.listener.lookupTransform(
            self.graspit_frame, pointcloud_msg.header.frame_id, rospy.Time(0))))
        rospy.loginfo(self.__class__.__name__ + " is relaying message")
        if self.downsample_factor != 1:
            self.create_uvs(pointcloud_msg)
        points = sensor_msgs.point_cloud2.read_points(pointcloud_msg, None, False, self.uvs)
        #points = sensor_msgs.point_cloud2.read_points(pointcloud_msg)

        gm = gen_proto.GraspitMessage_pb2.GraspitProtobufMessage()
        #gm.renderable.pointCloud.points.add()
        gm.renderable.pointCloud.points.extend(
            [gen_proto.Renderable_pb2.Renderable.PointXYZRGB()] *
            (pointcloud_msg.width*pointcloud_msg.height/(self.downsample_factor**2)))


        #renderable = gen_proto.Renderable_pb2.Renderable()
        #pointcloud = gen_proto.Renderable_pb2.Renderable.PointCloudXYZRGB()
        for ind,point in enumerate(points):
            #pointXYZRGB = gen_proto.Renderable_pb2.Renderable.PointXYZRGB()

            color = self.get_color_converted_point(point)

            point_location = numpy.dot(pointcloud_transform, numpy.array([point[:3] + (1,)]).transpose())
            renderable = gm.renderable
            assert isinstance(renderable, gen_proto.Renderable_pb2.Renderable)
            pointCloud = renderable.pointCloud
            assert isinstance(pointCloud, gen_proto.Renderable_pb2.Renderable.PointCloudXYZRGB)
            points = pointCloud.points
            assert isinstance(points, gen_proto.Renderable_pb2.Renderable.PointXYZRGB)
            pointCloud.points[ind].point.x = point_location[0, 0]
            pointCloud.points[ind].point.y = point_location[1, 0]
            pointCloud.points[ind].point.z = point_location[2, 0]

            pointCloud.points[ind].color.red = color[0]
            pointCloud.points[ind].color.green = color[1]
            pointCloud.points[ind].color.blue = color[2]

        renderable.pointCloud.units = 1.0

        rospy.loginfo(str(self.__class__) + "::GraspitProtobufSocket:: Finished building protobuf "
                                          + "pointcloud message")
        return gm
Example #22
def get_frames(num, step, trans, rot, frame=None, off=0, suffix=""):
    # NOTE: relies on the module-level globals `verbose`,
    # `flipped_frames_allowed`, and `frame2` (base name for generated frames).

    f = tfc.fromTf((trans, rot))

    bc = tf.TransformBroadcaster()

    names = []

    for i in range(1, num+1):
        r = tfc.Rotation(f.M)

        (roll, pitch, yaw) = r.GetRPY()
        r = tfc.Rotation.RPY(roll, pitch, yaw + step)

        rf = tfc.Frame(tfc.Rotation.RotZ(step))
        rfz = tfc.Frame(tfc.Rotation.RPY(0,3.14159,0))

        p = rf * tfc.Vector(f.p)
        p2 = tfc.Vector(-1*p.x(), -1*p.y(), -1*p.z())

        r2 = tfc.Rotation.RPY(roll + 3.14159, pitch, (yaw + step))

        f = tfc.Frame(r, p)
        f2 = tfc.Frame(r2, p2)

        (trans2, rot2) = tfc.toTf(f)
        (trans3, rot3) = tfc.toTf(f2)

        if verbose:
            print((trans2, rot2))
            if flipped_frames_allowed:
                print((trans3, rot3))

        if frame is not None:
            bc.sendTransform(trans2, rot2, rospy.Time.now(), frame2 + suffix + "_gen" + str(off+i), frame)
            names.append(frame2 + suffix + "_gen" + str(off+i))

            if flipped_frames_allowed:
                bc.sendTransform(trans3, rot3, rospy.Time.now(), frame2  + suffix + "_flip" + str(off+i), frame)
                names.append(frame2 + suffix + "_flip" + str(off+i))

        if verbose:
            print("---" + str(i) + "---")

    return names
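
A usage sketch for get_frames; it assumes the module-level globals noted above have been set (values here are illustrative):

import math

rospy.init_node('frame_generator')

verbose = True                  # globals consumed by get_frames
flipped_frames_allowed = False
frame2 = 'object'               # base name for generated child frames

trans = (0.2, 0.0, 0.1)
rot = (0.0, 0.0, 0.0, 1.0)
# Eight frames, each rotated a further 45 degrees about Z relative to /world.
names = get_frames(8, math.pi / 4, trans, rot, frame='/world')
print(names)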
    def update(self):
        for name,waypoint in self.waypoints.items():
            F = tf_c.fromMsg(waypoint)
            self.broadcaster_.sendTransform(tuple(F.p),tuple(F.M.GetQuaternion()),rospy.Time.now(), name, '/world')

        for name,waypoint_data in self.relative_waypoints.items():
            try:
                F_relative_frame_waypoint = tf_c.fromMsg(waypoint_data[0])
                relative_frame_name = waypoint_data[1]
                F_world_relative_frame = tf_c.fromTf(self.listener_.lookupTransform('/world', relative_frame_name, rospy.Time(0)))
                F_relative_waypoint = F_world_relative_frame*F_relative_frame_waypoint
                self.broadcaster_.sendTransform(tuple(F_relative_waypoint.p),tuple(F_relative_waypoint.M.GetQuaternion()),rospy.Time.now(), name, '/world')
                self.F_relative_waypoint_last = F_relative_waypoint
            except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException) as e:
                if self.F_relative_waypoint_last is not None:
                    self.broadcaster_.sendTransform(tuple(self.F_relative_waypoint_last.p),tuple(self.F_relative_waypoint_last.M.GetQuaternion()),rospy.Time.now(), name, '/world')

        self.check_landmarks()
    def add_waypoint(self):
        if self.new_waypoint_name:
            try:
                F_waypoint = tf_c.fromTf(self.listener_.lookupTransform('/world', '/endpoint', rospy.Time(0)))
            except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException) as e:
                rospy.logerr('Could not find the tf frame for the robot endpoint')
                return
            try:
                rospy.wait_for_service('/instructor_core/AddWaypoint', 2)
            except rospy.ROSException as e:
                rospy.logerr(e)
                return
            try:
                add_waypoint_proxy = rospy.ServiceProxy('/instructor_core/AddWaypoint', AddWaypoint)
                msg = AddWaypointRequest()
                msg.name = '/' + self.new_waypoint_name
                msg.world_pose = tf_c.toMsg(F_waypoint)
                rospy.loginfo(add_waypoint_proxy(msg))
                self.update_waypoints()
            except rospy.ServiceException as e:
                rospy.logerr(e)
    def check_landmarks(self):
        params = rospy.get_param_names()
        landmarks = [p for p in params if p.find('instructor_landmark')>=0]
        #rospy.logwarn("landmarks: " + str(landmarks))
        all_landmark_names =[]

        for L in landmarks:
            all_landmark_names.append(L.replace('/instructor_landmark/',''))

        for L in landmarks:
            landmark_name = L.replace('/instructor_landmark/','')
            #rospy.logwarn(landmark_name)
            try:
                landmark_tf_name = rospy.get_param(L)
            except KeyError:
                rospy.logerr("Could not find landmark %s!"%(L))
                continue
            #rospy.logwarn(landmark_tf_name)

            try:
                self.listener_.waitForTransform('/world', landmark_tf_name, rospy.Time(), rospy.Duration(4.0))
                try:
                    F = tf_c.fromTf(self.listener_.lookupTransform('/world', landmark_tf_name, rospy.Time(0)))
                    self.saved_landmark_frames[landmark_name] = F
                except (tf.LookupException, tf.ConnectivityException) as e:
                    rospy.logerr('Frame [' + landmark_tf_name + '] not found')
                    return

                if landmark_name not in self.landmarks.keys():
                    self.landmarks[landmark_name] = '/' + landmark_name
                    # short_name = 'landmark_' + landmark_name.split('_')[len(landmark_name.split('_'))-1:][0]
                    self.add_landmark_marker('/' + landmark_name, landmark_name)
                else:
                    if landmark_name in self.stale_landmarks.keys():
                        self.stale_landmarks.pop(landmark_name)
                        self.add_landmark_marker('/' + landmark_name, landmark_name)
            except tf.Exception as e:
                rospy.logerr('Frame [' + landmark_tf_name + '] not found (TIMEOUT)')
                if rospy.has_param(L):
                    rospy.delete_param(L)
Example #26
def Map2localization(trans):
    global estimated_tf, init, localize, offset_hist, map2odom
    offset = TransformStamped()
    # We want to transform the detected aruco pose to map frame to do a reasonable comparison
    qinv = quaternion_from_euler(0, 0, 0)
    qdetected = quaternion_from_euler(0, 0, 0)
    if init:
        #Making sure that a Kalman filter is initialized for each marker if we don't have the Aruco Marker registered in the dictionary.
        estimated_tf = cv2.KalmanFilter(6, 6)  # initialize the Kalman filter
        trans_mat = np.identity(6, np.float32)
        estimated_tf.transitionMatrix = trans_mat  #x'=Ax+BU         transition matrix is A
        estimated_tf.measurementMatrix = trans_mat
        estimated_tf.processNoiseCov = cv2.setIdentity(
            estimated_tf.processNoiseCov, 1e-3)  #4
        estimated_tf.measurementNoiseCov = cv2.setIdentity(
            estimated_tf.measurementNoiseCov, 1e-2)  #2
        estimated_tf.errorCovPost = cv2.setIdentity(
            estimated_tf.errorCovPost)  #, 1)
        init = False

    if not buff.can_transform(trans.child_frame_id, 'cf1/odom',
                              trans.header.stamp, rospy.Duration(0.5)):
        rospy.logwarn_throttle(
            5.0, 'No transform from %s to map' % trans.child_frame_id)
        return
    else:
        t = TransformStamped()
        try:
            # We want to keep the relative position.... We can calculate the error betwen these frames.
            t = buff.lookup_transform('cf1/odom', trans.child_frame_id,
                                      rospy.Time(0), rospy.Duration(0.5))
        except (tf2_ros.LookupException, tf2_ros.ConnectivityException,
                tf2_ros.ExtrapolationException):
            rospy.logwarn_throttle(
                5.0, 'No transform from %s to odom' % trans.child_frame_id)

            return

        #t.transform.translation.z=0.35
        #==============================================================================
        #Ludvig's solution
        #==============================================================================
        Fodom = tf_conversions.fromTf(
            ((t.transform.translation.x, t.transform.translation.y,
              t.transform.translation.z),
             (t.transform.rotation.x, t.transform.rotation.y,
              t.transform.rotation.z, t.transform.rotation.w)))
        Fmap = tf_conversions.fromMsg(json_markers[int(
            trans.child_frame_id[-1])])

        Fdiff = Fmap * Fodom.Inverse()

        offset = TransformStamped()
        offset.header.stamp = rospy.Time.now()
        offset.header.frame_id = 'map'
        offset.child_frame_id = 'cf1/odom'
        ((offset.transform.translation.x, offset.transform.translation.y,
          offset.transform.translation.z),
         (offset.transform.rotation.x, offset.transform.rotation.y,
          offset.transform.rotation.z,
          offset.transform.rotation.w)) = tf_conversions.toTf(Fdiff)
        #======================================
        #======================================
        #=====================================
        #offset.transform.translation.z = 0 # Not considering any changes in z-axis
        #print("before filter " + str(offset.transform.translation.x) + " " + str(offset.transform.translation.y) + " " + str(offset.transform.translation.z))
        """
        print("")
        offset.transform.translation.x = butter_lowpass_filter(offset.transform.translation.x, cutoff, fs, order)
        offset.transform.translation.y = butter_lowpass_filter(offset.transform.translation.y, cutoff, fs, order)
        offset.transform.translation.z = butter_lowpass_filter(offset.transform.translation.z, cutoff, fs, order)
        #offset.transform.rotation.x = butter_lowpass_filter(offset.transform.rotation.x, cutoff, fs, order)
        #offset.transform.rotation.y = butter_lowpass_filter(offset.transform.rotation.y, cutoff, fs, order)
        #offset.transform.rotation.z = butter_lowpass_filter(offset.transform.rotation.z, cutoff, fs, order)
        #offset.transform.rotation.w = butter_lowpass_filter(offset.transform.rotation.w, cutoff, fs, order)
        """
        #print("after filter " + str(offset.transform.translation.x) + " " + str(offset.transform.translation.y) + " " + str(offset.transform.translation.z))

        #=====================================
        #offset_hist.append((offset.transform.translation.x,offset.transform.translation.y,offset.transform.translation.z))
        #offset_hist.append(offset.transform.translation.z)

        # pp.pprint(offset_hist)
        # print('list\n')
        #print(offset_hist)
        # if len(offset_hist)>1:
        #     offset_hist=offset_hist[-1:]
        #     #print(offset_hist)
        #     average = np.mean(offset_hist, axis=0)
        #     #print(average)
        #     offset.transform.translation.x =average[0]
        #     offset.transform.translation.y =average[1]
        #     offset.transform.translation.z =average[2]
        #     #offset.transform.translation.y = average[1]
        # #     print('hi!')
        # #     Fdiff=np.average(offset_hist)
        # #     print('list 2\n')
        # #     pp.pprint(Fdiff)
        # #     #sum(offset_hist)/float(len(offset_hist))
        #     del offset_hist[:]
        #     offset.transform.translation.z = 0
        # #     #=====================================

        #     tf_bc.sendTransform(offset)
        #     map2odom=offset
        #     localize.data=True
        #     pub_tf.publish(localize)
        #     #=====================================

        #offset.transform.translation.z = 0 # Not considering any changes in z-axis

        filtered_offset = copy.deepcopy(offset)

        # ########Update the Kalman filter
        rot_ang = np.array([
            filtered_offset.transform.rotation.x,
            filtered_offset.transform.rotation.y,
            filtered_offset.transform.rotation.z,
            filtered_offset.transform.rotation.w
        ], np.float32).reshape(-1, 1)
        translation = np.array([
            filtered_offset.transform.translation.x,
            filtered_offset.transform.translation.y
        ], np.float32).reshape(-1, 1)

        # measurement vector: (x, y, qx, qy, qz, qw)
        measurements = np.concatenate((translation, rot_ang))
        prediction = estimated_tf.predict()
        estimation = estimated_tf.correct(measurements.reshape(-1, 1))

        offset.transform.translation.x = estimation[0]
        offset.transform.translation.y = estimation[1]
        offset.transform.translation.z = 0
        offset.transform.rotation.x = estimation[2]
        offset.transform.rotation.y = estimation[3]
        offset.transform.rotation.z = estimation[4]
        offset.transform.rotation.w = estimation[5]
        n = np.linalg.norm([
            offset.transform.rotation.x, offset.transform.rotation.y,
            offset.transform.rotation.z, offset.transform.rotation.w
        ])
        # #Normalize the quaternion
        offset.transform.rotation.x /= n
        offset.transform.rotation.y /= n
        offset.transform.rotation.z /= n
        offset.transform.rotation.w /= n
        #tf_bc.sendTransform(offset)
        map2odom = offset
        localize.data = True
        pub_tf.publish(localize)
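
The filter above treats the 6-vector (x, y, qx, qy, qz, qw) as both state and measurement with identity dynamics. A standalone sketch of that predict/correct cycle (numbers made up):

import cv2
import numpy as np

kf = cv2.KalmanFilter(6, 6)  # 6 states, 6 measurements, identity model
kf.transitionMatrix = np.identity(6, np.float32)
kf.measurementMatrix = np.identity(6, np.float32)
cv2.setIdentity(kf.processNoiseCov, 1e-3)
cv2.setIdentity(kf.measurementNoiseCov, 1e-2)
cv2.setIdentity(kf.errorCovPost)

measurement = np.array([0.1, -0.2, 0.0, 0.0, 0.0, 1.0],
                       np.float32).reshape(-1, 1)
kf.predict()
estimate = kf.correct(measurement)  # smoothed (x, y, qx, qy, qz, qw)
print(estimate.ravel())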
Example #27
    def update(self):
        # Return if the calibration is finished
        if not self.update_poses:
            return

        # Get all current TF frames. Add any new ones to AR tag list.
        current_frames = self.tf_listen.getFrameStrings()
        current_frames = [
            f for f in current_frames
            if "ar_marker" in f and not ("filtered" in f)
        ]
        # current_frames = [f for f in current_frames if "ar_marker" in f and "filtered" in f]
        for f in current_frames:
            # Detect which camera the marker is sent from
            for c in range(self.n_cameras):
                if self.cameras[c] in f:
                    camera = c
                    break

            # Find tag name, camera frame, and pose
            fid = f[f.find("ar_marker_") + len("ar_marker") + 1:len(f)]
            camera_link = "{}_link".format(self.cameras[camera])

            try:
                ret = tf_c.fromTf(
                    self.tf_listen.lookupTransform(camera_link, f,
                                                   rospy.Time(0)))
            except (tf.LookupException, tf.ConnectivityException,
                    tf.ExtrapolationException):
                print("Can't get TF camera->marker %s %s" % (camera_link, f))
                continue

            # Check if we've seen this marker before
            if fid not in self.camera_marker_frame_names[camera].keys():
                self.camera_marker_frame_names[camera][fid] = {
                    'name': f,
                    'count': 1
                }
                print "New marker in calibration:", camera, fid, f
            else:
                self.camera_marker_frame_names[camera][fid]['count'] += 1

        # Check for overlapping frames
        marker_1_names = self.camera_marker_frame_names[0].keys()
        marker_2_names = self.camera_marker_frame_names[1].keys()
        overlapping_markers = list(
            set(marker_1_names).intersection(marker_2_names))
        n_overlapping_markers = len(overlapping_markers)
        print "---1---", self.camera_marker_frame_names[0]
        print "---2---", self.camera_marker_frame_names[1]

        # Check if camera 1 sees any fiducials
        if len(self.camera_marker_frame_names[0]) > 0:
            # if the other camera has seen a fiducial then make sure we're seeing the same one
            if all(self.cameras_calibrated):
                if n_overlapping_markers > 0:
                    self._widget.shoulder_status_label.setText(
                        'FOUND FIDUCIAL')
                    self._widget.shoulder_status_label.setStyleSheet(
                        'color:#ffffff;background-color:#9AD111')
                    self.cameras_calibrated[0] = True
                else:
                    self._widget.shoulder_status_label.setText(
                        "FIDUCIALS DON'T MATCH")
            # Else if the only marker seen then say 'found'
            else:
                self._widget.shoulder_status_label.setText('FOUND FIDUCIAL')
                self._widget.shoulder_status_label.setStyleSheet(
                    'color:#ffffff;background-color:#9AD111')
                self.cameras_calibrated[0] = True

        # Check if camera 2 sees any fiducials
        if len(self.camera_marker_frame_names[1]) > 0:
            # if the other camera has seen a fiducial then make sure we're seeing the same one
            if all(self.cameras_calibrated):
                n_overlapping_markers = len(
                    set(marker_1_names).intersection(marker_2_names))
                if n_overlapping_markers > 0:
                    self._widget.wrist_status_label.setText('FOUND FIDUCIAL')
                    self._widget.wrist_status_label.setStyleSheet(
                        'color:#ffffff;background-color:#9AD111')
                    self.cameras_calibrated[1] = True
                else:
                    self._widget.wrist_status_label.setText(
                        "FIDUCIALS DON'T MATCH")
            # Else if the only marker seen then say 'found'
            else:
                self._widget.wrist_status_label.setText('FOUND FIDUCIAL')
                self._widget.wrist_status_label.setStyleSheet(
                    'color:#ffffff;background-color:#9AD111')
                self.cameras_calibrated[1] = True

        # Check if all cameras are calibrated
        if self.cameras_calibrated[0] and self.cameras_calibrated[1]:
            # First verify that the camera links (base to optical frames) are valid
            try:
                ret = self.tf_listen.lookupTransform(
                    "/base_link", "camera_1_rgb_optical_frame", rospy.Time(0))
                ret = self.tf_listen.lookupTransform(
                    "/base_link", "camera_2_rgb_optical_frame", rospy.Time(0))
            except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
                txt = "ROBOT NOT CONNECTED"
                self._widget.robot_calibration_status_label.setText(txt)
                return

            # Check that each camera has seen the same fiducial
            if n_overlapping_markers == 0:
                self._widget.robot_calibration_status_label.setText(
                    "FIDUCIALS DON'T MATCH")
                return

            # Get transform from robot base to camera 2
            """
            Want: World -> Cam2
            Have:
                Cam1->Marker1
                Cam2->Marker2
                Robot->Cam1
            Robot->Cam2 = (Robot->Cam1) * (Cam1->Marker1) * (Cam2->Marker2)^-1
            """

            # Find one of the markers seen by both cameras
            # print overlapping_markers, self.camera_marker_frame_names[0].keys(), self.camera_marker_frame_names[1].keys()
            marker_1_counts = [
                self.camera_marker_frame_names[0][x]['count']
                for x in overlapping_markers
            ]
            marker_2_counts = [
                self.camera_marker_frame_names[1][x]['count']
                for x in overlapping_markers
            ]
            overlap_count = [(marker_1_counts[i] + marker_2_counts[i]) / 2.
                             for i in range(n_overlapping_markers)]
            ar_idx = overlapping_markers[np.argmax(overlap_count)]

            marker_1 = self.camera_marker_frame_names[0][ar_idx]['name']
            marker_2 = self.camera_marker_frame_names[1][ar_idx]['name']
            # print "Overlapping name:", marker_1, marker_2

            base_frame = '/base_link'
            try:
                T_base_camera_1 = tf_c.fromTf(
                    self.tf_listen.lookupTransform(base_frame,
                                                   '/camera_1_link',
                                                   rospy.Time(0)))
            except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
                rospy.logwarn("Can't get TF robot->camera1")
                return

            try:
                T_camera_1_marker_1 = tf_c.fromTf(
                    self.tf_listen.lookupTransform('/camera_1_link', marker_1,
                                                   rospy.Time(0)))
            except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
                rospy.logwarn("Can't get TF camera1->marker")
                return
            try:
                T_camera_2_marker_2 = tf_c.fromTf(
                    self.tf_listen.lookupTransform('/camera_2_link', marker_2,
                                                   rospy.Time(0)))
            except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
                rospy.logwarn("Can't get TF camera2->marker")
                return

            T_base_camera_2 = T_base_camera_1 * T_camera_1_marker_1 * T_camera_2_marker_2.Inverse()

            # Extract position and rotation for the new transformation
            xyz = tf_c.toTf(T_base_camera_2)[0]
            rpy = T_base_camera_2.M.GetRPY()

            # Smooth out output frame to prevent large changes due to noise marker normals
            self.all_xyz += [xyz]
            self.all_rpy += [rpy]

            if len(self.all_xyz) > 2:
                xyz = np.mean(self.all_xyz, 0)
                rpy = np.mean(self.all_rpy, 0)
                # xyz = np.median(self.all_xyz, 0)
                # rpy = np.median(self.all_rpy, 0)
            else:
                return

            # Send the transform to the semi-static transformer
            # First check to see if service exists
            try:
                rospy.wait_for_service(
                    '/semi_static_transform_publisher/UpdateTransform')
            except rospy.ROSException as e:
                rospy.logerr('Could not find semi-static transform service')
                return
            # Make servo call to set pose
            try:
                # Setup transform to send to the semi-static node
                update_transform_proxy = rospy.ServiceProxy(
                    '/semi_static_transform_publisher/UpdateTransform',
                    UpdateTransform)
                msg = UpdateTransformRequest()
                msg.x, msg.y, msg.z = xyz
                msg.roll, msg.pitch, msg.yaw = rpy

                # Call to service
                update_transform_proxy(msg)
            except rospy.ServiceException as e:
                rospy.logwarn('There was a problem with the service:')
                rospy.logwarn(e)

            # Update GUI
            self._widget.robot_calibration_status_label.setText(
                'ROBOT CALIBRATED')
            self._widget.robot_calibration_status_label.setStyleSheet(
                'color:#ffffff;background-color:#9AD111')
            self._widget.done_btn.setStyleSheet('color:#429611')
            self.calibrated = True
class MotionRellocNode:
    def __init__(self):
        action_ns = rospy.get_param('~action_ns', '')
        robot_name = rospy.get_param('~robot_name')
        self.lock = threading.RLock()

        self.publish_rate = rospy.get_param('~publish_rate', 50)

        sec = rospy.get_param('~tf_exp_time', 90.0)
        self.tf_exp_time = rospy.Duration(sec)

        self.human_frame = rospy.get_param('~human_frame_id',
                                           'human_footprint')
        self.robot_root_frame = rospy.get_param('~robot_root_frame',
                                                robot_name + '/odom')
        drone_goto_action_ns = rospy.get_param(
            '~drone_goto_action_ns', '/' + robot_name + '/goto_action')
        drone_shape_action_ns = rospy.get_param(
            '~drone_followpath_action_ns',
            '/' + robot_name + '/followshape_action')
        drone_followme_action_ns = rospy.get_param(
            '~drone_followme_action_ns', '/' + robot_name + '/followme_action')

        self.ray_origin_frame = rospy.get_param('~ray_origin_frame', 'eyes')
        self.ray_direction_frame = rospy.get_param('~ray_direction_frame',
                                                   'pointer')
        self.ray_inverse = rospy.get_param('~ray_inverse', False)

        pose_topic = rospy.get_param('~robot_pose_topic',
                                     '/' + robot_name + '/odom/pose/pose')
        pose_topic_class, pose_real_topic, pose_eval_func = rostopic.get_topic_class(
            pose_topic)
        self.robot_pose_msg_eval = pose_eval_func

        ############ FIXME ############
        trigger_topic = rospy.get_param('~trigger_topic',
                                        '/' + robot_name + '/joy/buttons[6]')
        trigger_topic_class, trigger_real_topic, trigger_eval_func = rostopic.get_topic_class(
            trigger_topic)
        self.trigger_msg_eval = trigger_eval_func
        # self.trigger_sub = rospy.Subscriber(trigger_real_topic, trigger_topic_class, self.trigger_cb)
        self.trigger_val = None
        self.last_trigger_val = None
        ###############################

        self.timewindow = rospy.get_param('~timewindow', 5.0)
        self.sync_freq = rospy.get_param('~freq', 20.0)
        self.sample_size = rospy.get_param('~sample_size',
                                           self.sync_freq * 3.0)

        self.residual_threshold = np.radians(
            rospy.get_param('~residual_threshold_deg', 3.0))

        self.robot_motion_span_min = rospy.get_param('~robot_motion_span_min',
                                                     0.20)  # 20 cm

        if self.timewindow and self.sync_freq:
            self.queue_size = int(self.timewindow * self.sync_freq)
            # e.g. a 5.0 s window at 20 Hz -> queue of 100 entries
            rospy.loginfo('Max queue size: {}'.format(self.queue_size))
            if self.sample_size > self.queue_size:
                rospy.loginfo(
                    'sample_size [{}] is bigger than queue_size [{}]. Setting sample_size = queue_size'
                    .format(self.sample_size, self.queue_size))
                self.sample_size = self.queue_size
        else:
            rospy.logwarn(
                'Either timewindow or queue_size is set to 0. Using unbound queue.'
            )
            self.queue_size = None
        self.deque = collections.deque(maxlen=self.queue_size)

        self.relloc_deque = collections.deque(
            maxlen=int(self.sync_freq * 1.0))  # use 1 s of relloc data to trigger selection

        self.robot_pose_msg = None
        self.robot_sub = rospy.Subscriber(pose_real_topic, pose_topic_class,
                                          self.robot_pose_cb)

        self.is_valid_pub = rospy.Publisher('is_relloc_valid',
                                            Bool,
                                            queue_size=10)

        self.initial_guess = np.array([0, 0, 0, 0])
        self.reset_state()

        self.tf_buff = tf2_ros.Buffer()
        self.tf_ls = tf2_ros.TransformListener(self.tf_buff)
        self.tf_br = tf2_ros.TransformBroadcaster()

        self.drone_goto_client = actionlib.SimpleActionClient(
            drone_goto_action_ns, GoToAction)
        rospy.loginfo('Waiting for ' + drone_goto_action_ns)
        self.drone_goto_client.wait_for_server()

        # self.drone_shape_client = actionlib.SimpleActionClient(drone_shape_action_ns, FollowShapeAction)
        # rospy.loginfo('Waiting for ' + drone_shape_action_ns)
        # self.drone_shape_client.wait_for_server()

        self.drone_followme_client = actionlib.SimpleActionClient(
            drone_followme_action_ns, FollowMeAction)
        rospy.loginfo('Waiting for ' + drone_followme_action_ns)
        self.drone_followme_client.wait_for_server()

        self.relloc_server = actionlib.SimpleActionServer(
            action_ns + '/relloc_action', MotionRellocContAction,
            self.execute_relloc, False)
        self.relloc_server.start()

        self.relloc_cont_server = actionlib.SimpleActionServer(
            action_ns + '/relloc_cont_action', MotionRellocContAction,
            self.execute_relloc_cont, False)
        self.relloc_cont_server.start()

    def reset_state(self):
        with self.lock:
            self.deque.clear()
            self.estimated_tf = self.initial_guess
            self.cached_tf = None
            self.estimated = False
            self.tf_expired = True

    def robot_pose_cb(self, msg):
        self.robot_pose_msg = msg
        self.topic_sync_cb(self.robot_pose_msg)

    def execute_relloc(self, goal):
        loop_rate = rospy.Rate(10)  # 10 Hz

        if not self.robot_pose_msg:
            self.relloc_server.preempt_request = True
            self.relloc_server.set_aborted()
            return

        if self.relloc_cont_server.is_active():
            self.relloc_cont_server.preempt_request = True
            while not rospy.is_shutdown() and self.relloc_cont_server.is_active():
                loop_rate.sleep()

        self.trigger_cb(True)

        res_thresh = np.radians(10.0)

        self.deque.clear()

        while not rospy.is_shutdown():
            if self.relloc_server.is_preempt_requested():
                # Trigger: True -> False (resets state)
                self.trigger_cb(False)

                self.relloc_server.set_preempted()
                rospy.logwarn('Relloc action has been preempted')
                break

            # Copy the queue
            cur_deque = list(self.deque)
            cur_length = len(cur_deque)

            # Do we have enough data yet?
            if cur_length < self.queue_size:
                loop_rate.sleep()
                continue

            # Take last 3 seconds out of 5
            cur_deque = cur_deque[-self.sample_size:]
            t, res_err = self.estimate_pose(cur_deque, constraints=False)

            self.cached_tf = t

            pose = PoseStamped()
            pose.header = t.header
            pose.pose.position.x = t.transform.translation.x
            pose.pose.position.y = t.transform.translation.y
            pose.pose.position.z = t.transform.translation.z
            pose.pose.orientation.x = t.transform.rotation.x
            pose.pose.orientation.y = t.transform.rotation.y
            pose.pose.orientation.z = t.transform.rotation.z
            pose.pose.orientation.w = t.transform.rotation.w

            feedback = MotionRellocContFeedback()
            feedback.estimate = pose
            feedback.residual_error = res_err  # in rad
            self.relloc_server.publish_feedback(feedback)

            if res_err < res_thresh:
                # Trigger
                self.relloc_server.set_succeeded(
                    result=MotionRellocContResult(pose))
                break
            else:
                # Trigger: True -> False (resets state)
                self.trigger_cb(False)

                rospy.logwarn('Residual error is too high: {}'.format(res_err))

                self.relloc_server.set_aborted(
                    text='Residual error is too high: {}'.format(res_err))
                return

    def execute_relloc_cont(self, goal):
        loop_rate = rospy.Rate(10)  # 10 Hz

        if not self.robot_pose_msg:
            self.relloc_cont_server.preempt_request = True
            self.relloc_cont_server.set_aborted()
            return

        if self.relloc_server.is_active():
            self.relloc_server.preempt_request = True
            while self.relloc_server.is_active():
                loop_rate.sleep()

        self.trigger_cb(True)

        time_thresh = rospy.Duration(3.0)
        # res_thresh = 0.05
        res_thresh = np.radians(10.0)

        ts = None

        self.deque.clear()

        while not rospy.is_shutdown():
            if self.relloc_cont_server.is_preempt_requested():
                # Trigger: True -> False (resets state)
                self.trigger_cb(False)

                self.relloc_cont_server.set_preempted()
                rospy.logwarn('RellocCont action has been preempted')
                break

            # Copy the queue
            cur_deque = list(self.deque)
            cur_length = len(cur_deque)

            # Do we have enough data yet?
            if cur_length < self.sample_size:
                loop_rate.sleep()
                continue

            t, res_err = self.estimate_pose(cur_deque)

            # if not (t and res_err):
            #     continue

            self.cached_tf = t

            pose = PoseStamped()
            pose.header = t.header
            pose.pose.position.x = t.transform.translation.x
            pose.pose.position.y = t.transform.translation.y
            pose.pose.position.z = t.transform.translation.z
            pose.pose.orientation.x = t.transform.rotation.x
            pose.pose.orientation.y = t.transform.rotation.y
            pose.pose.orientation.z = t.transform.rotation.z
            pose.pose.orientation.w = t.transform.rotation.w

            if res_err < res_thresh:
                if ts is None:
                    ts = rospy.Time.now()
                if rospy.Time.now() > ts + time_thresh:
                    # rospy.loginfo('Estimated pose: {}'.format(pose))

                    # Trigger
                    self.relloc_cont_server.set_succeeded(
                        result=MotionRellocContResult(pose))
                    break
            else:
                ts = None

            feedback = MotionRellocContFeedback()
            feedback.estimate = pose
            feedback.residual_error = res_err  # in rad
            self.relloc_cont_server.publish_feedback(feedback)

            loop_rate.sleep()

    def trigger_cb(self, msg):
        self.last_trigger_val = self.trigger_val
        self.trigger_val = msg

        # if self.trigger_msg_eval:
        #     self.trigger_val = True if self.trigger_msg_eval(msg) else False
        # else:
        #     self.trigger_val = False

        if not self.last_trigger_val and self.trigger_val:
            rospy.loginfo('Collecting new data')
            self.reset_state()

    def estimate_pose(self, pts, constraints=True):
        ########### FIXME ##############
        np.random.seed(0)
        ################################

        data = np.array(pts)

        sample_size = self.sample_size
        if sample_size > data.shape[0]:
            sample_size = data.shape[0]

        idx = np.random.choice(data.shape[0], size=sample_size, replace=False)
        tmp = data[idx, :]
        p = np.transpose(tmp[:, 0:3])
        qc = np.transpose(tmp[:, 3:6])
        qv = np.transpose(tmp[:, 6:9])

        # rospy.loginfo('{} {} {}'.format(p, qc, qv))

        max_p = np.max(p, axis=1)
        min_p = np.min(p, axis=1)
        motion_span = np.linalg.norm(max_p - min_p)
        # if motion_span < self.robot_motion_span_min:
        #     rospy.logwarn('Robot motion span too small: %3.4f', motion_span)
        #     return None, None

        rospy.loginfo(
            'Estimating pose. Using {} of total {} data points'.format(
                sample_size, data.shape[0]))

        if constraints:
            res, maxerr = relloclib.estimate_pose(p, qc, qv, self.estimated_tf)
        else:
            res, maxerr = relloclib.estimate_pose_no_constraints(
                p, qc, qv, self.estimated_tf)
        self.estimated_tf = res.x

        rospy.loginfo("Average angular error (residual) in deg: {:.2f}".format(
            np.rad2deg(res.fun)))
        rospy.loginfo("Maximum angular error in deg: {:.2f}".format(
            np.rad2deg(maxerr)))
        rospy.loginfo(
            "Recovered transform (tx, ty, tz, rotz): {:.2f}, {:.2f}, {:.2f}, {:.2f}"
            .format(res.x[0], res.x[1], res.x[2], np.rad2deg(res.x[3])))

        est_quat = kdl.Rotation.RPY(0.0, 0.0, res.x[3]).GetQuaternion()
        est_tran = res.x[:3]

        t = TransformStamped()

        t.header.frame_id = self.human_frame
        t.child_frame_id = self.robot_root_frame
        t.header.stamp = rospy.Time.now()
        t.transform.translation.x = est_tran[0]
        t.transform.translation.y = est_tran[1]
        t.transform.translation.z = est_tran[2]
        t.transform.rotation.x = est_quat[0]
        t.transform.rotation.y = est_quat[1]
        t.transform.rotation.z = est_quat[2]
        t.transform.rotation.w = est_quat[3]

        self.estimated = True

        return t, res.fun
        # return t, maxerr
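        # A sketch of the kind of residual such an estimator minimizes, purely
        # as an assumption about relloclib (its implementation is not shown
        # here): apply the candidate translation-plus-yaw transform to the
        # robot points p, form unit directions from each ray origin qc to the
        # transformed points, and average their angle to the measured pointing
        # directions qv:
        #
        #   p_h = R_yaw.dot(p) + t                  # robot points in the human frame
        #   d = p_h - qc
        #   d /= np.linalg.norm(d, axis=0)          # unit directions origin -> robot
        #   res = np.mean(np.arccos(np.clip(np.sum(d * qv, axis=0), -1.0, 1.0)))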

    def topic_sync_cb(self, robot_pose_msg):
        if not self.trigger_val:
            return

        if self.robot_pose_msg_eval:
            # Extract relevant fields from the message
            rpose = self.robot_pose_msg_eval(robot_pose_msg)
        else:
            rpose = robot_pose_msg

        robot_pos = None

        if isinstance(rpose, Pose):
            robot_pos = (rpose.position.x, rpose.position.y, rpose.position.z)
        else:
            rospy.logerr(
                'Wrong topic/field type for robot pose: {}/{}. Should be geometry_msgs/Pose'
                .format(
                    type(rpose).__module__.split('.')[0],
                    type(rpose).__name__))
            return

        try:
            origin_tf = self.tf_buff.lookup_transform(
                self.human_frame, self.ray_origin_frame,
                rospy.Time())  #robot_pose_msg.header.stamp)
            ray_tf = self.tf_buff.lookup_transform(
                self.human_frame, self.ray_direction_frame,
                rospy.Time())  #robot_pose_msg.header.stamp)
            if self.ray_inverse:
                unit_vector = kdl.Vector(-1.0, 0.0, 0.0)
            else:
                unit_vector = kdl.Vector(1.0, 0.0, 0.0)
        except (tf2_ros.LookupException, tf2_ros.ConnectivityException,
                tf2_ros.ExtrapolationException) as e:
            rospy.logerr(e)
            return

        orig = (origin_tf.transform.translation.x,
                origin_tf.transform.translation.y,
                origin_tf.transform.translation.z)

        quat = (ray_tf.transform.rotation.x, ray_tf.transform.rotation.y,
                ray_tf.transform.rotation.z, ray_tf.transform.rotation.w)

        frame = tfc.fromTf((orig, quat))

        p = list(robot_pos)
        q = [frame.p.x(), frame.p.y(), frame.p.z()]
        # Rotate unit vector in the direction of pointing
        v = frame.M * unit_vector

        q.extend([v.x(), v.y(), v.z()])
        p.extend(q)

        with self.lock:
            self.deque.append(p)
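
Both action callbacks above copy a TransformStamped into a PoseStamped field by field; a small helper like the following (hypothetical, not part of the original node) would keep that conversion in one place:

def transform_to_pose(t):
    # Repackage a geometry_msgs/TransformStamped as a PoseStamped with the
    # same header. Assumes PoseStamped is imported from geometry_msgs.msg,
    # as in the node above.
    pose = PoseStamped()
    pose.header = t.header
    pose.pose.position.x = t.transform.translation.x
    pose.pose.position.y = t.transform.translation.y
    pose.pose.position.z = t.transform.translation.z
    pose.pose.orientation = t.transform.rotation
    return pose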
Example No. 29
def transform2matrix(transform):
    T = tf_conversions.fromTf(transform)
    T = tf_conversions.toMatrix(T)
    return T
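
A minimal usage sketch: tf_conversions.fromTf takes a ((x, y, z), (qx, qy, qz, qw)) tuple, and toMatrix yields the corresponding 4x4 homogeneous matrix, with the translation in the last column.

import numpy
T = transform2matrix(((0.0, 0.0, 1.0), (0.0, 0.0, 0.0, 1.0)))  # identity rotation, 1 m up
assert T.shape == (4, 4)
assert numpy.allclose(T[0:3, 3], [0.0, 0.0, 1.0])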
Example No. 30
def get_transform_lists(bag_file_name):
    init_node()
    broadcaster = tf.TransformBroadcaster()
    listener = tf.TransformListener()
    listener.setUsingDedicatedThread(True)
    checkerboard_in_camera_trans = []
    wrist_in_body_trans = []
    #pdb.set_trace()
    camera_in_body_estimate = None
    checkerboard_to_wrist_estimate = None
    bag = rosbag.Bag(bag_file_name)

    tf_header_ids = set()
    tf_child_ids = set()

    for topic, message, time in bag.read_messages(topics=['tf', '/tf']):

        for tf_message_stamped in message.transforms:
            tf_message = tf_message_stamped.transform
            translation = [
                tf_message.translation.x, tf_message.translation.y,
                tf_message.translation.z
            ]
            rotation = [
                tf_message.rotation.x, tf_message.rotation.y,
                tf_message.rotation.z, tf_message.rotation.w
            ]
            broadcaster.sendTransform(translation, rotation, rospy.Time.now(),
                                      tf_message_stamped.child_frame_id,
                                      tf_message_stamped.header.frame_id)
            tf_message_stamped.header.stamp = rospy.Time.now()
            listener.setTransform(tf_message_stamped, "user")

        for tf_message in message.transforms:
            if tf_message.header.frame_id not in tf_header_ids:
                tf_header_ids.add(tf_message.header.frame_id)
                print 'found new frame %s' % tf_message.header.frame_id

            if tf_message.child_frame_id not in tf_child_ids:
                tf_child_ids.add(tf_message.child_frame_id)
                print 'found new child frame %s' % tf_message.child_frame_id

            if 'wrist_board' in tf_message.header.frame_id or 'wrist_board_corner' in tf_message.child_frame_id:
                print 'found keyframe'
                if camera_in_body_estimate is None:
                    try:
                        listener.waitForTransform('/camera_link', '/world',
                                                  rospy.Time(0),
                                                  rospy.Duration(.01))
                        camera_in_body_tf = listener.lookupTransform(
                            '/camera_link', '/world', rospy.Time(0))

                        print "got camera to world transform"
                        camera_in_body_estimate = tf_conversions.toMatrix(
                            tf_conversions.fromTf(camera_in_body_tf))
                    except tf.Exception:
                        print 'could not get camera to world transform, skipping. Are you sure you ran tf between camera_link and world?'
                        continue
                    print "got camera to world estimate"

                if checkerboard_to_wrist_estimate is None:
                    try:
                        listener.waitForTransform('/wrist_board',
                                                  '/wrist_board_corner',
                                                  rospy.Time(0),
                                                  rospy.Duration(.01))
                        #(trans, rot) = listener.lookupTransform('/wrist_board','/wrist_board_corner',rospy.Time(0))
                        #print trans
                        #print rot
                        checkerboard_to_wrist_tf = listener.lookupTransform(
                            '/wrist_board', '/wrist_board_corner',
                            rospy.Time(0))
                        #print checkerboard_to_wrist_tf
                        #raw_input("press a key")

                        #checkerboard_to_wrist_tf = ((1.75, -0.75, -0.121),(-0.09, -0.04, 0.73, 0.66))
                        #print 'yinxiao test'

                        print "got wristboad in wrist"
                        checkerboard_to_wrist_estimate = tf_conversions.toMatrix(
                            tf_conversions.fromTf(checkerboard_to_wrist_tf))

                    except:
                        print 'could not get wristboard to wrist_board_corner, skipping'
                        continue
                    print "got wristboard in wrist estimate"
                try:
                    listener.waitForTransform('/wrist_board', '/camera_link',
                                              rospy.Time(0),
                                              rospy.Duration(.01))
                    listener.waitForTransform('/wrist_board_corner', '/world',
                                              rospy.Time(0),
                                              rospy.Duration(.1))

                    checkerboard_tf = listener.lookupTransform(
                        '/wrist_board', '/camera_link', rospy.Time(0))
                    #print "got wristboard in camera"

                    checkerboard_in_camera_trans.append(
                        tf_conversions.toMatrix(
                            tf_conversions.fromTf(checkerboard_tf)))

                    #print "got left wrist in world"
                    wrist_in_body_tf = listener.lookupTransform(
                        '/wrist_board_corner', '/world', rospy.Time(0))
                    wrist_in_body_trans.append(
                        tf_conversions.toMatrix(
                            tf_conversions.fromTf(wrist_in_body_tf)))
                except tf.Exception:
                    continue
                #print "finished loop"

    return checkerboard_in_camera_trans, wrist_in_body_trans, camera_in_body_estimate, checkerboard_to_wrist_estimate
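
A usage sketch (the bag file name is hypothetical): each index pairs a checkerboard-in-camera matrix with the wrist-in-body matrix captured at the same keyframe, which is the kind of paired data a hand-eye-style calibration consumes.

A_list, B_list, cam_est, board_est = get_transform_lists('calibration_run.bag')
print '%d transform pairs collected' % len(A_list)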
Example No. 31
def callback_c2x_tf(data):
    """
    Stores the last c2x message, but transforms it beforehand.
    Transformation is only for position and velocity data, since this is the only data that is actively used by the
    program. The only exception is the plotting of bounding boxes. These will therefore not be rotated correctly, but
    their size will be correct anyway.
    """
    global history, steps, constant_velo, c2x_offset_x, c2x_offset_y
    tracks = data.tracks
    head = SimulatedVehicle.create_def_header(frame_id=src_id)
    # transformer: tf.TransformerROS
    for track in tracks:
        x_pos = track.box.center_x
        y_pos = track.box.center_y
        x_vel = track.box.velocity_x
        y_vel = track.box.velocity_y
        try:
            # Try to transform the point
            # Create a point with z=0 for the source frame (out of c2x tracks x/y) coordinate
            point = PointStamped(header=head,
                                 point=Point(x=x_pos, y=y_pos, z=0))
            # Don't use the current timestamp, use 0 to use the latest available tf data
            point.header.stamp = rospy.Time(0)
            # Now transform the point using the data
            tf_point = transformer.transformPoint(target_frame=dest_id,
                                                  ps=point)

            # Acquire the transformation matrix for this
            tf_mat = tf_c.toMatrix(
                tf_c.fromTf(
                    transformer.lookupTransform(target_frame=dest_id,
                                                source_frame=src_id,
                                                time=rospy.Time(0))))
            # print(tf_mat)

            # tf_vel stores the transformed velocity
            tf_vel = np.dot(tf_mat[0:2, 0:2], [x_vel, y_vel])
            # Update the track with the transformed data
            track.box.center_x = tf_point.point.x + c2x_offset_x
            track.box.center_y = tf_point.point.y + c2x_offset_y
            track.box.velocity_x = tf_vel[0]
            track.box.velocity_y = tf_vel[1]
            # DEBUG
            # print(str(track.box.center_x)+"\t"+str(track.box.center_y))
            # print("steps: "+str(steps)+"\tvelx: "+str(point_vel.point.x)+" vely: "+str(point_vel.point.y))
        except tf.ExtrapolationException as e:
            # Extrapolation error; print but keep going (possibly just because only one frame was received so far)
            print(e)
        except tf.ConnectivityException as e:
            # print("Connectivity Exception during transform")
            print(e)
        except tf.LookupException as e:
            print(e)
    # Now change the frame_id to dest_id, since the tf was already performed
    data.header.frame_id = dest_id

    c2x.append(data)
    history.add("c2x_0", data.tracks)  # Add the data to the history object under the name c2x_0

    # The following block adds several fake measurements to the c2x tracking that are time delayed to the original
    # measurement from the current timestep
    # They also have changed x/y data based on velocity, so that the track doesn't need to be reused later on with the
    # outdated coordinates and instead can work with "updated" coordinates for time steps that are not included in the
    # real c2x messages.
    add_points = 4  # how many "fake" points should be added between this and the next measurement
    # Currently the number and timing of "fake" points are fixed. They were selected to fit data sets 1 & 2, but
    # in general it would be better to use previous data to estimate the time difference and number of points.
    if len(data.tracks) > 0:  # only do the following if the track contained something
        for i in range(add_points):
            # add 4 more measurements, each based on the data, but with manipulated time and position based on velocity
            fake_data = copy.deepcopy(data)  # Create a copy of the data
            # now change the timestamp of this new data

            # c2x data arrives every 0.2 seconds, so the 4 additional
            # measurements are spaced 0.04 s apart
            time_shift = rospy.Duration(0, 40000000)  # 0.04 s, given in nanoseconds
            # this would need to change if it should automatically fit to data of other data sets.

            fake_data.header.stamp = fake_data.header.stamp + time_shift * (i + 1)
            for track in fake_data.tracks:
                track.box.header.stamp = track.box.header.stamp + time_shift * (i + 1)
                track.box.center_x += constant_velo * track.box.velocity_x * (i + 1)
                track.box.center_y += constant_velo * track.box.velocity_y * (i + 1)
            c2x.append(fake_data)
            history.add("c2x_0", fake_data.tracks)

    steps = 0
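
A worked sketch of the velocity transformation used above: only the 2x2 rotation block of the homogeneous matrix is applied, since a velocity is a direction vector and must not be translated. For a pure 90-degree yaw, a velocity along +x maps to +y regardless of the translation part:

import numpy as np
yaw = np.pi / 2.0
tf_mat = np.array([[np.cos(yaw), -np.sin(yaw), 0.0, 5.0],
                   [np.sin(yaw),  np.cos(yaw), 0.0, 2.0],
                   [0.0,          0.0,         1.0, 0.0],
                   [0.0,          0.0,         0.0, 1.0]])
tf_vel = np.dot(tf_mat[0:2, 0:2], [1.0, 0.0])  # ~ (0.0, 1.0)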
Example No. 32
def flip_rotation_frame(trans, rot):
    f = tfc.fromTf((trans, rot))
    ir = f.M.Inverse()

    return tfc.toTf(tfc.Frame(ir, f.p))
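
A minimal usage sketch: the rotation is inverted while the translation is kept, so a +90-degree yaw about z comes back as a -90-degree yaw with the same translation.

trans = (1.0, 2.0, 0.0)
rot = (0.0, 0.0, 0.7071067811865476, 0.7071067811865476)  # quaternion for yaw = +90 deg
new_trans, new_rot = flip_rotation_frame(trans, rot)
# new_trans == trans; new_rot encodes yaw = -90 deg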
Example No. 33
    def get_camera_tf(self):
        # The original snippet referenced an undefined module-level `tf_listener` and an
        # undefined `image` message; a listener stored on the instance and the camera
        # frame id built below are assumed here.
        camera_frame_id = '/%s_camera_frame_id' % (self.arm)
        self.tf_listener.waitForTransform('/base', camera_frame_id, rospy.Time(0), rospy.Duration(2))
        camera_tf = self.tf_listener.lookupTransform('/base', camera_frame_id, rospy.Time(0))
        self.camera_transform = tf_conversions.toMatrix(tf_conversions.fromTf(camera_tf))
Example No. 34
    def initialize_frames(self):
        try:
            self.endpoint = tf_c.fromTf(self.listener_.lookupTransform('/world', '/endpoint', rospy.Time(0)))
        except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException) as e:
            rospy.logwarn(str(e))