def pose2matrix(pose):
    p = pose
    if hasattr(pose, "pose"):
        p = pose.pose
    T = tf_conversions.fromMsg(p)
    T = tf_conversions.toMatrix(T)
    return T
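A hedged usage sketch (not from the original source): pose2matrix accepts either a geometry_msgs Pose or a PoseStamped and returns a 4x4 numpy transform.

import tf_conversions
from geometry_msgs.msg import Pose, Point, Quaternion

pose = Pose(Point(0.1, 0.2, 0.3), Quaternion(0.0, 0.0, 0.0, 1.0))
T = pose2matrix(pose)  # T[0:3, 3] holds the translation [0.1, 0.2, 0.3]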
Example #2
def graspit_grasp_pose_to_moveit_grasp_pose(move_group_commander, listener, graspit_grasp_msg,
                                            grasp_frame='/approach_tran'):
    """
    :param move_group_commander: A move_group commander from which to get the end effector link.
    :type move_group_commander: moveit_commander.MoveGroupCommander
    :param listener: A transformer for looking up the transformation
    :type listener: tf.TransformListener
    :param graspit_grasp_msg: A graspit grasp message
    :type graspit_grasp_msg: graspit_msgs.msg.Grasp

    """

    try:
        listener.waitForTransform(grasp_frame, move_group_commander.get_end_effector_link(),
                                     rospy.Time(0), timeout=rospy.Duration(1))
        at_to_ee_tran, at_to_ee_rot = listener.lookupTransform(grasp_frame, move_group_commander.get_end_effector_link(),rospy.Time())
    except Exception:
        rospy.logerr("graspit_grasp_pose_to_moveit_grasp_pose::\n " +
                    "Failed to find transform from %s to %s"%(grasp_frame, move_group_commander.get_end_effector_link()))
        ipdb.set_trace()



    graspit_grasp_msg_final_grasp_tran_matrix = tf_conversions.toMatrix(tf_conversions.fromMsg(graspit_grasp_msg.final_grasp_pose))
    approach_tran_to_end_effector_tran_matrix = tf.TransformerROS().fromTranslationRotation(at_to_ee_tran, at_to_ee_rot)
    if move_group_commander.get_end_effector_link() == 'l_wrist_roll_link':
        rospy.logerr('This is a PR2\'s left arm so we have to rotate things.')
        pr2_is_weird_mat = tf.TransformerROS().fromTranslationRotation([0,0,0], tf.transformations.quaternion_from_euler(0,math.pi/2,0))
    else:
        pr2_is_weird_mat = tf.TransformerROS().fromTranslationRotation([0,0,0], [0,0,0,1])
    actual_ee_pose_matrix = np.dot( graspit_grasp_msg_final_grasp_tran_matrix, approach_tran_to_end_effector_tran_matrix)
    actual_ee_pose_matrix = np.dot(actual_ee_pose_matrix, pr2_is_weird_mat)
    actual_ee_pose = tf_conversions.toMsg(tf_conversions.fromMatrix(actual_ee_pose_matrix))
    rospy.loginfo("actual_ee_pose: " + str(actual_ee_pose))
    return actual_ee_pose
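A hedged usage sketch; the node name, group name, and grasp message below are placeholders rather than names from the original source, and a running ROS core with the robot's tf tree is assumed.

import rospy
import tf
import moveit_commander
import graspit_msgs.msg

rospy.init_node('grasp_pose_demo')                   # hypothetical node name
group = moveit_commander.MoveGroupCommander('arm')   # hypothetical group name
listener = tf.TransformListener()
graspit_grasp_msg = graspit_msgs.msg.Grasp()         # placeholder; normally from a GraspIt! planner
ee_pose = graspit_grasp_pose_to_moveit_grasp_pose(group, listener, graspit_grasp_msg)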
Example #3
    def callback(self, camera_info_msg, image_msg, camera_pose_msg):
        # camera parameters and image
        camera_matrix = numpy.float32(camera_info_msg.K).reshape(3, 3)
        distortion = numpy.float32(camera_info_msg.D).flatten()
        frame = self.cv_bridge.imgmsg_to_cv2(image_msg, 'bgr8')

        # camera pose
        cam2world = tf_conversions.toMatrix(
            tf_conversions.fromMsg(camera_pose_msg.pose))
        world2cam = numpy.linalg.inv(cam2world)

        tvec = world2cam[:3, 3]
        rvec, jacob = cv2.Rodrigues(world2cam[:3, :3])

        # estimates the scale of the environment
        ret, scale = self.scale_estimater.estimate(tvec)
        if not ret:
            cv2.putText(frame, 'estimating scale...', (10, 30),
                        cv2.FONT_HERSHEY_PLAIN, 3.0, (255, 255, 255))
        else:
            # assuming that the camera moves 20cm in the first 100 frames
            scale = scale / 0.2
            for cube in self.cubes:
                cube.draw(frame, camera_matrix, distortion, rvec, tvec, scale)

        self.frame = frame
        self.cam2world = cam2world
Example #4
def graspit_grasp_pose_to_moveit_grasp_pose(move_group_commander, listener, graspit_grasp_msg,
                                            grasp_frame='/approach_tran'):
    """
    :param move_group_commander: A move_group commander from which to get the end effector link.
    :type move_group_commander: moveit_commander.MoveGroupCommander
    :param listener: A transformer for looking up the transformation
    :type listener: tf.TransformListener
    :param graspit_grasp_msg: A graspit grasp message
    :type graspit_grasp_msg: graspit_msgs.msg.Grasp

    """

    try:
        listener.waitForTransform(grasp_frame, move_group_commander.get_end_effector_link(),
                                     rospy.Time(0), timeout=rospy.Duration(1))
        at_to_ee_tran, at_to_ee_rot = listener.lookupTransform(grasp_frame, move_group_commander.get_end_effector_link(),rospy.Time())
    except Exception:
        rospy.logerr("graspit_grasp_pose_to_moveit_grasp_pose::\n " +
                    "Failed to find transform from %s to %s"%(grasp_frame, move_group_commander.get_end_effector_link()))
        ipdb.set_trace()



    graspit_grasp_msg_final_grasp_tran_matrix = tf_conversions.toMatrix(tf_conversions.fromMsg(graspit_grasp_msg.final_grasp_pose))
    approach_tran_to_end_effector_tran_matrix = tf.TransformerROS().fromTranslationRotation(at_to_ee_tran, at_to_ee_rot)
    actual_ee_pose_matrix = np.dot( graspit_grasp_msg_final_grasp_tran_matrix, approach_tran_to_end_effector_tran_matrix)
    actual_ee_pose = tf_conversions.toMsg(tf_conversions.fromMatrix(actual_ee_pose_matrix))
    rospy.loginfo("actual_ee_pose: " + str(actual_ee_pose))
    return actual_ee_pose
Example #5
	def MoveTriad(self, msg):
		if self.axisbody is None:
			return
		f1 = tf_conversions.fromMsg(msg.pose)
		T = tf_conversions.toMatrix(f1)
		#T[0:3,3] = [-.5,0,0]
		self.axisbody.SetTransform(T)
Example #6
def get_pose_as_tran(moveit_grasp_msg):
    """
    type: moveit_grasp_msg: moveit_msgs.msg.Grasp
    """
    assert isinstance(moveit_grasp_msg, moveit_msgs.msg.Grasp)
    pose_msg = moveit_grasp_msg.grasp_pose.pose
    return tf_conversions.toMatrix(tf_conversions.fromMsg(pose_msg))
Example #7
def get_pose_as_tran(moveit_grasp_msg):
    """
    type: moveit_grasp_msg: moveit_msgs.msg.Grasp
    """
    assert isinstance(moveit_grasp_msg, moveit_msgs.msg.Grasp)
    pose_msg = moveit_grasp_msg.grasp_pose.pose
    return tf_conversions.toMatrix(tf_conversions.fromMsg(pose_msg))
Example #8
def create_occupancy_grid_from_obstacles(obstacles,
                                         mins_xyz,
                                         step_size_xyz,
                                         dims_xyz,
                                         use_binvox=False):
    voxel_grid = np.zeros(shape=dims_xyz)

    for obstacle in obstacles:
        if use_binvox:
            vox = binvox_rw.read_as_3d_array(
                open(obstacle.mesh_filepath.replace('.ply', '.binvox'), 'r'))
            vertices = binvox_to_points(vox)
        else:
            vertices = read_vertex_points_from_ply_filepath(
                obstacle.mesh_filepath)
        frame = tf_conversions.fromMsg(obstacle.pose_stamped.pose)
        transform = tf_conversions.toMatrix(frame)
        vertices_transformed = transform_points(vertices, transform)

        if use_binvox:
            voxel_grid += add_obstacles_to_reachability_space_full_binvox(
                vertices_transformed, mins_xyz, step_size_xyz, dims_xyz)
        else:
            voxel_grid += add_obstacles_to_reachability_space_full(
                vertices_transformed, mins_xyz, step_size_xyz, dims_xyz)

    voxel_grid[np.where(voxel_grid > 0)] = 1
    return voxel_grid
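A sketch of a call with assumed parameters (not from the original source): a 1 m cube of 1 cm voxels starting at (-0.5, -0.5, 0.0); each obstacle is expected to carry the .mesh_filepath and .pose_stamped attributes used above.

grid = create_occupancy_grid_from_obstacles(
    obstacles,                          # list of obstacle objects, as described above
    mins_xyz=(-0.5, -0.5, 0.0),         # lower corner of the grid in meters
    step_size_xyz=(0.01, 0.01, 0.01),   # 1 cm voxels
    dims_xyz=(100, 100, 100))           # 100 voxels per axis -> 1 m cube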
Example #9
    def make_pointcloud_proto(self, pointcloud_msg):
        """
        :param pointcloud_msg: input sensor_msgs.msg.PointCloud2
        :type pointcloud_msg: sensor_msgs.msg.PointCloud2
        :rtype: gen_proto.GraspitMessage_pb2.GraspitProtobufMessage
        """
        pointcloud_transform = tf_conversions.toMatrix(
            tf_conversions.fromTf(
                self.listener.lookupTransform(self.graspit_frame,
                                              pointcloud_msg.header.frame_id,
                                              rospy.Time(0))))
        rospy.loginfo(self.__class__.__name__ + " is relaying message")
        if self.downsample_factor != 1:
            self.create_uvs(pointcloud_msg)
        points = sensor_msgs.point_cloud2.read_points(pointcloud_msg, None,
                                                      False, self.uvs)
        #points = sensor_msgs.point_cloud2.read_points(pointcloud_msg)

        gm = gen_proto.GraspitMessage_pb2.GraspitProtobufMessage()
        #gm.renderable.pointCloud.points.add()
        gm.renderable.pointCloud.points.extend(
            [gen_proto.Renderable_pb2.Renderable.PointXYZRGB()] *
            (pointcloud_msg.width * pointcloud_msg.height /
             (self.downsample_factor**2)))

        #renderable = gen_proto.Renderable_pb2.Renderable()
        #pointcloud = gen_proto.Renderable_pb2.Renderable.PointCloudXYZRGB()
        for ind, point in enumerate(points):
            #pointXYZRGB = gen_proto.Renderable_pb2.Renderable.PointXYZRGB()

            color = self.get_color_converted_point(point)

            point_location = numpy.dot(
                pointcloud_transform,
                numpy.array([point[:3] + (1, )]).transpose())
            renderable = gm.renderable
            assert isinstance(renderable, gen_proto.Renderable_pb2.Renderable)
            pointCloud = renderable.pointCloud
            assert isinstance(
                pointCloud,
                gen_proto.Renderable_pb2.Renderable.PointCloudXYZRGB)
            points = pointCloud.points
            assert isinstance(points,
                              gen_proto.Renderable_pb2.Renderable.PointXYZRGB)
            pointCloud.points[ind].point.x = point_location[0, 0]
            pointCloud.points[ind].point.y = point_location[1, 0]
            pointCloud.points[ind].point.z = point_location[2, 0]

            pointCloud.points[ind].color.red = color[0]
            pointCloud.points[ind].color.green = color[1]
            pointCloud.points[ind].color.blue = color[2]

        renderable.pointCloud.units = 1.0

        rospy.loginfo(
            str(self.__class__) +
            "::GraspitProtobufSocket:: Finished building protobuf" +
            "pointcloud message")
        return gm
Example #10
    def top_cam_tag_callback(self, tag_detections):
        #rospy.loginfo("Received top cam tag!")

        for detection in tag_detections.detections:
            self.top_cam_to_table_stamp = tag_detections.header.stamp
            self.top_cam_tag_found = True
            tag_id = detection.id[0]
            self.top_cam_to_table[tag_id] = tfc.toMatrix(tfc.fromMsg(detection.pose.pose.pose))
Example #11
 def try_get_world_transform(self):
     try:
         self.transform_listener.waitForTransform("/camera_rgb_optical_frame", "/world", rospy.Time(0),rospy.Duration(.1))
     except Exception:
         return False
     self.world_transform = tf_conversions.toMatrix(tf_conversions.fromTf(
         self.transform_listener.lookupTransform(
             "/camera_rgb_optical_frame",'/world', rospy.Time(0))))
     return True
Example #12
    def update_table(self):
        try:
            trans = tf_conversions.toMatrix(tf_conversions.fromTf(self.transform_listener.lookupTransform('world','object', rospy.Time(0))))
            trans[0:3,3] *= 1000 
            trans[2,3] -=  70
            self.graspit_commander.set_body_trans('experiment_table', trans)

        except Exception:
            pdb.post_mortem()
Example #13
def transform_pose(grasp_pose_in_world, world_frame_to_object_frame_transform):

    # Convert ROSmsg to 4x4 numpy arrays
    grasp_pose_in_world_tran_matrix = tf_conversions.toMatrix(
        tf_conversions.fromMsg(grasp_pose_in_world))
    object_pose_in_world_tran_matrix = tf_conversions.toMatrix(
        tf_conversions.fromMsg(world_frame_to_object_frame_transform))

    # Transform the grasp pose into the object reference frame
    grasp_pose_in_object_tran_matrix = np.dot(
        np.linalg.inv(object_pose_in_world_tran_matrix),
        grasp_pose_in_world_tran_matrix)

    # Convert pose back into ROS message
    grasp_pose_in_object = tf_conversions.toMsg(
        tf_conversions.fromMatrix(grasp_pose_in_object_tran_matrix))

    return grasp_pose_in_object
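A minimal worked example with made-up poses: both inputs are expressed in the world frame, and the result is the grasp in the object frame, i.e. o_T_g = inv(w_T_o) * w_T_g.

from geometry_msgs.msg import Pose, Point, Quaternion

grasp_in_world = Pose(Point(1.0, 0.0, 0.5), Quaternion(0.0, 0.0, 0.0, 1.0))
object_in_world = Pose(Point(1.0, 0.0, 0.0), Quaternion(0.0, 0.0, 0.0, 1.0))
grasp_in_object = transform_pose(grasp_in_world, object_in_world)
# grasp_in_object.position is (0.0, 0.0, 0.5): the grasp sits 0.5 m above the object origin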
Example #14
def xyzrpy_to_xform(xyzrpy):

    r = tf_conversions.Rotation.RPY(*xyzrpy[3:])
    pose = Pose(Point(*xyzrpy[:3]), Quaternion(*r.GetQuaternion()))
    frame = tf_conversions.fromMsg(pose)
    transform = tf_conversions.toMatrix(frame)
    rotMatrix = transform[0:3, 0:3].flatten().tolist()
    tran = [pose.position.x, pose.position.y, pose.position.z]

    return rotMatrix, tran
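A worked example with illustrative values: with zero roll/pitch/yaw the rotation part is the identity and only the translation carries information.

rot, tran = xyzrpy_to_xform([0.1, 0.2, 0.3, 0.0, 0.0, 0.0])
# rot  -> [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
# tran -> [0.1, 0.2, 0.3]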
Example #15
def get_original_grasp_tran(moveit_grasp_msg):
    listener = tf.listener.TransformListener()
    print '1'
    listener.waitForTransform("approach_tran", 'staubli_rx60l_link7', rospy.Time(), timeout=rospy.Duration(1.0))
    at_to_ee_tran, at_to_ee_rot = listener.lookupTransform("approach_tran", 'staubli_rx60l_link7',rospy.Time())
    print '2'
    moveit_grasp_msg_final_grasp_tran_matrix = tf_conversions.toMatrix(tf_conversions.fromMsg(moveit_grasp_msg.grasp_pose.pose))
    approach_tran_to_end_effector_tran_matrix = tf.TransformerROS().fromTranslationRotation(at_to_ee_tran, at_to_ee_rot)
    original_ee_pose_matrix = np.dot( moveit_grasp_msg_final_grasp_tran_matrix, numpy.linalg.inv(approach_tran_to_end_effector_tran_matrix))
    return original_ee_pose_matrix
Example #16
 def _get_world_transform(self):
     try:
         self._transform_listener.waitForTransform("/camera_rgb_optical_frame", "/world", rospy.Time(0), rospy.Duration(10))
     except Exception:
         rospy.logwarn("Failed to get world transform for: " + self.__class__.__name())
         return None
     world_transform = tf_conversions.toMatrix(tf_conversions.fromTf(
         self._transform_listener.lookupTransform(
             "/camera_rgb_optical_frame", '/world', rospy.Time(0))))
     return world_transform
Example #17
def get_world_transform():
    transform_listener = tf.TransformListener()
    try:
        transform_listener.waitForTransform("/camera_rgb_optical_frame", "/world", rospy.Time(0), rospy.Duration(10))
    except Exception:
        rospy.logwarn("Failed to get world transform")
        return None
    world_transform = tf_conversions.toMatrix(tf_conversions.fromTf(
            transform_listener.lookupTransform(
            "/camera_rgb_optical_frame", '/world', rospy.Time(0))))
    return world_transform
Example #18
    def get_most_recent_transform(self):

        left_pose, left_time = self.left_arm_tf_updater.current_pose_and_time
        right_pose, right_time = self.right_arm_tf_updater.current_pose_and_time

        if(right_time > left_time):
            pose = right_pose
            camera_transform = self.right_arm_tf_updater.camera_transform
        else:
            pose = left_pose
            camera_transform = self.left_arm_tf_updater.camera_transform
       
        transform = None
        if(pose):
            checkerboard_in_camera = tf_conversions.toMatrix(tf_conversions.fromMsg(pose))
            checkerboard_in_body = numpy.dot(camera_transform, checkerboard_in_camera)
            checkerboard_in_body_tf = tf_conversions.toTf(tf_conversions.fromMatrix(checkerboard_in_body))
            checkerboard_rpy = tf.transformations.euler_from_quaternion(checkerboard_in_body_tf[1])
            transform = checkerboard_in_body_tf[0] + checkerboard_rpy

        return transform
Example #19
 def _get_world_transform(self):
     try:
         self._transform_listener.waitForTransform(
             "/camera_rgb_optical_frame", "/world", rospy.Time(0),
             rospy.Duration(10))
     except Exception:
         rospy.logwarn("Failed to get world transform for: " +
                       self.__class__.__name__)
         return None
     world_transform = tf_conversions.toMatrix(
         tf_conversions.fromTf(
             self._transform_listener.lookupTransform(
                 "/camera_rgb_optical_frame", '/world', rospy.Time(0))))
     return world_transform
Example #20
def graspit_grasp_pose_to_moveit_grasp_pose(move_group_commander,
                                            listener,
                                            graspit_grasp_msg,
                                            grasp_frame='/approach_tran'):
    """
    :param move_group_commander: A move_group commander from which to get the end effector link.
    :type move_group_commander: moveit_commander.MoveGroupCommander
    :param listener: A transformer for looking up the transformation
    :type listener: tf.TransformListener
    :param graspit_grasp_msg: A graspit grasp message
    :type graspit_grasp_msg: graspit_msgs.msg.Grasp

    """

    try:
        listener.waitForTransform(grasp_frame,
                                  move_group_commander.get_end_effector_link(),
                                  rospy.Time(0),
                                  timeout=rospy.Duration(1))
        at_to_ee_tran, at_to_ee_rot = listener.lookupTransform(
            grasp_frame, move_group_commander.get_end_effector_link(),
            rospy.Time())
    except Exception:
        rospy.logerr(
            "graspit_grasp_pose_to_moveit_grasp_pose::\n " +
            "Failed to find transform from %s to %s" %
            (grasp_frame, move_group_commander.get_end_effector_link()))
        ipdb.set_trace()

    graspit_grasp_msg_final_grasp_tran_matrix = tf_conversions.toMatrix(
        tf_conversions.fromMsg(graspit_grasp_msg.final_grasp_pose))
    approach_tran_to_end_effector_tran_matrix = tf.TransformerROS(
    ).fromTranslationRotation(at_to_ee_tran, at_to_ee_rot)
    if move_group_commander.get_end_effector_link() == 'l_wrist_roll_link':
        rospy.logerr('This is a PR2\'s left arm so we have to rotate things.')
        pr2_is_weird_mat = tf.TransformerROS().fromTranslationRotation(
            [0, 0, 0],
            tf.transformations.quaternion_from_euler(0, math.pi / 2, 0))
    else:
        pr2_is_weird_mat = tf.TransformerROS().fromTranslationRotation(
            [0, 0, 0], [0, 0, 0, 1])
    actual_ee_pose_matrix = np.dot(graspit_grasp_msg_final_grasp_tran_matrix,
                                   approach_tran_to_end_effector_tran_matrix)
    actual_ee_pose_matrix = np.dot(actual_ee_pose_matrix, pr2_is_weird_mat)
    actual_ee_pose = tf_conversions.toMsg(
        tf_conversions.fromMatrix(actual_ee_pose_matrix))
    rospy.loginfo("actual_ee_pose: " + str(actual_ee_pose))
    return actual_ee_pose
Example #21
    def make_pointcloud_proto(self, pointcloud_msg):
        """
        :param pointcloud_msg: input sensor_msgs.msg.PointCloud2
        :type pointcloud_msg: sensor_msgs.msg.PointCloud2
        :rtype: gen_proto.GraspitMessage_pb2.GraspitProtobufMessage
        """
        pointcloud_transform = tf_conversions.toMatrix(tf_conversions.fromTf(self.listener.lookupTransform(
            self.graspit_frame, pointcloud_msg.header.frame_id, rospy.Time(0))))
        rospy.loginfo(self.__class__.__name__ + " is relaying message")
        if self.downsample_factor != 1:
            self.create_uvs(pointcloud_msg)
        points = sensor_msgs.point_cloud2.read_points(pointcloud_msg, None, False, self.uvs)
        #points = sensor_msgs.point_cloud2.read_points(pointcloud_msg)

        gm = gen_proto.GraspitMessage_pb2.GraspitProtobufMessage()
        #gm.renderable.pointCloud.points.add()
        gm.renderable.pointCloud.points.extend(
            [gen_proto.Renderable_pb2.Renderable.PointXYZRGB()] *
            (pointcloud_msg.width*pointcloud_msg.height/(self.downsample_factor**2)))


        #renderable = gen_proto.Renderable_pb2.Renderable()
        #pointcloud = gen_proto.Renderable_pb2.Renderable.PointCloudXYZRGB()
        for ind,point in enumerate(points):
            #pointXYZRGB = gen_proto.Renderable_pb2.Renderable.PointXYZRGB()

            color = self.get_color_converted_point(point)

            point_location = numpy.dot(pointcloud_transform, numpy.array([point[:3] + (1,)]).transpose())
            renderable = gm.renderable
            assert isinstance(renderable, gen_proto.Renderable_pb2.Renderable)
            pointCloud = renderable.pointCloud
            assert isinstance(pointCloud, gen_proto.Renderable_pb2.Renderable.PointCloudXYZRGB)
            points = pointCloud.points
            assert isinstance(points, gen_proto.Renderable_pb2.Renderable.PointXYZRGB)
            pointCloud.points[ind].point.x = point_location[0, 0]
            pointCloud.points[ind].point.y = point_location[1, 0]
            pointCloud.points[ind].point.z = point_location[2, 0]

            pointCloud.points[ind].color.red = color[0]
            pointCloud.points[ind].color.green = color[1]
            pointCloud.points[ind].color.blue = color[2]

        renderable.pointCloud.units = 1.0

        rospy.loginfo(str(self.__class__) + "::GraspitProtobufSocket:: Finished building protobuf"
                                          + "pointcloud message")
        return gm
Example #22
 def servo_to_pose_call(self,req): 
     if self.driver_status == 'SERVO':
         rospy.logwarn(req)
         self.F_command = tf_c.fromMsg(req.target)
         M_command = tf_c.toMatrix(self.F_command)
         # Calculate IK
         joint_positions = self.kdl_kin.inverse(M_command, self.q) # inverse kinematics
         if joint_positions is not None:
             pose_sol = self.kdl_kin.forward(joint_positions) # should equal pose
             self.q = joint_positions
         else:
             rospy.logwarn('no solution found')
         # self.send_command(F_command)
         return 'SUCCESS - moved to pose'
     else:
         rospy.logerr('SIMPLE UR -- Not in servo mode')
         return 'FAILED - not in servo mode'
Example #23
def get_original_grasp_tran(moveit_grasp_msg):
    listener = tf.listener.TransformListener()
    print '1'
    listener.waitForTransform("approach_tran",
                              'staubli_rx60l_link7',
                              rospy.Time(),
                              timeout=rospy.Duration(1.0))
    at_to_ee_tran, at_to_ee_rot = listener.lookupTransform(
        "approach_tran", 'staubli_rx60l_link7', rospy.Time())
    print '2'
    moveit_grasp_msg_final_grasp_tran_matrix = tf_conversions.toMatrix(
        tf_conversions.fromMsg(moveit_grasp_msg.grasp_pose.pose))
    approach_tran_to_end_effector_tran_matrix = tf.TransformerROS(
    ).fromTranslationRotation(at_to_ee_tran, at_to_ee_rot)
    original_ee_pose_matrix = np.dot(
        moveit_grasp_msg_final_grasp_tran_matrix,
        numpy.linalg.inv(approach_tran_to_end_effector_tran_matrix))
    return original_ee_pose_matrix
Example #24
def graspit_grasp_pose_to_moveit_grasp_pose(
        listener,                     # type: tf.TransformListener
        graspit_grasp_msg,            # type: graspit_msgs.msg.Grasp
        end_effector_link,            # type: str
        grasp_frame                   # type: str
):
    # type: (...) -> geometry_msgs.msg.Pose
    """
    :param listener: A transformer for looking up the transformation
    :param graspit_grasp_msg: A graspit grasp message
    """

    try:
        listener.waitForTransform(
            grasp_frame,
            end_effector_link,
            rospy.Time(0),
            timeout=rospy.Duration(1)
        )

        at_to_ee_tran, at_to_ee_rot = listener.lookupTransform(
            grasp_frame,
            end_effector_link,
            rospy.Time()
        )

    except Exception as e:
        rospy.logerr("graspit_grasp_pose_to_moveit_grasp_pose::\n "
                     "Failed to find transform from {} to {}"
                     .format(grasp_frame, end_effector_link)
                     )
        ipdb.set_trace()

        return geometry_msgs.msg.Pose()

    graspit_grasp_msg_final_grasp_tran_matrix = tf_conversions.toMatrix(
        tf_conversions.fromMsg(graspit_grasp_msg.final_grasp_pose)
    )

    approach_tran_to_end_effector_tran_matrix = tf.TransformerROS().fromTranslationRotation(at_to_ee_tran, at_to_ee_rot)
    actual_ee_pose_matrix = np.dot(graspit_grasp_msg_final_grasp_tran_matrix, approach_tran_to_end_effector_tran_matrix)
    actual_ee_pose = tf_conversions.toMsg(tf_conversions.fromMatrix(actual_ee_pose_matrix))

    return actual_ee_pose
Example #25
    def update_from_marker(self):
        try:
            F_target_world = tf_c.fromTf(self.listener_.lookupTransform('/world','/endpoint_interact',rospy.Time(0)))
            F_target_base = tf_c.fromTf(self.listener_.lookupTransform('/base_link','/endpoint_interact',rospy.Time(0)))
            F_base_world = tf_c.fromTf(self.listener_.lookupTransform('/world','/base_link',rospy.Time(0)))
            self.F_command = F_base_world.Inverse()*F_target_world
            M_command = tf_c.toMatrix(self.F_command)

            joint_positions = self.kdl_kin.inverse(M_command, self.q) # inverse kinematics
            if joint_positions is not None:
                pose_sol = self.kdl_kin.forward(joint_positions) # should equal pose
                self.q = joint_positions
            else:
                rospy.logwarn('no solution found')

            # self.send_command()

        except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException) as e:
            rospy.logwarn(str(e))
Example #26
def graspit_grasp_pose_to_moveit_grasp_pose(
        listener,  # type: tf.TransformListener
        graspit_grasp_msg,  # type: graspit_interface.msg.Grasp
        end_effector_link,  # type: str
        grasp_frame  # type: str
):
    # type: (...) -> geometry_msgs.msg.Pose
    """
    :param listener: A transformer for looking up the transformation
    :param graspit_grasp_msg: A graspit grasp message
    """

    try:
        listener.waitForTransform(grasp_frame,
                                  end_effector_link,
                                  rospy.Time(0),
                                  timeout=rospy.Duration(1))

        at_to_ee_tran, at_to_ee_rot = listener.lookupTransform(
            grasp_frame, end_effector_link, rospy.Time())

    except Exception as e:
        rospy.logerr("graspit_grasp_pose_to_moveit_grasp_pose::\n "
                     "Failed to find transform from {} to {}".format(
                         grasp_frame, end_effector_link))
        ipdb.set_trace()

        return geometry_msgs.msg.Pose()

    graspit_grasp_msg_final_grasp_tran_matrix = tf_conversions.toMatrix(
        tf_conversions.fromMsg(graspit_grasp_msg.pose))

    approach_tran_to_end_effector_tran_matrix = tf.TransformerROS(
    ).fromTranslationRotation(at_to_ee_tran, at_to_ee_rot)
    actual_ee_pose_matrix = np.dot(graspit_grasp_msg_final_grasp_tran_matrix,
                                   approach_tran_to_end_effector_tran_matrix)
    actual_ee_pose = tf_conversions.toMsg(
        tf_conversions.fromMatrix(actual_ee_pose_matrix))

    return actual_ee_pose
Example #27
    def callback(self, camera_info_msg, image_msg, camera_pose_msg):
        # wait for plane parameters
        if self.plane is None:
            return

        # camera parameters and image
        camera_matrix = numpy.float32(camera_info_msg.K).reshape(3, 3)
        distortion = numpy.float32(camera_info_msg.D).flatten()
        frame = self.cv_bridge.imgmsg_to_cv2(image_msg, 'bgr8')

        # camera pose
        cam2world = tf_conversions.toMatrix(
            tf_conversions.fromMsg(camera_pose_msg.pose))
        world2cam = numpy.linalg.inv(cam2world)

        tvec = world2cam[:3, 3]
        rvec, jacob = cv2.Rodrigues(world2cam[:3, :3])

        self.plane.draw(frame, camera_matrix, distortion, rvec, tvec)
        for cube in self.cubes:
            cube.draw(frame, camera_matrix, distortion, rvec, tvec, self.plane)

        self.frame = frame
Example #28
def get_graspit_grasp_pose_in_new_reference_frame(
        graspit_grasp_msg_pose,  # type: geometry_msgs.msg.Pose
        target_to_source_translation_rotation,  # type: tuple of tuples
):
    # type: (...) -> geometry_msgs.msg.Pose
    """
    :param target_to_source_translation_rotation: result of listener.lookupTransform(target_frame, grasp_frame, rospy.Time(0))
    :param graspit_grasp_msg_pose: The pose of a graspit grasp message i.e. g.pose
    t_T_G = t_T_s * s_T_G
    """

    graspit_grasp_pose_in_source_frame_matrix = tf_conversions.toMatrix(
        tf_conversions.fromMsg(graspit_grasp_msg_pose)
    )

    source_in_target_frame_tran_matrix = tf.TransformerROS().fromTranslationRotation(
        target_to_source_translation_rotation[0],
        target_to_source_translation_rotation[1])
    graspit_grasp_pose_in_target_frame_matrix = np.dot(source_in_target_frame_tran_matrix,
                                                       graspit_grasp_pose_in_source_frame_matrix)  # t_T_G = t_T_s * s_T_G
    graspit_grasp_pose_in_target_frame = tf_conversions.toMsg(
        tf_conversions.fromMatrix(graspit_grasp_pose_in_target_frame_matrix))

    return graspit_grasp_pose_in_target_frame
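A hedged usage sketch (frame names are placeholders, not from the original source): the tuple argument is exactly what a tf lookup from the target frame to the grasp's source frame returns, giving t_T_G = t_T_s * s_T_G as in the docstring.

# assuming an initialized tf.TransformListener `listener` and a GraspIt! grasp `g`
t_to_s = listener.lookupTransform('object', 'world', rospy.Time(0))  # hypothetical frames
pose_in_target = get_graspit_grasp_pose_in_new_reference_frame(g.pose, t_to_s)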
Example #29
def change_end_effector_link(
        graspit_grasp_msg_pose,  # type: geometry_msgs.msg.Pose
        old_link_to_new_link_translation_rotation,  # type: tuple of tuples
):
    # type: (...) -> geometry_msgs.msg.Pose
    """
    :param old_link_to_new_link_translation_rotation: result of listener.lookupTransform(old_link, new_link, rospy.Time(0))
    :param graspit_grasp_msg_pose: The pose of a graspit grasp message i.e. g.pose
    ref_T_nl = ref_T_ol * ol_T_nl
    """

    graspit_grasp_pose_for_old_link_matrix = tf_conversions.toMatrix(
        tf_conversions.fromMsg(graspit_grasp_msg_pose)
    )

    old_link_to_new_link_transform_matrix = tf.TransformerROS().fromTranslationRotation(
        old_link_to_new_link_translation_rotation[0],
        old_link_to_new_link_translation_rotation[1])
    graspit_grasp_pose_for_new_link_matrix = np.dot(graspit_grasp_pose_for_old_link_matrix,
                                                    old_link_to_new_link_transform_matrix)  # ref_T_nl = ref_T_ol * ol_T_nl
    graspit_grasp_pose_for_new_link = tf_conversions.toMsg(
        tf_conversions.fromMatrix(graspit_grasp_pose_for_new_link_matrix))

    return graspit_grasp_pose_for_new_link
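A matching hedged sketch for change_end_effector_link (link names are placeholders): the pair comes from a lookup between the old and the new end effector links, giving ref_T_nl = ref_T_ol * ol_T_nl.

# assuming an initialized tf.TransformListener `listener` and a GraspIt! grasp `g`
ol_to_nl = listener.lookupTransform('old_ee_link', 'new_ee_link', rospy.Time(0))
pose_for_new_link = change_end_effector_link(g.pose, ol_to_nl)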
Example #30
	def handle_pose_check(self, msg):
		#TODO: fix the msg input and function calls used in this node
		#self.MoveTriad(msg)
		#self.env.RemoveKinBody(self.object)
	
		#rospy.loginfo("handle_pose function called")
		#rospy.logdebug(msg)

		myrot = PyKDL.Rotation.Quaternion(msg.orientation.w, msg.orientation.x, msg.orientation.y, msg.orientation.z)
		r,p,y = PyKDL.Rotation.GetRPY(myrot)
		myrot = PyKDL.Rotation.RPY(r, -p, -y)
		MyFrame = PyKDL.Frame( myrot , PyKDL.Vector(msg.position.x, msg.position.y, msg.position.z) ) 
		FrameMsg1 = tf_conversions.toMsg(MyFrame)
		MyMatrix = tf_conversions.toMatrix(MyFrame)

		# Current location of RAVE model hand
		tmp = self.manip.GetTransform()

		#rospy.logdebug("\n\t\t====Current Matrix====\n" + str(tmp))

		# Convert the message to a matrix, MyMatrix will be used to find an IK solution
		f1 = tf_conversions.fromMsg(msg)
		MyMatrix = tf_conversions.toMatrix(f1)

#		rospy.logdebug("\n\t\t====Matrix For Soln====\n" + str(MyMatrix))
		
		# Spread values unused while working with trigrip
		#if msg.spread or msg.spread == 0:
		#	self.robot.SetDOFValues([msg.spread],[10])

		with self.env:   # Check to see if hand position is in collision.  If so, reset hand position.
			oldJoints = self.robot.GetDOFValues()
			#rospy.logdebug("\n\t====Old joints====\n" + str(oldJoints))
			try:	
				
				IKtype = IkParameterizationType.Transform6D
				#rospy.logdebug(IkParameterization(MyMatrix, IKtype))
				
				# Finds all solutions for a given point and orientation in space
				solns = self.manip.FindIKSolutions(IkParameterization(MyMatrix, IKtype), filteroptions=IkFilterOptions.CheckEnvCollisions)
				#rospy.loginfo("\n\t===solns===\n" + str(solns))

				# Holder for a better solution
				soln = None
				if solns != []:	    
					# Try to find the solution with the least change in joints 1 and 2, tries to minimize jumping
					soln = self.find_soln(solns, oldJoints)
				
				
				# Could be used in lieu of FindIKSolutions, finds just one solution, not necessarily the best
				#soln = self.manip.FindIKSolution(IkParameterization(MyMatrix, IKtype), filteroptions=IkFilterOptions.CheckEnvCollisions)
				
				# We have a solution to apply to the robot
				if(soln != None):
					#rospy.loginfo("\n\t===soln===\n" + str(soln))
					self.robot.SetDOFValues(soln, self.manip.GetArmIndices())
					#Send Command to the physical robot
					if(msg.move):
						# Displays solutions used when A button is pressed
						rospy.loginfo("\n\t===Joint Solution===\n" + str(soln))
						self.send_cmd(soln)

				else:
					rospy.loginfo("No IK solution found")
					#self.env.AddKinBody(self.object)
					return osu_ros_adept.srv.PoseCheckResponse(False,[0,0,0])
			except Exception as e:
				rospy.loginfo("IK not available")
				#rospy.logdebug(e)
			report=CollisionReport()

			for link in self.robot.GetLinks():
				linkCollision = self.env.CheckCollision(link,report)
				if linkCollision:
					rospy.logdebug("Link collision " + str(linkCollision) + "Link: " + str(link))

				# Wam code, not used with Adept/Trigrip
				if "wam0" in str(link): # If looking at the base of the WAM, skip and continue with next link
					continue

				incollision=self.env.CheckCollision(link,report)
				if incollision:# and msg.j_position[1] == 0 and msg.j_position[2] == 0 or incollision and msg.j_position[3] != 0:
					rospy.logdebug("Incollision")
					# Detects collision, reset joints
					self.robot.SetDOFValues(oldJoints)
					rospy.loginfo("Collision detected")
					
					#self.env.AddKinBody(self.object)
					return PoseCheckResponse(False,[0,0,0])
		#self.MoveTriad(msg)
		#if not msg.move: # if not moving, reset robot to old Joint values
		#	self.robot.SetDOFValues(oldJoints)
		
		if msg.get_norm:  # Validated: Return norm is useful when using the Interactive markers to control the WAM
			vals = MyMatrix[0:3,2]
			#self.env.AddKinBody(self.object)
			return PoseCheckResponse(True, vals)
		#self.env.AddKinBody(self.object)
		return PoseCheckResponse(True,[0,0,0])
Example #31
def pose_to_transform_array(pose):
    # TODO: add to Curvox
    frame = tf_conversions.fromMsg(pose)
    transform = tf_conversions.toMatrix(frame)
    return transform
Example #32
def get_transform_lists(bag_file_name):
    init_node()
    broadcaster = tf.TransformBroadcaster()
    listener = tf.TransformListener()
    listener.setUsingDedicatedThread(True)
    checkerboard_in_camera_trans = []
    wrist_in_body_trans = []
    #pdb.set_trace()
    camera_in_body_estimate = None
    checkerboard_to_wrist_estimate = None
    bag = rosbag.Bag(bag_file_name)

    tf_header_ids = set()
    tf_child_ids = set()

    for topic, message, time in bag.read_messages(topics=['tf', '/tf']):

        for tf_message_stamped in message.transforms:
            tf_message = tf_message_stamped.transform
            translation = [
                tf_message.translation.x, tf_message.translation.y,
                tf_message.translation.z
            ]
            rotation = [
                tf_message.rotation.x, tf_message.rotation.y,
                tf_message.rotation.z, tf_message.rotation.w
            ]
            broadcaster.sendTransform(translation, rotation, rospy.Time.now(),
                                      tf_message_stamped.child_frame_id,
                                      tf_message_stamped.header.frame_id)
            tf_message_stamped.header.stamp = rospy.Time.now()
            listener.setTransform(tf_message_stamped, "user")

        for tf_message in message.transforms:
            if tf_message.header.frame_id not in tf_header_ids:
                tf_header_ids.add(tf_message.header.frame_id)
                print 'found new frame %s' % tf_message.header.frame_id

            if tf_message.child_frame_id not in tf_child_ids:
                tf_child_ids.add(tf_message.child_frame_id)
                print 'found new child frame %s' % tf_message.child_frame_id

            if 'wrist_board' in tf_message.header.frame_id or 'wrist_board_corner' in tf_message.child_frame_id:
                print 'found keyframe'
                if camera_in_body_estimate is None:
                    try:
                        listener.waitForTransform('/camera_link', '/world',
                                                  rospy.Time(0),
                                                  rospy.Duration(.01))
                        camera_in_body_tf = listener.lookupTransform(
                            '/camera_link', '/world', rospy.Time(0))

                        print "got camera to world transform"
                        camera_in_body_estimate = tf_conversions.toMatrix(
                            tf_conversions.fromTf(camera_in_body_tf))
                    except Exception:
                        print 'could not get camera to world transform, skipping. Are you sure you ran tf between camera_link and world?'
                        continue
                    print "got camera to world estimate"

                if checkerboard_to_wrist_estimate is None:
                    try:
                        listener.waitForTransform('/wrist_board',
                                                  '/wrist_board_corner',
                                                  rospy.Time(0),
                                                  rospy.Duration(.01))
                        #(trans, rot) = listener.lookupTransform('/wrist_board','/wrist_board_corner',rospy.Time(0))
                        #print trans
                        #print rot
                        checkerboard_to_wrist_tf = listener.lookupTransform(
                            '/wrist_board', '/wrist_board_corner',
                            rospy.Time(0))
                        #print checkerboard_to_wrist_tf
                        #raw_input("press a key")

                        #checkerboard_to_wrist_tf = ((1.75, -0.75, -0.121),(-0.09, -0.04, 0.73, 0.66))
                        #print 'yinxiao test'

                        print "got wristboad in wrist"
                        checkerboard_to_wrist_estimate = tf_conversions.toMatrix(
                            tf_conversions.fromTf(checkerboard_to_wrist_tf))

                    except Exception:
                        print 'could not get wristboard to wrist_board_corner, skipping'
                        continue
                    print "got wristboard in wrist estimate"
                try:
                    listener.waitForTransform('/wrist_board', '/camera_link',
                                              rospy.Time(0),
                                              rospy.Duration(.01))
                    listener.waitForTransform('/wrist_board_corner', '/world',
                                              rospy.Time(0),
                                              rospy.Duration(.1))

                    checkerboard_tf = listener.lookupTransform(
                        '/wrist_board', '/camera_link', rospy.Time(0))
                    #print "got wristboard in camera"

                    checkerboard_in_camera_trans.append(
                        tf_conversions.toMatrix(
                            tf_conversions.fromTf(checkerboard_tf)))

                    #print "got left wrist in world"
                    wrist_in_body_tf = listener.lookupTransform(
                        '/wrist_board_corner', '/world', rospy.Time(0))
                    wrist_in_body_trans.append(
                        tf_conversions.toMatrix(
                            tf_conversions.fromTf(wrist_in_body_tf)))
                except Exception:
                    continue
                #print "finished loop"

    return checkerboard_in_camera_trans, wrist_in_body_trans, camera_in_body_estimate, checkerboard_to_wrist_estimate
Example #33
	def UpdateRobotJoints(self, msg):
		msg2 = None
		#rospy.loginfo('command received')

		MyTrans = self.manip.GetTransform()   # Get the hand transform
		#ftest = tf_conversions.fromMsg(MyTrans)
		#ftest = tf_conversions.toMatrix(ftest)	
		
		f1 = tf_conversions.fromMsg(msg)
		MyMatrix = tf_conversions.toMatrix(f1)


		# Create message to set robot commands
		move_msg = PoseCheck()
		# Copy Postition/Orientation from RobotPose msg to PoseCheck srv
		move_msg.position = msg.position
		move_msg.orientation = msg.orientation
		move_msg.spread = 0.0

		# Check if A button was pressed, if so initiate robot movement
		if(msg.j_position[0]):
			move_msg.move = True
		else:
			move_msg.move = False	
		# not currently used so set to false by default		
		move_msg.get_norm = False
		
		# PRE OSU_ROS_ADEPT CODE, NOT USED IN ADEPT/TRIGRIP PROGRAM

		# Function to run if using Interactive Markers
		if msg.j_position[3] != 0:
			msg2 = PoseCheck()
			mycog = self.object.GetTransform()
			#Add offset of object and subtract a small amount to fix minor Z offset error.
			MyMatrix[0:3,3] = MyMatrix[0:3,3] + mycog[0:3,3] - MyMatrix[0:3,2]*.0175
			f2 = tf_conversions.fromMatrix(MyMatrix)   # Convert the transform to a ROS frame
			msg2.pose = tf_conversions.toMsg(f2) 
			msg2.move = True
		#rospy.logdebug("before receiving error")
		if msg.j_position[1] == 0 and msg.j_position[2] == 1:
			grasp_msg=GraspObject()
			grasp_msg.grasp_direction="open"
			grasp_msg.spread=False
			grasp_msg.get_joint_vals=False		
			self.Grasp_Object(grasp_msg) # Open the hand
			#self.Grasp_Object(grasp_direction="open") # Open the hand
			
		# END NON OSU_ROS_ADEPT CODE

		if msg2 is not None:  # Move arm to new position
			self.handle_pose_check(msg2)
		else:
			# Sends osu_ros_adept message
			self.handle_pose_check(move_msg)
    
		#TODO: Fix this or remove it for the WAM operation.  Cannot use robot.SetTransform.
		# For Palm grasp option
		if msg.j_position[3] == 2:
			HandDirection = MyMatrix[0:3,2]
			for i in range(0,1000):  # loop until the object is touched or limit is reached
				MyMatrix[0:3,3] += .0001 * HandDirection
				with env:
					self.robot.SetTransform(MyMatrix)  # Move hand forward
				for link in self.robot.GetLinks():    # Check for collisions
					incollision=self.env.CheckCollision(link,report)
					if incollision:
						i = 1000
				if i == 1000:   # If hand is in collision, break from the loop
					break

		# Check to see if any of the manipulator buttons have been pressed.  Otherwise, move the hand.
		if msg.j_position[1] == 1 and msg.j_position[2] == 0:
			grasp_msg=GraspObject()
			grasp_msg.grasp_direction="close"
			grasp_msg.spread=False
			grasp_msg.get_joint_vals=False		
			self.Grasp_Object(grasp_msg) # Close the hand
			#self.Grasp_Object(grasp_direction="close") # Close the hand
		return
Example #34
    def joints_callback(self, pos):
		#rospy.logdebug(pos)
		# perform calculations to find new location        
		newpos = tf_conversions.fromMsg(pos)
		newpos = tf_conversions.toMatrix(newpos)

		#rospy.logdebug(newpos)
	
		# Create two matrices to modify the RPY matrix.  This is due to the fact that
		# the BarrettWAM hand is rotated 90deg with respect to the global frame.
		# The M.GetRPY function and Rotation.RPY use the X-axis as the roll axis
		# whereas the WAM wants to roll about the Z-axis.  Must rotate the RPY values
		# 90deg, then calculate the new RPY coordinates, then rotate back to OpenRAVE
		# coordinates again.
		#modMatrix = PyKDL.Rotation.RPY(0, -3.14159/2, 0)
		#modMatrix2 = PyKDL.Rotation.RPY(0, 3.14159/2, 0)

		oldrot = PyKDL.Rotation(newpos[0,0],newpos[0,1],newpos[0,2],newpos[1,0],newpos[1,1],newpos[1,2],newpos[2,0],newpos[2,1],newpos[2,2])
		#oldrot = oldrot * modMatrix  # rotating matrix
	
		# Get the RPY values from the new rotated matrix
		ftest = PyKDL.Frame(oldrot, PyKDL.Vector(0,0,0))
		testrot = ftest.M.GetRPY()

		# Calculate the new positions and roll, pitch, yaw
		roll = testrot[0] + .025 * (self.buttons[RB_IDX] - self.buttons[LB_IDX]) 
		pitch = testrot[1]
		if testrot[1] < -1.5 and self.axes[RIGHT_VERT] > 0 or testrot[1] > 1.5 and self.axes[RIGHT_VERT] < 0:
			pitch = testrot[1]
		else:
			pitch = testrot[1] - .025 * self.axes[RIGHT_VERT]
		yaw = testrot[2] + .025 * self.axes[RIGHT_HOR]
		z = ((self.axes[LEFT_TRIG]-3) - (self.axes[RIGHT_TRIG]-3))

	#
	#
	#  Two different styles of control that move the hand in different ways
	#
	#
		
		
	# Old move controls	
		#newpos[0,3] = newpos[0,3] + .01 * self.axes[LEFT_VERT]
		#newpos[1,3] = newpos[1,3] + .01 * self.axes[LEFT_HOR]
		#newpos[2,3] = newpos[2,3] + .01 * z
		
		#Weight to influence the speed of simulated robot motion				
		weight = .01 

	# "Cockpit" style control method
		newpos[0,3] = newpos[0,3] - weight * self.axes[LEFT_HOR] * math.sin(yaw) + weight * self.axes[1] * math.cos(yaw) * math.cos(pitch) + weight * z * math.sin(pitch) * math.cos(yaw)
		newpos[1,3] = newpos[1,3] + weight * self.axes[LEFT_VERT] * math.sin(yaw) * math.cos(pitch) + weight * self.axes[LEFT_HOR] * math.cos(yaw) + weight * z * math.sin(pitch) * math.sin(yaw)
		newpos[2,3] = newpos[2,3] + weight * z * math.cos(pitch) - weight * self.axes[LEFT_VERT] * math.sin(pitch)
	#rospy.logdebug('cos, sin, yaw: {0} ,{1} ,{2} ' .format(math.cos(yaw), math.sin(yaw), yaw) )


	# Create a frame for publishing.  Make sure to rotate back before creating the frame.
		v = PyKDL.Vector(newpos[0,3],newpos[1,3],newpos[2,3])
		r1 = PyKDL.Rotation.RPY(roll, pitch, yaw)
		#r1 = r1 * modMatrix2
		f1 = PyKDL.Frame(r1, v)	
	
	
	#the hand is non-existent. left over code from the wam. May be useful if someone attached a hand
	# Get the hand values and calculate the new positions
		#spread = pos.j_position[0]
		movement = self.buttons[A_IDX]		
		"""if self.buttons[2] and spread < 3.1:
			spread = spread + .01
		if self.buttons[0] and spread > 0:
			spread = spread - .01
		"""

		
		# publish the new position
		FrameMsg = tf_conversions.toMsg(f1)
		j_pos = array([movement,self.buttons[B_IDX],self.buttons[Y_IDX],0])
		#rospy.logdebug(self.buttons)
		j_vel = array([0,0,0,0])

		MyMsg = RobotPose()
		MyMsg.position = FrameMsg.position
		MyMsg.orientation = FrameMsg.orientation
		MyMsg.j_position = j_pos
		MyMsg.j_velocity = j_vel

		self.jointpub.publish(MyMsg)
Example #35
def transform2matrix(transform):
    T = tf_conversions.fromTf(transform)
    T = tf_conversions.toMatrix(T)
    return T
Example #36
def callback_c2x_tf(data):
    """
    Stores the last c2x message, but transforms it beforehand.
    Transformation is only for position and velocity data, since this is the only data that is actively used by the
    program. The only exception is the plotting of bounding boxes. These will therefore not be rotated correctly, but
    their size will be correct anyway.
    """
    global history, steps, constant_velo, c2x_offset_x, c2x_offset_y
    tracks = data.tracks
    head = SimulatedVehicle.create_def_header(frame_id=src_id)
    # transformer: tf.TransformerROS
    for track in tracks:
        time_obj = rospy.Time(0)
        x_pos = track.box.center_x
        y_pos = track.box.center_y
        x_vel = track.box.velocity_x
        y_vel = track.box.velocity_y
        try:
            # Try to transform the point
            # Create a point with z=0 for the source frame (out of c2x tracks x/y) coordinate
            point = PointStamped(header=head,
                                 point=Point(x=x_pos, y=y_pos, z=0))
            # Don't use the current timestamp, use 0 to use the latest available tf data
            point.header.stamp = rospy.Time(0)
            # Now transform the point using the data
            tf_point = transformer.transformPoint(target_frame=dest_id,
                                                  ps=point)

            # Acquire the transformation matrix for this
            tf_mat = tf_c.toMatrix(
                tf_c.fromTf(
                    transformer.lookupTransform(target_frame=dest_id,
                                                source_frame=src_id,
                                                time=rospy.Time(0))))
            # print(tf_mat)

            # tf_vel stores the transformed velocity
            tf_vel = np.dot(tf_mat[0:2, 0:2], [x_vel, y_vel])
            # Update the track with the transformed data
            track.box.center_x = tf_point.point.x + c2x_offset_x
            track.box.center_y = tf_point.point.y + c2x_offset_y
            track.box.velocity_x = tf_vel[0]
            track.box.velocity_y = tf_vel[1]
            # DEBUG
            # print(str(track.box.center_x)+"\t"+str(track.box.center_y))
            # print("steps: "+str(steps)+"\tvelx: "+str(point_vel.point.x)+" vely: "+str(point_vel.point.y))
        except tf.ExtrapolationException as e:
            # Extrapolation error, print but keep going (possibly just because only one frame was received so far)
            print(e)
        except tf.ConnectivityException as e:
            # print("Connectivity Exception during transform")
            print(e)
        except tf.LookupException as e:
            print(e)
            pass
    # Now change the frame_id to dest_id, since the tf was already performed
    data.header.frame_id = dest_id

    c2x.append(data)
    history.add(
        "c2x_0",
        data.tracks)  # Add the data to the history object under the name c2x_0

    # The following block adds several fake measurements to the c2x tracking that are time delayed to the original
    # measurement from the current timestep
    # They also have changed x/y data based on velocity, so that the track doesn't need to be reused later on with the
    # outdated coordinates and instead can work with "updated" coordinates for time steps that are not included in the
    # real c2x messages.
    add_points = 4  # how many "fake" points should be added between this and the next measurement
    # Currently the number and timing of "fake" points are fixed. They are selected to fit the data sets 1&2, but
    # in general it can be better to use previous data to estimate the time difference and number of points
    if len(data.tracks
           ) > 0:  # only do the following if the track contained something
        for i in range(add_points):
            # add 4 more measurements, each based on the data, but with manipulated time and position based on velocity
            fake_data = copy.deepcopy(data)  # Create a copy of the data
            # now change the timestamp of this new data

            # c2x data arrives every 0.2 seconds, so to fit an additional 4 measurements
            time_shift = rospy.Duration(0, 40000000)  # increase by 1/25 of a sec (0.04 s)
            # this would need to change if it should automatically fit to data of other data sets.

            fake_data.header.stamp = fake_data.header.stamp + time_shift
            for track in fake_data.tracks:
                track.box.header.stamp = track.box.header.stamp + time_shift
                track.box.center_x += constant_velo * track.box.velocity_x * (
                    i + 1)
                track.box.center_y += constant_velo * track.box.velocity_y * (
                    i + 1)
            c2x.append(fake_data)
            history.add("c2x_0", fake_data.tracks)

    steps = 0
Example #37
def get_transform_lists(bag_file_name):
    init_node()
    broadcaster = tf.TransformBroadcaster()
    listener = tf.TransformListener()
    listener.setUsingDedicatedThread(True)
    checkerboard_in_camera_trans = []
    wrist_in_body_trans = []
    #pdb.set_trace()
    camera_in_body_estimate = None
    checkerboard_to_wrist_estimate = None
    bag = rosbag.Bag(bag_file_name)

    tf_header_ids = set()
    tf_child_ids = set()

    for topic, message, time in bag.read_messages(topics=['tf', '/tf']):

        for tf_message_stamped in message.transforms:
            tf_message = tf_message_stamped.transform
            translation = [tf_message.translation.x, tf_message.translation.y, tf_message.translation.z]
            rotation = [tf_message.rotation.x, tf_message.rotation.y, tf_message.rotation.z, tf_message.rotation.w]
            broadcaster.sendTransform( translation, rotation, rospy.Time.now(), tf_message_stamped.child_frame_id, tf_message_stamped.header.frame_id)
            tf_message_stamped.header.stamp = rospy.Time.now()
            listener.setTransform(tf_message_stamped,"user")

        for tf_message in message.transforms:
            if tf_message.header.frame_id not in tf_header_ids:
                tf_header_ids.add(tf_message.header.frame_id)
                print 'found new frame %s' % tf_message.header.frame_id

            if tf_message.child_frame_id not in tf_child_ids:
                tf_child_ids.add(tf_message.child_frame_id)
                print 'found new child frame %s' % tf_message.child_frame_id

            if 'wrist_board' in tf_message.header.frame_id or 'wrist_board_corner' in tf_message.child_frame_id:
                print 'found keyframe'
                if camera_in_body_estimate is None:
                    try:
                        listener.waitForTransform('/camera_link','/world',rospy.Time(0), rospy.Duration(.01))
                        camera_in_body_tf = listener.lookupTransform('/camera_link','/world',rospy.Time(0))
                        
                        print "got camera to world transform"
                        camera_in_body_estimate = tf_conversions.toMatrix(tf_conversions.fromTf(camera_in_body_tf))
                    except Exception:
                        print 'could not get camera to world transform, skipping. Are you sure you ran tf between camera_link and world?'
                        continue
                    print "got camera to world estimate"

                if checkerboard_to_wrist_estimate is None:
                    try:
                        listener.waitForTransform('/wrist_board','/wrist_board_corner',rospy.Time(0), rospy.Duration(.01))
                        #(trans, rot) = listener.lookupTransform('/wrist_board','/wrist_board_corner',rospy.Time(0))
                        #print trans
                        #print rot
                        checkerboard_to_wrist_tf = listener.lookupTransform('/wrist_board','/wrist_board_corner',rospy.Time(0))
                        #print checkerboard_to_wrist_tf
                        #raw_input("press a key")

                        #checkerboard_to_wrist_tf = ((1.75, -0.75, -0.121),(-0.09, -0.04, 0.73, 0.66))
                        #print 'yinxiao test'

                        print "got wristboad in wrist"
                        checkerboard_to_wrist_estimate = tf_conversions.toMatrix(tf_conversions.fromTf(checkerboard_to_wrist_tf))
                        
                    except Exception:
                        print 'could not get wristboard to wrist_board_corner, skipping'
                        continue
                    print "got wristboard in wrist estimate"
                try:
                    listener.waitForTransform('/wrist_board','/camera_link',rospy.Time(0), rospy.Duration(.01))
                    listener.waitForTransform('/wrist_board_corner','/world',rospy.Time(0), rospy.Duration(.1))

                    checkerboard_tf = listener.lookupTransform('/wrist_board','/camera_link',rospy.Time(0))
                    #print "got wristboard in camera"

                    checkerboard_in_camera_trans.append(tf_conversions.toMatrix(tf_conversions.fromTf(checkerboard_tf)))

                    #print "got left wrist in world"
                    wrist_in_body_tf = listener.lookupTransform('/wrist_board_corner','/world',rospy.Time(0))
                    wrist_in_body_trans.append(tf_conversions.toMatrix(tf_conversions.fromTf(wrist_in_body_tf)))
                except Exception:
                    continue
                #print "finished loop"

    return checkerboard_in_camera_trans, wrist_in_body_trans, camera_in_body_estimate, checkerboard_to_wrist_estimate
Example #38
    def get_camera_tf(self):

        camera_frame_id = '/%s_camera_frame_id'%(self.arm)
        tf_listener.waitForTransform('/base',camera_frame_id, rospy.Time(0), rospy.Duration(2))
        image_tf = tf_listener.lookupTransform('/base',image.header.frame_id, rospy.Time(0))
        self.camera_transform = tf_conversions.toMatrix((tf_conversions.fromTf(image_tf)))