Example #1
def deltaPose(currPose, desPose, posFrame=None, rotFrame=None):
    """
    Returns the delta pose desPose - currPose: the position difference and the
    rotation taking currPose's orientation to desPose's, optionally expressed
    in posFrame / rotFrame
    """

    currPose, desPose = tfx.pose(currPose), tfx.pose(desPose)
    currPoseFrame, desPoseFrame = currPose.frame, desPose.frame
    
    currPos, desPos = currPose.position, desPose.position
    currRot, desRot = currPose.orientation, desPose.orientation

    if posFrame is not None and currPoseFrame is not None:
        tf_currPos_to_posFrame = tfx.lookupTransform(posFrame, currPoseFrame, wait=10)
        currPos = tf_currPos_to_posFrame * currPos

        tf_desPos_to_posFrame = tfx.lookupTransform(posFrame, desPoseFrame, wait=10)
        desPos = tf_desPos_to_posFrame * desPos

    if rotFrame is not None and currPoseFrame is not None:
        tf_currRot_to_rotFrame = tfx.lookupTransform(rotFrame, currPoseFrame, wait=10)
        currRot = tf_currRot_to_rotFrame * currRot

        tf_desRot_to_rotFrame = tfx.lookupTransform(rotFrame, desPoseFrame, wait=10)
        desRot = tf_desRot_to_rotFrame * desRot

    deltaPosition = desPos.array - currPos.array
    
    currQuat, desQuat = tfx.tb_angles(currRot).quaternion, tfx.tb_angles(desRot).quaternion
    deltaQuat = tft.quaternion_multiply(tft.quaternion_inverse(currQuat), desQuat)

    deltaPose = tfx.pose(deltaPosition, deltaQuat)

    return deltaPose
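
A minimal sanity check of the delta-quaternion convention used above, assuming numpy and tf.transformations (the tft alias in the example) are available; the two test orientations are hypothetical:

import numpy as np
import tf.transformations as tft

currQuat = tft.quaternion_about_axis(0.3, (0, 0, 1))  # hypothetical current orientation
desQuat = tft.quaternion_about_axis(0.9, (1, 0, 0))   # hypothetical desired orientation

# same construction as deltaPose: delta = inverse(current) * desired
deltaQuat = tft.quaternion_multiply(tft.quaternion_inverse(currQuat), desQuat)

# composing the delta back onto the current orientation recovers the desired one
recovered = tft.quaternion_multiply(currQuat, deltaQuat)
assert np.allclose(recovered, desQuat)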
 def getEstimatedPose(self):
     if self.estimated_gripper_pose is not None:
         estimated_gripper_pose = self.estimated_gripper_pose
         estimated_time = estimated_gripper_pose.stamp.seconds
     else:
         pose = tfx.pose([0,0,0], [0,0,0,1], frame=self.tool_frame, stamp=rospy.Time.now())
         tf_tool_to_cam = tfx.lookupTransform(self.camera_frame,self.tool_frame)
         estimated_gripper_pose = tf_tool_to_cam * pose
         estimated_time = rospy.Time.now().secs
         
     #estimated_time = estimated_gripper_pose.stamp.seconds
     detected_time = self.detected_gripper_pose.stamp.seconds if self.detected_gripper_pose is not None else -float("inf")
     
     if not estimated_time > detected_time:
         if math.fabs(estimated_time - detected_time) < 1:
             time_since_detection = 0
         else:
             time_since_detection = 0#float("inf")
     else:
         time_since_detection = estimated_time - detected_time
     #time_since_detection = (estimated_time - detected_time) if estimated_time > detected_time else float("inf")
     
     if self.print_messages:
         if self.detected_gripper_pose is None:
             rospy.loginfo('detected_gripper_pose is None')
         rospy.loginfo('estimated_time - detected_time = {0}'.format(estimated_time - detected_time))
         
     self.estimated_gripper_pose = None
     return estimated_gripper_pose, time_since_detection
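
The staleness bookkeeping above reduces to a single comparison (both sub-branches of the first case return 0); a minimal sketch of the same logic, with a hypothetical function name:

def staleness(estimated_time, detected_time):
    # a detection at or after the estimate means the estimate is fresh
    if estimated_time <= detected_time:
        return 0
    # otherwise report how far the estimate has drifted past the last detection
    return estimated_time - detected_time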
    def __init__(self, arm=Constants.Arm.Left):
        self.arm = arm

        if self.arm == Constants.Arm.Left:
            self.toolframe = Constants.Frames.LeftTool
        else:
            self.toolframe = Constants.Frames.RightTool

        self.transFrame = Constants.Frames.Link0
        self.rotFrame = self.toolframe
            
        self.gripperControl = GripperControlClass(arm, tf.TransformListener())
        self.imageDetector = ARImageDetectionClass()
                
        self.homePose = self.imageDetector.getHomePose()

        # ar_frame is the target
        self.ar_frame = '/stereo_33'
        self.objectPose = tfx.pose([0,0,0], frame=self.ar_frame)
        tf_ar_to_link0 = tfx.lookupTransform(Constants.Frames.Link0, self.ar_frame, wait=5)
        self.objectPose = tf_ar_to_link0 * self.objectPose
        self.objectPose = tfx.pose(self.objectPose.position, tfx.tb_angles(-90,90,0))
        self.objectPose.position.y -= .002

        rospy.sleep(3)
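
The tfx.lookupTransform pattern in this constructor recurs throughout these examples: lookupTransform(to_frame, from_frame) returns the transform that re-expresses a from_frame pose in to_frame, and left-multiplying a pose applies it. A minimal sketch, assuming a live TF tree publishing both frames (the frame names follow the Raven examples below):

pose_tool = tfx.pose([0, 0, 0], frame='/tool_R')                 # identity pose at the tool tip
tf_tool_to_link0 = tfx.lookupTransform('/0_link', '/tool_R', wait=5)
pose_link0 = tf_tool_to_link0 * pose_tool                        # same pose in /0_link coordinates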
Example #5
    def __init__(self):
        self.env = rave.Environment()
        self.env.SetViewer('qtcoin')

        rospy.loginfo('Before loading model')
        ravenFile = os.path.join(roslib.packages.get_pkg_subdir('RavenDebridement','models'),'myRaven.xml')
        
        #ravenFile = '/home/gkahn/ros_workspace/RavenDebridement/models/myRaven.xml'
        self.env.Load(ravenFile)
        rospy.loginfo('After loading model')
        
        self.robot = self.env.GetRobots()[0]
        
        # setup variables for kinect constraint generation
        self.num_cd_components = 50
        self.geom_type = 'cd'
        self.kinect_topic = '/camera/depth/points'
        self.kinect_depth_frame = '/camera_depth_optical_frame'
        self.robot_frame = '/world'
        self.T_kinect_to_robot = tfx.lookupTransform(self.robot_frame, self.kinect_depth_frame, wait=20)
        
        self.cd_body_names = []
        self.cloud_id = 0
        
        rospy.Subscriber(self.kinect_topic, PointCloud2, self.kinect_callback)
Example #6
    def __init__(self, tf_listener=None, arms=['R']):
        self.stages = []
        self.init_poses = {}
        self.start_time = None

        self.arms = arms

        self.default_speed = 0.01

        self.tf_listener = tf_listener
        if self.tf_listener is None:
            self.tf_listener = tfx.TransformListener.instance()
        rospy.loginfo('waiting for transform')
        for arm in arms:
            self.tf_listener.waitForTransform('/0_link', '/tool_' + arm,
                                              rospy.Time(0), rospy.Duration(5))

        for arm in arms:
            self.init_poses[arm] = tfx.pose(
                tfx.lookupTransform('/0_link', '/tool_' + arm))

        self.pub_cmd = rospy.Publisher('raven_command', RavenCommand)

        self.current_state = None
        self.current_poses = {}
        self.current_grasps = {}
        self.state_sub = rospy.Subscriber('raven_state',
                                          raven_2_msgs.msg.RavenState,
                                          self._state_callback)

        self.header = Header()
        self.header.frame_id = '/0_link'
Example #7
    def getEstimatedPose(self):
        if self.estimated_gripper_pose is not None:
            estimated_gripper_pose = self.estimated_gripper_pose
            estimated_time = estimated_gripper_pose.stamp.seconds
        else:
            pose = tfx.pose([0, 0, 0], [0, 0, 0, 1],
                            frame=self.tool_frame,
                            stamp=rospy.Time.now())
            tf_tool_to_cam = tfx.lookupTransform(self.camera_frame,
                                                 self.tool_frame)
            estimated_gripper_pose = tf_tool_to_cam * pose
            estimated_time = rospy.Time.now().secs

        #estimated_time = estimated_gripper_pose.stamp.seconds
        detected_time = self.detected_gripper_pose.stamp.seconds if self.detected_gripper_pose is not None else -float(
            "inf")

        if not estimated_time > detected_time:
            if math.fabs(estimated_time - detected_time) < 1:
                time_since_detection = 0
            else:
                time_since_detection = 0  #float("inf")
        else:
            time_since_detection = estimated_time - detected_time
        #time_since_detection = (estimated_time - detected_time) if estimated_time > detected_time else float("inf")

        if self.print_messages:
            if self.detected_gripper_pose is None:
                rospy.loginfo('detected_gripper_pose is None')
            rospy.loginfo(
                'estimated_time - detected_time = {0}'.format(estimated_time -
                                                              detected_time))

        self.estimated_gripper_pose = None
        return estimated_gripper_pose, time_since_detection
	def __init__(self,tf_listener=None,arms=['R']):
		self.stages = []
		self.init_poses = {}
		self.start_time = None
		
		self.arms = arms
		
		self.default_speed = 0.01
		
		self.tf_listener = tf_listener
		if self.tf_listener is None:
			self.tf_listener = tfx.TransformListener.instance()
		rospy.loginfo('waiting for transform')
		for arm in arms:
			self.tf_listener.waitForTransform('/0_link','/tool_'+arm,rospy.Time(0),rospy.Duration(5))
		
		for arm in arms:
			self.init_poses[arm] = tfx.pose(tfx.lookupTransform('/0_link','/tool_'+arm))
			
		self.pub_cmd = rospy.Publisher('raven_command', RavenCommand)
		
		self.current_state = None
		self.current_poses = {}
		self.current_grasps = {}
		self.state_sub = rospy.Subscriber('raven_state',raven_2_msgs.msg.RavenState,self._state_callback)
	
		self.header = Header()
		self.header.frame_id = '/0_link'
Example #9
def test_fk():
    arm = Arm('left')
    actual_pose = tfx.lookupTransform('base_link', 'l_gripper_tool_frame').as_pose()
    fk_pose = arm.fk(arm.current_joints)
    
    print('actual_pose: {0}'.format(actual_pose.matrix))
    print('fk_pose: {0}'.format(fk_pose.matrix))
    
    print('difference: {0}'.format(np.linalg.norm(actual_pose.matrix - fk_pose.matrix)))
    
    IPython.embed()
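
The Frobenius norm above mixes translation error (meters) with rotation error (unitless). A hedged continuation of test_fk that reports the two separately, assuming the actual_pose and fk_pose matrices from the function body:

import numpy as np

T_actual = np.array(actual_pose.matrix)
T_fk = np.array(fk_pose.matrix)
pos_err = np.linalg.norm(T_actual[:3, 3] - T_fk[:3, 3])    # translation error, meters
rot_err = np.linalg.norm(T_actual[:3, :3] - T_fk[:3, :3])  # rotation error, unitless
print('position error: {0}, rotation error: {1}'.format(pos_err, rot_err))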
Example #10
def test_fk():
    arm = Arm('left')
    actual_pose = tfx.lookupTransform('base_link', 'l_gripper_tool_frame').as_pose()
    fk_pose = arm.fk(arm.current_joints)
    
    print('actual_pose: {0}'.format(actual_pose.matrix))
    print('fk_pose: {0}'.format(fk_pose.matrix))
    
    print('difference: {0}'.format(np.linalg.norm(actual_pose.matrix - fk_pose.matrix)))
    
    IPython.embed()
def determineROI(info):
    width = 250
    height = 250
    pose = tfx.pose([0, 0, 0], [0, 0, 0, 1], frame="/tool_L", stamp=rospy.Time.now())
    tf_tool_to_cam = tfx.lookupTransform("/left_optical_frame", "/tool_L", wait=2)
    pose = tf_tool_to_cam * pose
    stereoModel = image_geometry.StereoCameraModel()
    stereoModel.fromCameraInfo(info["l"], info["r"])
    (u_l, v_l), (u_r, v_r) = stereoModel.project3dToPixel(pose.position.list)
    print "LEFT CENTER : " + str(u_l) + ", " + str(v_l)
    print "RIGHT CENTER: " + str(u_r) + ", " + str(v_r)
    return (u_l, v_l), (u_r, v_r)
    def tapeCallback(self, msg):
        if not self.record:
            return

        self.recordCount += 1
        rospy.loginfo('Received tape pose number {0}'.format(self.recordCount))
        
        tapePose = tfx.pose(msg)
        stamp = tapePose.stamp

        tf_tape_to_link0 = tfx.lookupTransform(Constants.Frames.Link0, tapePose.frame, wait=4)
        tapePose = tf_tape_to_link0 * tapePose

        tfPose = tfx.pose([0,0,0], frame=self.toolFrame, stamp=stamp)
        tf_tfpose_to_link0 = tfx.lookupTransform(Constants.Frames.Link0, tfPose.frame, wait=4)
        tfPose = tf_tfpose_to_link0 * tfPose
        
        deltaPose = tfx.pose(Util.deltaPose(tfPose, tapePose))
        
        self.tapePoses.append(tapePose)
        self.tfPoses.append(tfPose)
        self.deltaPoses.append(deltaPose)
Example #13
    def __init__(self, davinciArmRight, traj, left_arm):
        super(self.__class__, self).__init__()
        smach.State.__init__(self, outcomes = ['success'], output_keys = ['new_traj_R'])
        self.davinciArmRight = davinciArmRight
        self.traj = traj
        self.left_arm = left_arm
        self.original_left_arm_traj = self.left_arm.traj
        self.sub = rospy.Subscriber("/circle_cut/ellipse_points_3d", MarkerArray, self.circle_position_cb)
        self.pub1_L = rospy.Publisher("/circle_cut/warped_traj_L", PoseStamped)
        self.pub2_L = rospy.Publisher("/circle_cut/original_traj_L", PoseStamped)
        self.pub1_R = rospy.Publisher("/circle_cut/warped_traj_R", PoseStamped)
        self.pub2_R = rospy.Publisher("/circle_cut/original_traj_R", PoseStamped)
        self.circle_0 = tfx.pose([-0.00746613866728, 0.0368420724794, 0.384582169117], (0.0, 0.0, 0.0, 1.0))
        self.circle_1 = None
        # Transformation between circle_0 and circle_1
        self.A = None

        # Code from previous warp technique
        # Transform from camera frame to left arm robot frame
        self.B_L = tfx.lookupTransform('one_remote_center_link', 'left_AD')
        # Transform from camera frame to right arm robot frame
        self.B_R = tfx.lookupTransform('two_remote_center_link', 'left_AD')
Example #15
def convertToFrame(p, frame):
    """
    Takes in a tfx pose stamped and returns it in frame
    """
    p = tfx.pose(p)
    
    if p.frame and p.frame != frame:
        try:
            tf_pframe_to_frame = tfx.lookupTransform(frame, p.frame, wait=10)
        except Exception, e:
            print frame, p.frame
            raise e
        p = tf_pframe_to_frame * p

    return p
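
A hedged usage example for convertToFrame, assuming a live TF tree; the frame names are borrowed from other examples on this page:

p_tool = tfx.pose([0, 0, 0], frame='/tool_L')
p_link0 = convertToFrame(p_tool, '/0_link')  # same pose, now expressed in /0_link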
 def _tracked_pose_cb(self, arm, msg):
     if not self.is_recording:
         return
     if self.camera_frame is None:
         self.camera_frame = msg.header.frame_id
     
     if self.camera_to_robot_tf is None and self.camera_frame is not None and self.robot_frame is not None:
         self.camera_to_robot_tf = tfx.lookupTransform(self.robot_frame, self.camera_frame).array
     elif self.camera_to_robot_tf is not None:
         # store poses, put them in the correct frame
         self.camera_msgs = self.camera_msgs+1
         pose = tfx.pose(msg)
         #print pose
         self.camera_poses[arm].append((pose.stamp.seconds,self.camera_to_robot_tf.dot(pose.array)))
 def _truthCallback(self,arm,msg):
     if not self.estimateFromImage.get(arm, True):
         return
     
     #rospy.loginfo("%f",msg.header.stamp.to_sec())
     try:
         #rospy.loginfo('looking up transform')
         tf_msg_to_link0 = tfx.lookupTransform(Constants.Frames.Link0, msg.header.frame_id, wait=5)
         truthPose = tf_msg_to_link0 * tfx.pose(msg)
         
         #rospy.loginfo('found transform')
         #truthPose = tfx.convertToFrame(msg, Constants.Frames.Link0, ignore_stamp=True)
     except Exception, e:
         print e
         raise e
 def _truthCallback(self,arm,msg):
     if not self.estimateFromImage.get(arm, True):
         return
     
     #rospy.loginfo("%f",msg.header.stamp.to_sec())
     try:
         #rospy.loginfo('looking up transform')
         tf_msg_to_link0 = tfx.lookupTransform(raven_constants.Frames.Link0, msg.header.frame_id, wait=5)
         truthPose = tf_msg_to_link0 * tfx.pose(msg)
         
         #rospy.loginfo('found transform')
         #truthPose = tfx.convertToFrame(msg, raven_constants.Frames.Link0, ignore_stamp=True)
     except Exception, e:
         print e
         raise e
Example #19
 def _handles_loop(self):
     """
     For each handle in HandleListMsg,
     calculate average pose
     """
     rospy.sleep(5)
     
     while not rospy.is_shutdown() and not self.exited:
         rospy.sleep(.01)
         
         handle_list_msg = self.handle_list_msg
         
         pose_array = gm.PoseArray()
         pose_array.header.frame_id = handle_list_msg.header.frame_id
         pose_array.header.stamp = rospy.Time.now()
         
         avg_pose_array = gm.PoseArray()
         avg_pose_array.header.frame_id = handle_list_msg.header.frame_id
         avg_pose_array.header.stamp = rospy.Time.now()
 
         cam_to_base = tfx.lookupTransform('base_link', handle_list_msg.header.frame_id).matrix[:3,:3]
         switch = np.matrix([[0, 1, 0],
                             [1, 0, 0],
                             [0, 0, 1]])        
         for handle in handle_list_msg.handles:
             all_poses = [tfx.pose(cylinder.pose, stamp=rospy.Time.now(), frame=handle_list_msg.header.frame_id) for cylinder in handle.cylinders]
             
             rotated_poses = [tfx.pose(p.position, tfx.tb_angles(p.orientation.matrix*switch)) for p in all_poses]
             filtered_poses = list()
             for rot_pose in rotated_poses:
                 r_base = cam_to_base*rot_pose.orientation.matrix
                 if r_base[0,0] > 0:
                     if r_base[2,2] > 0:
                         rot_pose.orientation = tfx.tb_angles(rot_pose.orientation.matrix*tfx.tb_angles(0,0,180).matrix) 
                     filtered_poses.append(rot_pose)
             
             pose_array.poses += [pose.msg.Pose() for pose in filtered_poses]
             
             if len(filtered_poses) > 0:
                 avg_position = sum([p.position.array for p in filtered_poses])/float(len(filtered_poses))
                 avg_quat = sum([p.orientation.quaternion for p in filtered_poses])/float(len(filtered_poses))
                 avg_pose_array.poses.append(tfx.pose(avg_position, avg_quat).msg.Pose())
             
             
         if len(pose_array.poses) > 0:
             self.handles_pose_pub.publish(pose_array)
             self.avg_handles_pose_pub.publish(avg_pose_array)
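
The componentwise quaternion average above is only a reasonable approximation for tightly clustered orientations, and the summed quaternion should be renormalized before use (tfx.pose may normalize internally, but relying on that is implicit). A minimal numpy sketch with a hypothetical function name:

import numpy as np

def average_quaternions(quats):
    # q and -q encode the same rotation, so flip each quaternion onto the
    # hemisphere of the first before averaging
    quats = np.asarray(quats, dtype=float)
    quats = np.array([q if np.dot(q, quats[0]) >= 0 else -q for q in quats])
    mean = quats.mean(axis=0)
    return mean / np.linalg.norm(mean)  # renormalize to a unit quaternion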
    def getObjectPose(self):
        """
        Returns object point plus the table normal as the orientation
        
        Returns None if no object found
        """
        if not self.hasFoundObject():
            return None

        objectPoint = tfx.point(self.objectPoint, stamp=rospy.Time.now())
        tf_point_to_normal = tfx.lookupTransform(Constants.Frames.Link0, objectPoint.frame, wait=10)
        objectPoint = tf_point_to_normal * objectPoint


        objectPose = tfx.pose(self.objectPoint, self.normal)

        return objectPose.msg.PoseStamped()
Example #21
def testExecuteJointTrajectoryBSP(arm=raven_constants.Arm.Right):
    rospy.init_node('test_trajopt',anonymous=True)
    ravenArm = RavenArm(arm)
    ravenPlanner = RavenBSP(arm)
    rospy.sleep(4)


    if arm == raven_constants.Arm.Right:
        toolframe = raven_constants.Frames.RightTool
    else:
        toolframe = raven_constants.Frames.LeftTool

    currPose = tfx.pose([0,0,0], frame=toolframe)
    tf_tool_to_link0 = tfx.lookupTransform(raven_constants.Frames.Link0, currPose.frame, wait=5)
    currPose = tf_tool_to_link0 * currPose

    angle = tfx.tb_angles(-90,90,0)
    endPosition = currPose.position
    endPosition.y += .05
    endPose = tfx.pose(endPosition, angle,frame=raven_constants.Frames.Link0)


    rospy.loginfo('Press enter to call bsp')
    raw_input()

    jointTraj = ravenPlanner.getTrajectoryFromPose(endPose)
    
    if jointTraj is None:
        return
    
    rospy.loginfo('Press enter to move')
    raw_input()

    ravenArm.start()
    rospy.sleep(1)
    
    ravenArm.executeJointTrajectory(jointTraj)
    
    
    rospy.loginfo('Press enter to exit')
    raw_input()
def testExecuteJointTrajectoryBSP(arm=MyConstants.Arm.Right):
    rospy.init_node('test_trajopt',anonymous=True)
    ravenArm = RavenArm(arm)
    ravenPlanner = RavenBSP(arm)
    rospy.sleep(4)


    if arm == MyConstants.Arm.Right:
        toolframe = MyConstants.Frames.RightTool
    else:
        toolframe = MyConstants.Frames.LeftTool

    currPose = tfx.pose([0,0,0], frame=toolframe)
    tf_tool_to_link0 = tfx.lookupTransform(MyConstants.Frames.Link0, currPose.frame, wait=5)
    currPose = tf_tool_to_link0 * currPose

    angle = tfx.tb_angles(-90,90,0)
    endPosition = currPose.position
    endPosition.y += .05
    endPose = tfx.pose(endPosition, angle,frame=MyConstants.Frames.Link0)


    rospy.loginfo('Press enter to call bsp')
    raw_input()

    jointTraj = ravenPlanner.getTrajectoryFromPose(endPose)
    
    if jointTraj is None:
        return
    
    rospy.loginfo('Press enter to move')
    raw_input()

    ravenArm.start()
    rospy.sleep(1)
    
    ravenArm.executeJointTrajectory(jointTraj)
    
    
    rospy.loginfo('Press enter to exit')
    raw_input()
    def reset(self):
        """
        Added

        Resets player, allows for reuse
        """
        self.stages = []
        self.init_poses = {}
        self.start_time = None

        self.default_speed = .01

        for arm in self.arms:
            self.init_poses[arm] = tfx.pose(tfx.lookupTransform('/0_link','/tool_'+arm))
		
        self.current_state = None
        self.current_poses = {}
        self.current_grasps = {}
		
        # ADDED
        self.continuePlaying = True
    def findObject(self, failMethod=None, successMethod=None):
        failMethod = failMethod or self.findObject
        successMethod = successMethod or self.findGripper

        rospy.loginfo('Searching for object point')
        # find object point and pose
        if not self.imageDetector.hasFoundObject():
            rospy.loginfo('Did not find object')
            return failMethod
        # get object w.r.t. toolframe
        self.objectPose = self.imageDetector.getObjectPose(self.toolframe)

        objectPose = tfx.pose(self.objectPose)
        tf_obj_to_link0 = tfx.lookupTransform(Constants.Frames.Link0, objectPose.frame, wait=5)
        objectPose = tf_obj_to_link0 * objectPose
        self.objectPose = objectPose.msg.PoseStamped()

        self.objectPose.pose.position.z += self.objectHeightOffset
        self.publishObjectPose(self.objectPose)

        rospy.loginfo('Found object')
        return successMethod
Example #25
def kinect_transform_publisher():
    global head_pose, hand_pose
    head_pose = None
    hand_pose = None
    dummy = tfx.transform([0,0,0])
    #pub = rospy.Publisher('kinect_transform', )
    pub = tfx.TransformBroadcaster()
    rospy.init_node('kinect_transform_publisher', anonymous=True)

    rospy.Subscriber("/hand_kinect_ar_kinect_pose", ARMarkers, hand_callback)
    rospy.Subscriber("/head_kinect_ar_kinect_pose", ARMarkers, head_callback)
    transformer = tfx.TransformListener()
    r = rospy.Rate(5)

    #head_pose = True # REMOVE THIS
    #head_pose = False # REMOVE THIS
    i = 0
    transforms = []
    while not rospy.is_shutdown() and i < 500:
        if head_pose and hand_pose:
            head_transform = tfx.transform(head_pose, parent='camera_rgb_optical_frame', child='ar_frame')#.as_transform()
            hand_transform = tfx.transform(hand_pose, parent='hand_kinect_optical_frame', child='ar_frame')#.as_transform()
            #head_to_ar = tfx.transform(transformer.lookupTransform('ar_frame', 'camera_rgb_optical_frame', rospy.Time()), parent='camera_rgb_optical_frame', child='ar_frame')
            #ar_to_hand = tfx.transform(transformer.lookupTransform('hand_kinect_optical_frame', 'ar_frame', rospy.Time()), parent='ar_frame', child='hand_kinect_optical_frame')
            head_to_hand = tfx.transform(head_transform.matrix * hand_transform.inverse().matrix, parent='camera_rgb_optical_frame', child='hand_kinect_optical_frame')
            #head_to_hand = tfx.transform(head_to_ar.inverse().matrix * ar_to_hand.matrix, parent='camera_rgb_optical_frame', child='hand_kinect_optical_frame')
            #rospy.loginfo(head_transform)
            #rospy.loginfo(hand_transform.inverse())
            #rospy.loginfo(head_to_ar)
            #rospy.loginfo(ar_to_hand)
            #print head_to_hand
            wrist_to_head = tfx.lookupTransform('r_gripper_tool_frame', 'camera_rgb_optical_frame')
            #wrist_to_head = tfx.transform(transformer.lookupTransform('r_gripper_tool_frame', 'camera_rgb_optical_frame', rospy.Time()), child='camera_rgb_optical_frame', parent='r_gripper_tool_frame')
            wrist_to_hand = tfx.transform(wrist_to_head.matrix * head_to_hand.matrix, parent='r_gripper_tool_frame', child='hand_kinect_optical_frame')
            #print wrist_to_head
            print wrist_to_hand
            print
            #rospy.loginfo(wrist_to_head)
            #rospy.loginfo(wrist_to_hand)
            #pub.sendTransform(wrist_to_hand.position, wrist_to_hand.rotation, rospy.Time(), wrist_to_hand.child, wrist_to_hand.parent)
            #pub.sendTransform(head_to_hand.position, head_to_hand.rotation, rospy.Time(), head_to_hand.child, head_to_hand.parent)
            transforms.append(wrist_to_hand)
            i += 1
        else:
            if head_pose is None:
                print('Head pose is None')
            if hand_pose is None:
                print('Hand pose is None')
            print('')
        r.sleep()
    transform = transforms[0]
    for i in range(1, len(transforms)):
        # use a float weight: in Python 2, 1 / len(transforms) is integer division
        # and evaluates to 0; 1.0 / (i + 1) gives an equal-weight running mean
        transform = transform.interpolate(transforms[i], 1.0 / (i + 1))
    transform.child = "camera_rgb_optical_frame"
    print transform
    rospack = rospkg.RosPack()
    os.chdir(rospack.get_path("kinect_transform"))
    s = '<launch>\n'
    s += '<node pkg="tf" type="static_transform_publisher" name="static3" args="'
    for value in transform.position.list + transform.quaternion.list:
        s += str(value) + ' '
    s += transform.parent + ' ' + transform.child + ' '
    s += '100" />\n'
    s += '</launch>'
    try:
        f = open("launch/wrist_to_hand.launch", "w")
        f.write(s)
    finally:
        f.close()
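
The incremental blend above leans on tfx's interpolate; for the translation part, the same equal-weight running mean can be stated directly. A minimal numpy sketch, assuming the transforms list collected above (orientation would need slerp or the normalized quaternion average shown earlier on this page):

import numpy as np

positions = [np.array(t.position.list) for t in transforms]
mean_pos = np.zeros(3)
for i, p in enumerate(positions):
    # running mean update; equivalent to np.mean(positions, axis=0) at the end
    mean_pos += (p - mean_pos) / (i + 1.0)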
    def threshRed(self):
        THRESH_RED_WIDTH_ROI = 200
        THRESH_RED_HEIGHT_ROI = 90
        
        if not self.img.has_key('left') or not self.img.has_key('right'):
            return

        leftImg = cv.CloneMat(self.img['left'])
        rightImg = cv.CloneMat(self.img['right'])
        width = cv.GetSize(leftImg)[0]
        height = cv.GetSize(leftImg)[1]
        
        estimated_gripper_pose, time_since_detection = self.getEstimatedPose()

        gripper_pos = tfx.pose(estimated_gripper_pose).position
        tf_tool_to_cam = tfx.lookupTransform('/left_BC',gripper_pos.frame,wait=10)        
        gripper_pos = tf_tool_to_cam * gripper_pos
        stereoModel = image_geometry.StereoCameraModel()
        stereoModel.fromCameraInfo(self.info['left'], self.info['right'])
        (u_l, v_l), (u_r, v_r) = stereoModel.project3dToPixel(gripper_pos.list)
        
        def expandROIRegion(start_ROI, elapsed_time, max_elapsed_time=20., growth_factor=2.):
            elapsed_time = min(elapsed_time,max_elapsed_time)
            
            return int(start_ROI + start_ROI * (elapsed_time/max_elapsed_time) * growth_factor)
        
        #THRESH_RED_WIDTH_ROI = expandROIRegion(THRESH_RED_WIDTH_ROI, time_since_detection)
        #THRESH_RED_HEIGHT_ROI = expandROIRegion(THRESH_RED_HEIGHT_ROI, time_since_detection)

        leftImg_lb_width = int(u_l-THRESH_RED_WIDTH_ROI) if int(u_l-THRESH_RED_WIDTH_ROI) > 0 else 0
        leftImg_ub_width = int(u_l+THRESH_RED_WIDTH_ROI) if int(u_l+THRESH_RED_WIDTH_ROI) < width else width-1
        leftImg_lb_height = int(v_l-THRESH_RED_HEIGHT_ROI) if int(v_l-THRESH_RED_HEIGHT_ROI) > 0 else 0
        leftImg_ub_height = int(v_l+THRESH_RED_HEIGHT_ROI) if int(v_l+THRESH_RED_HEIGHT_ROI) < height else height-1
        rightImg_lb_width = int(u_r-THRESH_RED_WIDTH_ROI) if int(u_r-THRESH_RED_WIDTH_ROI) > 0 else 0
        rightImg_ub_width = int(u_r+THRESH_RED_WIDTH_ROI) if int(u_r+THRESH_RED_WIDTH_ROI) < width else width-1
        rightImg_lb_height = int(v_r-THRESH_RED_HEIGHT_ROI) if int(v_r-THRESH_RED_HEIGHT_ROI) > 0 else 0
        rightImg_ub_height = int(v_r+THRESH_RED_HEIGHT_ROI) if int(v_r+THRESH_RED_HEIGHT_ROI) < height else height-1

        if self.print_messages:
            print('(height, width) = ({0}, {1})'.format(height,width))
            print('time_since_detection: {0}'.format(time_since_detection))
            print('leftImg[{0}:{1},{2}:{3}]'.format(leftImg_lb_height,leftImg_ub_height,leftImg_lb_width,leftImg_ub_width))
            print('rightImg[{0}:{1},{2}:{3}]'.format(rightImg_lb_height,rightImg_ub_height,rightImg_lb_width,rightImg_ub_width))
        
        if leftImg_lb_width >= leftImg_ub_width or leftImg_lb_height >= leftImg_ub_height or rightImg_lb_width >= rightImg_ub_width or rightImg_lb_height >= rightImg_ub_height:
            return

        leftImg = leftImg[leftImg_lb_height:leftImg_ub_height, leftImg_lb_width:leftImg_ub_width]
        rightImg = rightImg[rightImg_lb_height:rightImg_ub_height, rightImg_lb_width:rightImg_ub_width]
        if self.show_thresh_red_images:
            self.red_roi_pub.publish(self.bridge.cv_to_imgmsg(leftImg))

        leftThresh = cv.CreateImage(cv.GetSize(leftImg),8,1)
        rightThresh = cv.CreateImage(cv.GetSize(rightImg),8,1)        
        leftThresh = threshold(leftImg, leftImg, leftThresh, RED_LOWER, RED_UPPER)
        rightThresh = threshold(rightImg, rightImg, rightThresh, RED_LOWER, RED_UPPER)
        
        if self.show_thresh_red_images:
            self.red_thresh_roi_pub.publish(self.bridge.cv_to_imgmsg(leftThresh))

        leftContours = find_contours(leftThresh, "leftThresh")
        rightContours = find_contours(rightThresh, "rightThresh")
        foundRed = True
        if len(leftContours) > 0 or len(rightContours) > 0:
            self.lastFoundRed = rospy.Time.now()
            foundRed = True
        else:
            if self.lastFoundRed is not None and rospy.Time.now() - self.lastFoundRed < self.redHistoryDuration:
                foundRed = True
            else:
                foundRed = False

        toolPose = tfx.pose([0,0,0],frame=self.tool_frame,stamp=rospy.Time.now()).msg.PoseStamped()
        if foundRed:
            self.red_thresh_marker_pub.publish(createMarker(toolPose,color="red"))
        else:
            self.red_thresh_marker_pub.publish(createMarker(toolPose,color="blue"))
        
        self.red_thresh_pub.publish(Bool(foundRed))
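
The eight ROI bound computations above are one clamp applied four times; a minimal helper sketch (name hypothetical) that produces the same bounds:

def clamp_roi(center, half_extent, size):
    # clamp [center - half_extent, center + half_extent] to [0, size - 1]
    lb = max(int(center - half_extent), 0)
    ub = min(int(center + half_extent), size - 1)
    return lb, ub

# e.g. leftImg_lb_width, leftImg_ub_width = clamp_roi(u_l, THRESH_RED_WIDTH_ROI, width)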
Example #27
# BEGIN generate_mesh
def generate_mesh(cloud):
    #cloud = cloudprocpy.fastBilateralFilter(cloud, 15, .05) # smooth out the depth image
    big_mesh = cloudprocpy.meshOFM(cloud, 3, .1) # use pcl OrganizedFastMesh to make mesh
    simple_mesh = cloudprocpy.quadricSimplifyVTK(big_mesh, .02) # decimate mesh with VTK function
    return simple_mesh
# END generate_mesh

def get_xyz_world_frame(cloud):
    xyz1 = cloud.to2dArray()
    xyz1[:,3] = 1
    return xyz1.dot(T_w_k.T)[:,:3]

rospy.init_node('listener', anonymous=True)
#T_w_k = np.eye(4)
T_w_k = np.array(tfx.lookupTransform('/world', '/camera_depth_optical_frame').matrix)


def isValidMesh(mesh):
    vertices = mesh.getVertices()

    for vertex in vertices:
        if vertex[2] > .58:
            return False

    return True


env = openravepy.Environment()
env.Load('/home/annal/src/RavenDebridement/models/myRaven.xml')
env.SetViewer('qtcoin') # attach viewer (optional)
 def _estimatedPoseCallback(self, msg):
     pose = tfx.pose(msg)
     if self.camera_frame is not None:
         tf_poseframe_to_cam = tfx.lookupTransform(self.camera_frame,pose.frame)
         self.estimated_gripper_pose = tf_poseframe_to_cam * pose
def testExecuteJointTrajectory(arm=MyConstants.Arm.Right):
    rospy.init_node('test_trajopt',anonymous=True)
    ravenArm = RavenArm(arm)
    ravenPlanner = RavenPlanner(arm)
    rospy.sleep(4)


    if arm == MyConstants.Arm.Right:
        toolframe = MyConstants.Frames.RightTool
    else:
        toolframe = MyConstants.Frames.LeftTool

    currPose = tfx.pose([0,0,0], frame=toolframe)
    tf_tool_to_link0 = tfx.lookupTransform(MyConstants.Frames.Link0, currPose.frame, wait=5)
    currPose = tf_tool_to_link0 * currPose

    angle = tfx.tb_angles(-90,90,0)
    endPosition = currPose.position
    endPosition.x -= .05
    endPose = tfx.pose(endPosition, angle,frame=MyConstants.Frames.Link0)


    startJoints = ravenArm.getCurrentJoints()

    ####### TEMP
    # so adding box will work, don't normally need to do though
    ravenPlanner.updateOpenraveJoints(startJoints)
    box = rave.RaveCreateKinBody(ravenPlanner.env,'')
    box.SetName('testbox')
    box.InitFromBoxes(np.array([[0,-.025,0,0.1,0.01,0.01]]),True)
    #code.interact(local=locals())
    ee = ravenPlanner.manip.GetEndEffectorTransform()
    ee[:3,:3] = np.identity(3)
    box.SetTransform(ee)
    ravenPlanner.env.Add(box,True)
    #ravenPlanner.env.SetViewer('qtcoin')
    rospy.loginfo('Box created, press enter')
    ravenFile = os.path.dirname(__file__) + '/../../../models/myRavenEnv.uri'
    ravenPlanner.env.Save(ravenFile)
    #code.interact(local=locals())
    return
    
    ###########

    #ravenPlanner.env.SetViewer('qtcoin')

    rospy.loginfo('Press enter to call trajopt')
    raw_input()
    rospy.sleep(1)

    jointTraj = ravenPlanner.getTrajectoryFromPose(endPose, reqType=Request.Type.Pose)
    
    if jointTraj is None:
        return
    
    rospy.loginfo('Press enter to move')
    raw_input()

    #ravenArm.start()
    
    #ravenArm.executeJointTrajectory(jointTraj)
    
    
    #ravenPlanner.updateOpenraveJoints(jointTraj[0])

    endJoints = ravenPlanner.getJointsFromPose(endPose)
    ravenPlanner.updateOpenraveJoints(endJoints)
    endJointsEE = ravenPlanner.manip.GetEndEffectorTransform()

    ravenPlanner.updateOpenraveJoints(jointTraj[-1])
    jointTrajEE = ravenPlanner.manip.GetEndEffectorTransform()

    #Util.plot_transform(ravenPlanner.env, endJointsEE)
    #Util.plot_transform(ravenPlanner.env, jointTrajEE)

    code.interact(local=locals())

    rospy.loginfo('Press enter to exit')
    raw_input()
Example #30
def testExecuteJointTrajectory(arm=raven_constants.Arm.Right):
    rospy.init_node('test_trajopt',anonymous=True)
    ravenArm = RavenArm(arm)
    ravenPlanner = RavenPlanner(arm)
    rospy.sleep(4)


    if arm == raven_constants.Arm.Right:
        toolframe = raven_constants.Frames.RightTool
    else:
        toolframe = raven_constants.Frames.LeftTool

    currPose = tfx.pose([0,0,0], frame=toolframe)
    tf_tool_to_link0 = tfx.lookupTransform(raven_constants.Frames.Link0, currPose.frame, wait=5)
    currPose = tf_tool_to_link0 * currPose

    angle = tfx.tb_angles(-90,90,0)
    endPosition = currPose.position
    endPosition.x -= .05
    endPose = tfx.pose(endPosition, angle,frame=raven_constants.Frames.Link0)


    startJoints = ravenArm.getCurrentJoints()

    ####### TEMP
    # so adding box will work, don't normally need to do though
    ravenPlanner.updateOpenraveJoints(startJoints)
    box = rave.RaveCreateKinBody(ravenPlanner.env,'')
    box.SetName('testbox')
    box.InitFromBoxes(np.array([[0,-.025,0,0.1,0.01,0.01]]),True)
    #code.interact(local=locals())
    ee = ravenPlanner.manip.GetEndEffectorTransform()
    ee[:3,:3] = np.identity(3)
    box.SetTransform(ee)
    ravenPlanner.env.Add(box,True)
    #ravenPlanner.env.SetViewer('qtcoin')
    rospy.loginfo('Box created, press enter')
    ravenFile = os.path.dirname(__file__) + '/../../../models/myRavenEnv.uri'
    ravenPlanner.env.Save(ravenFile)
    #code.interact(local=locals())
    return
    
    ###########

    #ravenPlanner.env.SetViewer('qtcoin')

    rospy.loginfo('Press enter to call trajopt')
    raw_input()
    rospy.sleep(1)

    jointTraj = ravenPlanner.getTrajectoryFromPose(endPose, reqType=Request.Type.Pose)
    
    if jointTraj is None:
        return
    
    rospy.loginfo('Press enter to move')
    raw_input()

    #ravenArm.start()
    
    #ravenArm.executeJointTrajectory(jointTraj)
    
    
    #ravenPlanner.updateOpenraveJoints(jointTraj[0])

    endJoints = ravenPlanner.getJointsFromPose(endPose)
    ravenPlanner.updateOpenraveJoints(endJoints)
    endJointsEE = ravenPlanner.manip.GetEndEffectorTransform()

    ravenPlanner.updateOpenraveJoints(jointTraj[-1])
    jointTrajEE = ravenPlanner.manip.GetEndEffectorTransform()

    #raven_util.plot_transform(ravenPlanner.env, endJointsEE)
    #raven_util.plot_transform(ravenPlanner.env, jointTrajEE)

    code.interact(local=locals())

    rospy.loginfo('Press enter to exit')
    raw_input()
Example #31
    def threshRed(self):
        THRESH_RED_WIDTH_ROI = 200
        THRESH_RED_HEIGHT_ROI = 90

        if not self.img.has_key('left') or not self.img.has_key('right'):
            return

        leftImg = cv.CloneMat(self.img['left'])
        rightImg = cv.CloneMat(self.img['right'])
        width = cv.GetSize(leftImg)[0]
        height = cv.GetSize(leftImg)[1]

        estimated_gripper_pose, time_since_detection = self.getEstimatedPose()

        gripper_pos = tfx.pose(estimated_gripper_pose).position
        tf_tool_to_cam = tfx.lookupTransform('/left_BC',
                                             gripper_pos.frame,
                                             wait=10)
        gripper_pos = tf_tool_to_cam * gripper_pos
        stereoModel = image_geometry.StereoCameraModel()
        stereoModel.fromCameraInfo(self.info['left'], self.info['right'])
        (u_l, v_l), (u_r, v_r) = stereoModel.project3dToPixel(gripper_pos.list)

        def expandROIRegion(start_ROI,
                            elapsed_time,
                            max_elapsed_time=20.,
                            growth_factor=2.):
            elapsed_time = min(elapsed_time, max_elapsed_time)

            return int(start_ROI + start_ROI *
                       (elapsed_time / max_elapsed_time) * growth_factor)

        #THRESH_RED_WIDTH_ROI = expandROIRegion(THRESH_RED_WIDTH_ROI, time_since_detection)
        #THRESH_RED_HEIGHT_ROI = expandROIRegion(THRESH_RED_HEIGHT_ROI, time_since_detection)

        leftImg_lb_width = int(
            u_l -
            THRESH_RED_WIDTH_ROI) if int(u_l - THRESH_RED_WIDTH_ROI) > 0 else 0
        leftImg_ub_width = int(u_l + THRESH_RED_WIDTH_ROI) if int(
            u_l + THRESH_RED_WIDTH_ROI) < width else width - 1
        leftImg_lb_height = int(v_l - THRESH_RED_HEIGHT_ROI) if int(
            v_l - THRESH_RED_HEIGHT_ROI) > 0 else 0
        leftImg_ub_height = int(v_l + THRESH_RED_HEIGHT_ROI) if int(
            v_l + THRESH_RED_HEIGHT_ROI) < height else height - 1
        rightImg_lb_width = int(
            u_r -
            THRESH_RED_WIDTH_ROI) if int(u_r - THRESH_RED_WIDTH_ROI) > 0 else 0
        rightImg_ub_width = int(u_r + THRESH_RED_WIDTH_ROI) if int(
            u_r + THRESH_RED_WIDTH_ROI) < width else width - 1
        rightImg_lb_height = int(v_r - THRESH_RED_HEIGHT_ROI) if int(
            v_r - THRESH_RED_HEIGHT_ROI) > 0 else 0
        rightImg_ub_height = int(v_r + THRESH_RED_HEIGHT_ROI) if int(
            v_r + THRESH_RED_HEIGHT_ROI) < height else height - 1

        if self.print_messages:
            print('(height, width) = ({0}, {1})'.format(height, width))
            print('time_since_detection: {0}'.format(time_since_detection))
            print('leftImg[{0}:{1},{2}:{3}]'.format(leftImg_lb_height,
                                                    leftImg_ub_height,
                                                    leftImg_lb_width,
                                                    leftImg_ub_width))
            print('rightImg[{0}:{1},{2}:{3}]'.format(rightImg_lb_height,
                                                     rightImg_ub_height,
                                                     rightImg_lb_width,
                                                     rightImg_ub_width))

        if leftImg_lb_width >= leftImg_ub_width or leftImg_lb_height >= leftImg_ub_height or rightImg_lb_width >= rightImg_ub_width or rightImg_lb_height >= rightImg_ub_height:
            return

        leftImg = leftImg[leftImg_lb_height:leftImg_ub_height,
                          leftImg_lb_width:leftImg_ub_width]
        rightImg = rightImg[rightImg_lb_height:rightImg_ub_height,
                            rightImg_lb_width:rightImg_ub_width]
        if self.show_thresh_red_images:
            self.red_roi_pub.publish(self.bridge.cv_to_imgmsg(leftImg))

        leftThresh = cv.CreateImage(cv.GetSize(leftImg), 8, 1)
        rightThresh = cv.CreateImage(cv.GetSize(rightImg), 8, 1)
        leftThresh = threshold(leftImg, leftImg, leftThresh, RED_LOWER,
                               RED_UPPER)
        rightThresh = threshold(rightImg, rightImg, rightThresh, RED_LOWER,
                                RED_UPPER)

        if self.show_thresh_red_images:
            self.red_thresh_roi_pub.publish(
                self.bridge.cv_to_imgmsg(leftThresh))

        leftContours = find_contours(leftThresh, "leftThresh")
        rightContours = find_contours(rightThresh, "rightThresh")
        foundRed = True
        if len(leftContours) > 0 or len(rightContours) > 0:
            self.lastFoundRed = rospy.Time.now()
            foundRed = True
        else:
            if self.lastFoundRed is not None and rospy.Time.now(
            ) - self.lastFoundRed < self.redHistoryDuration:
                foundRed = True
            else:
                foundRed = False

        toolPose = tfx.pose([0, 0, 0],
                            frame=self.tool_frame,
                            stamp=rospy.Time.now()).msg.PoseStamped()
        if foundRed:
            self.red_thresh_marker_pub.publish(
                createMarker(toolPose, color="red"))
        else:
            self.red_thresh_marker_pub.publish(
                createMarker(toolPose, color="blue"))

        self.red_thresh_pub.publish(Bool(foundRed))
Example #32
 def _estimatedPoseCallback(self, msg):
     pose = tfx.pose(msg)
     if self.camera_frame is not None:
         tf_poseframe_to_cam = tfx.lookupTransform(self.camera_frame,
                                                   pose.frame)
         self.estimated_gripper_pose = tf_poseframe_to_cam * pose
Example #33
    def callback(self, msg):
        handles = list()
        for handle in msg.handles:
            for cylinder in handle.cylinders:
                handle_pose = tfx.pose(cylinder.pose,
                                       stamp=msg.header.stamp,
                                       frame=msg.header.frame_id)

                if handle_pose.orientation.matrix[2, 2] > 0:
                    continue

                g = np.array(
                    [cylinder.axis.x, cylinder.axis.y, cylinder.axis.z])
                r = np.array(
                    [cylinder.normal.x, cylinder.normal.y, cylinder.normal.z])
                b = np.cross(r, g)

                g = -g
                b = -b

                rot = np.zeros((3, 3))
                rot[:, 0] = r / np.linalg.norm(r)
                rot[:, 1] = g / np.linalg.norm(g)
                rot[:, 2] = b / np.linalg.norm(b)

                handle_pose.orientation = tfx.tb_angles(
                    np.dot(tfx.tb_angles(-90, 0, 0).matrix, rot))
                """
                rot = np.array(handle_pose.orientation.matrix)
                if rot[2,2] > 0:
                    # facing wrong way
                    new_rot = np.zeros((3,3))
                    
                    #new_rot[:3,0] = rot[:3,1]
                    #new_rot[:3,1] = rot[:3,0]
                    #new_rot[:3,2] = -rot[:3,2]
                    
                    new_rot[:3,0] = rot[:3,0]
                    new_rot[:3,1] = -rot[:3,2]
                    new_rot[:3,2] = rot[:3,1]
                    
                    handle_pose.orientation = tfx.tb_angles(new_rot)
                """

                #if rot[2,2] > 0:
                #    self.pose_pub.publish(handle_pose.msg.PoseStamped())
                #    return

                handles.append(handle_pose)

                #self.pose_pub.publish(handle_pose.msg.PoseStamped())
                #return

        if len(handles) == 0:
            return

        #handles_base = [tfx.convertToFrame(handle, 'base_link') for handle in handles]
        handles_base = list()
        for handle in handles:
            cam_to_tool_frame = tfx.lookupTransform('r_gripper_tool_frame',
                                                    handle.frame)
            tool_frame_to_base = tfx.lookupTransform('base_link',
                                                     'r_gripper_tool_frame')
            handles_base.append(tool_frame_to_base *
                                (cam_to_tool_frame * handle))

        sorted_handles_base = sorted(handles_base, key=lambda h: h.position.y)

        self.pose_pub.publish(sorted_handles_base[-1].msg.PoseStamped())
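The rotation assembled in the callback stacks the cylinder normal, the (negated) axis, and their cross product as columns; if the two input vectors are not exactly perpendicular the result is not orthonormal. A minimal numpy sketch that adds the missing Gram-Schmidt step (function name hypothetical):

import numpy as np

def frame_from_axes(normal, axis):
    r = np.asarray(normal, dtype=float)
    r = r / np.linalg.norm(r)
    g = np.asarray(axis, dtype=float)
    g = g - np.dot(g, r) * r  # drop any component of axis along normal
    g = g / np.linalg.norm(g)
    b = np.cross(r, g)        # completes a right-handed frame
    return np.column_stack((r, g, b))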
Example #34
 def callback(self, msg):
     handles = list()
     for handle in msg.handles:
         for cylinder in handle.cylinders:
             handle_pose = tfx.pose(cylinder.pose, stamp=msg.header.stamp, frame=msg.header.frame_id)
 
             if handle_pose.orientation.matrix[2,2] > 0:
                 continue
             
             
             g = np.array([cylinder.axis.x, cylinder.axis.y, cylinder.axis.z])
             r = np.array([cylinder.normal.x, cylinder.normal.y, cylinder.normal.z])
             b = np.cross(r, g)
             
             g = -g
             b = -b
             
             
             rot = np.zeros((3,3))
             rot[:,0] = r / np.linalg.norm(r)
             rot[:,1] = g / np.linalg.norm(g)
             rot[:,2] = b / np.linalg.norm(b)
             
             handle_pose.orientation = tfx.tb_angles(np.dot(tfx.tb_angles(-90,0,0).matrix,rot))
     
             """
             rot = np.array(handle_pose.orientation.matrix)
             if rot[2,2] > 0:
                 # facing wrong way
                 new_rot = np.zeros((3,3))
                 
                 #new_rot[:3,0] = rot[:3,1]
                 #new_rot[:3,1] = rot[:3,0]
                 #new_rot[:3,2] = -rot[:3,2]
                 
                 new_rot[:3,0] = rot[:3,0]
                 new_rot[:3,1] = -rot[:3,2]
                 new_rot[:3,2] = rot[:3,1]
                 
                 handle_pose.orientation = tfx.tb_angles(new_rot)
             """
             
             #if rot[2,2] > 0:
             #    self.pose_pub.publish(handle_pose.msg.PoseStamped())
             #    return
             
             handles.append(handle_pose)
             
             #self.pose_pub.publish(handle_pose.msg.PoseStamped())
             #return
             
     if len(handles) == 0:
         return
             
     #handles_base = [tfx.convertToFrame(handle, 'base_link') for handle in handles]
     handles_base = list()
     for handle in handles:
         cam_to_tool_frame = tfx.lookupTransform('r_gripper_tool_frame', handle.frame)
         tool_frame_to_base = tfx.lookupTransform('base_link', 'r_gripper_tool_frame')
         handles_base.append(tool_frame_to_base*(cam_to_tool_frame*handle))
                 
     sorted_handles_base = sorted(handles_base, key=lambda h: h.position.y)
     
     self.pose_pub.publish(sorted_handles_base[-1].msg.PoseStamped())
Example #35
def test_tf():
    tfx.lookupTransform('camera_rgb_optical_frame', 'r_gripper_tool_frame')
    IPython.embed()
Example #36
def kinect_transform_publisher():
    global head_pose, hand_pose
    head_pose = None
    hand_pose = None
    dummy = tfx.transform([0, 0, 0])
    #pub = rospy.Publisher('kinect_transform', )
    pub = tfx.TransformBroadcaster()
    rospy.init_node('kinect_transform_publisher', anonymous=True)

    rospy.Subscriber("/hand_kinect_ar_kinect_pose", ARMarkers, hand_callback)
    rospy.Subscriber("/head_kinect_ar_kinect_pose", ARMarkers, head_callback)
    transformer = tfx.TransformListener()
    r = rospy.Rate(5)

    #head_pose = True # REMOVE THIS
    #head_pose = False # REMOVE THIS
    i = 0
    transforms = []
    while not rospy.is_shutdown() and i < 500:
        if head_pose and hand_pose:
            head_transform = tfx.transform(head_pose,
                                           parent='camera_rgb_optical_frame',
                                           child='ar_frame')  #.as_transform()
            hand_transform = tfx.transform(hand_pose,
                                           parent='hand_kinect_optical_frame',
                                           child='ar_frame')  #.as_transform()
            #head_to_ar = tfx.transform(transformer.lookupTransform('ar_frame', 'camera_rgb_optical_frame', rospy.Time()), parent='camera_rgb_optical_frame', child='ar_frame')
            #ar_to_hand = tfx.transform(transformer.lookupTransform('hand_kinect_optical_frame', 'ar_frame', rospy.Time()), parent='ar_frame', child='hand_kinect_optical_frame')
            head_to_hand = tfx.transform(head_transform.matrix *
                                         hand_transform.inverse().matrix,
                                         parent='camera_rgb_optical_frame',
                                         child='hand_kinect_optical_frame')
            #head_to_hand = tfx.transform(head_to_ar.inverse().matrix * ar_to_hand.matrix, parent='camera_rgb_optical_frame', child='hand_kinect_optical_frame')
            #rospy.loginfo(head_transform)
            #rospy.loginfo(hand_transform.inverse())
            #rospy.loginfo(head_to_ar)
            #rospy.loginfo(ar_to_hand)
            #print head_to_hand
            wrist_to_head = tfx.lookupTransform('r_gripper_tool_frame',
                                                'camera_rgb_optical_frame')
            #wrist_to_head = tfx.transform(transformer.lookupTransform('r_gripper_tool_frame', 'camera_rgb_optical_frame', rospy.Time()), child='camera_rgb_optical_frame', parent='r_gripper_tool_frame')
            wrist_to_hand = tfx.transform(wrist_to_head.matrix *
                                          head_to_hand.matrix,
                                          parent='r_gripper_tool_frame',
                                          child='hand_kinect_optical_frame')
            #print wrist_to_head
            print wrist_to_hand
            print
            #rospy.loginfo(wrist_to_head)
            #rospy.loginfo(wrist_to_hand)
            #pub.sendTransform(wrist_to_hand.position, wrist_to_hand.rotation, rospy.Time(), wrist_to_hand.child, wrist_to_hand.parent)
            #pub.sendTransform(head_to_hand.position, head_to_hand.rotation, rospy.Time(), head_to_hand.child, head_to_hand.parent)
            transforms.append(wrist_to_hand)
            i += 1
        else:
            if head_pose is None:
                print('Head pose is None')
            if hand_pose is None:
                print('Hand pose is None')
            print('')
        r.sleep()
    transform = transforms[0]
    for i in range(1, len(transforms)):
        # use a float weight: in Python 2, 1 / len(transforms) is integer division
        # and evaluates to 0; 1.0 / (i + 1) gives an equal-weight running mean
        transform = transform.interpolate(transforms[i], 1.0 / (i + 1))
    transform.child = "camera_rgb_optical_frame"
    print transform
    rospack = rospkg.RosPack()
    os.chdir(rospack.get_path("kinect_transform"))
    s = '<launch>\n'
    s += '<node pkg="tf" type="static_transform_publisher" name="static3" args="'
    for value in transform.position.list + transform.quaternion.list:
        s += str(value) + ' '
    s += transform.parent + ' ' + transform.child + ' '
    s += '100" />\n'
    s += '</launch>'
    try:
        f = open("launch/wrist_to_hand.launch", "w")
        f.write(s)
    finally:
        f.close()
Example #37
# BEGIN generate_mesh
def generate_mesh(cloud):
    #cloud = cloudprocpy.fastBilateralFilter(cloud, 15, .05) # smooth out the depth image
    big_mesh = cloudprocpy.meshOFM(cloud, 3, .1)  # use pcl OrganizedFastMesh to make mesh
    simple_mesh = cloudprocpy.quadricSimplifyVTK(big_mesh, .02)  # decimate mesh with VTK function
    return simple_mesh


# END generate_mesh


def get_xyz_world_frame(cloud):
    xyz1 = cloud.to2dArray()
    xyz1[:, 3] = 1
    return xyz1.dot(T_w_k.T)[:, :3]


rospy.init_node('listener', anonymous=True)
#T_w_k = np.eye(4)
T_w_k = np.array(
    tfx.lookupTransform('/world', '/camera_depth_optical_frame').matrix)


def isValidMesh(mesh):
    vertices = mesh.getVertices()

    for vertex in vertices:
        if vertex[2] > .58:
            return False

    return True


env = openravepy.Environment()
env.Load('/home/annal/src/RavenDebridement/models/myRaven.xml')
env.SetViewer('qtcoin')  # attach viewer (optional)
Example #38
def test_tf():
    tfx.lookupTransform('camera_rgb_optical_frame','r_gripper_tool_frame')
    IPython.embed()
    def __init__(self, armNames, errorModel=None, thread=True, withWorkspace=False, addNoise=False):
        if isinstance(armNames,basestring):
            armNames = [armNames]
        self.armNames = sorted(armNames)
        self.errorModel = errorModel
        self.refFrame = raven_constants.Frames.Link0
        self.addNoise = addNoise

        self.env = rave.Environment()
        #self.env.SetViewer('qtcoin')

        rospy.loginfo('Before loading model')
        if withWorkspace:
            ravenFile = os.path.join(roslib.packages.get_pkg_subdir('RavenDebridement','models'),'raven_with_workspace.zae')
        else:
            ravenFile = os.path.join(roslib.packages.get_pkg_subdir('RavenDebridement','models'),'myRaven.xml')
        #ravenFile = '/home/gkahn/ros_workspace/RavenDebridement/models/myRaven.xml'
        self.env.Load(ravenFile)
        rospy.loginfo('After loading model')
        
        self.robot = self.env.GetRobots()[0]
        
        self.invKinArm = dict()
        self.toolFrame = dict()
        self.manipName = dict()
        self.manip = dict()
        self.manipJoints = dict()
        self.raveJointNames = dict()
        self.raveJointTypes = dict()
        self.raveJointTypesToRos = dict()
        self.rosJointTypesToRave = dict()
        self.raveGrasperJointNames = dict()
        self.raveGrasperJointTypes = dict()
        
        self.trajRequest = dict()
        
        self.trajEndJoints = dict()
        self.trajEndGrasp = dict()
        self.trajEndPose = dict()
        
        self.trajStartJoints = dict()
        self.trajStartGrasp = dict()
        self.trajStartPose = dict()
        
        self.trajSteps = dict()
        
        self.jointTraj = dict() # for debugging
        self.poseTraj = dict()
        self.deltaPoseTraj = dict()
        
        self.approachDir = dict()
        
        activeDOFs = []
        for armName in self.armNames:
            self._init_arm(armName)
            activeDOFs += self.raveJointTypes[armName]
            
        self.robot.SetActiveDOFs(activeDOFs)
        
        self.currentState = None
        rospy.Subscriber(raven_constants.RavenTopics.RavenState, RavenState, self._ravenStateCallback)
        
        self.start_pose_pubs = dict((armName, rospy.Publisher('planner_%s_start' % armName,PoseStamped)) for armName in self.armNames)
        self.end_pose_pubs = dict((armName, rospy.Publisher('planner_%s_end' % armName,PoseStamped)) for armName in self.armNames)
        
        self.trajopt_pub = rospy.Publisher('trajopt',TrajoptCall)
        self.trajopt_marker_pub = rospy.Publisher('trajopt_marker',Marker)
        
        if withWorkspace:
            # setup variables for kinect constraint generation
            self.cloud_id = 0
            self.num_cd_components = 150
            self.decimation_rate = 0.15
            self.geom_type = 'cd'
            self.kinect_topic = '/camera/depth/points'
            self.kinect_depth_frame = '/camera_depth_optical_frame'
            self.robot_frame = '/world'
            self.cd_body_names = []
        
            # get transform to apply to cloud - it is in the kinect frame by default
            self.T_kinect_to_robot = tfx.lookupTransform(self.robot_frame, self.kinect_depth_frame, wait=20)
            
            workspace_file = rospy.get_param('workspace_constraints')
            workspace_file = os.path.join(roslib.packages.get_pkg_subdir('raven_2_params', 'data'), workspace_file)
            self.load_workspace(workspace_file)
                
        self.lock = threading.RLock()
        if thread:
            self.thread = threading.Thread(target=self.optimizeLoop)
            self.thread.setDaemon(True)
            self.thread.start()