def __init__(self, arm=Constants.Arm.Left):
        """
        Set up gripper control and AR-marker-based object localization for one arm.

        :param arm: which arm to control (defaults to Constants.Arm.Left);
            selects the matching tool frame
        """
        self.arm = arm

        # Select the tool frame matching the chosen arm.
        if self.arm == Constants.Arm.Left:
            self.toolframe = Constants.Frames.LeftTool
        else:
            self.toolframe = Constants.Frames.RightTool

        # Translations are commanded w.r.t. Link0; rotations w.r.t. the tool frame.
        self.transFrame = Constants.Frames.Link0
        self.rotFrame = self.toolframe

        self.gripperControl = GripperControlClass(arm, tf.TransformListener())
        self.imageDetector = ARImageDetectionClass()

        self.homePose = self.imageDetector.getHomePose()

        # ar_frame is the target
        self.ar_frame = '/stereo_33'
        # Object pose: origin of the AR marker frame, re-expressed in Link0 ...
        self.objectPose = tfx.pose([0,0,0], frame=self.ar_frame)
        tf_ar_to_link0 = tfx.lookupTransform(Constants.Frames.Link0, self.ar_frame, wait=5)
        self.objectPose = tf_ar_to_link0 * self.objectPose
        # ... with a fixed grasp orientation substituted in.
        self.objectPose = tfx.pose(self.objectPose.position, tfx.tb_angles(-90,90,0))
        self.objectPose.position.y -= .002  # small hand-tuned offset -- presumably meters; TODO confirm

        # Give TF listeners/publishers time to come up.
        rospy.sleep(3)
  def succesorState(self):
    """Advance to the next succesor state and return it.

    Arm poses are only updated when IK finds a valid joint solution;
    otherwise the previous (valid) state is kept.
    """
    # Copy the current poses so the targets can be edited independently.
    targetRight = tfx.pose(self.rightArm.get_pose())
    targetLeft = tfx.pose(self.leftArm.get_pose())

    ''' ------ CHANGE CODE BELOW THIS POINT ------ '''

    # Assign fixed target positions (an xyz offset array could be added instead).
    targetRight.position = [0.632, -0.740, 0.772]
    targetLeft.position = [0.632, 0.177, 0.772]

    ''' ------ CHANGE CODE ABOVE THIS POINT ------ '''

    # IK returns None when the pose is unreachable.
    jointsRight = self.rightArm.ik(targetRight)
    jointsLeft = self.leftArm.ik(targetLeft)
    if jointsRight is None:
      rospy.loginfo('Invalid Right Arm Pose')
    else:
      self.rightArm.go_to_joints(jointsRight)
      print('new_pose: {0}'.format(targetRight))
    rospy.sleep(.01)
    if jointsLeft is None:
      rospy.loginfo('Invalid Left Arm Pose')
    else:
      self.leftArm.go_to_joints(jointsLeft)
    rospy.sleep(.01)
    return self
Exemplo n.º 3
0
    def goToGripperPose(self,
                        endPose,
                        startPose=None,
                        block=True,
                        duration=None,
                        speed=None,
                        ignoreOrientation=False):
        """
        Move the gripper to endPose.

        If startPose is not specified, default to current pose according to tf
        (w.r.t. Link0). Both poses are converted to the command frame before
        being sent to the controller.

        :param endPose: target pose
        :param startPose: starting pose; defaults to the current gripper pose
        :param block: if True, wait until the controller pauses
        :param duration: trajectory duration (takes precedence over speed)
        :param speed: trajectory speed used when duration is None
        :param ignoreOrientation: if True, keep the start orientation at the end
        :return: blockUntilPaused() result when block is True, True otherwise,
            or None if the current pose could not be determined
        """
        # Identity comparison: tfx poses may overload ==, so `== None` is unsafe.
        if startPose is None:
            startPose = self.getGripperPose()
            if startPose is None:
                # No start pose available -- nothing we can do.
                return

        startPose = raven_util.convertToFrame(tfx.pose(startPose),
                                              self.commandFrame)
        endPose = raven_util.convertToFrame(tfx.pose(endPose),
                                            self.commandFrame)

        if ignoreOrientation:
            endPose.orientation = startPose.orientation
        self.ravenController.goToPose(endPose,
                                      start=startPose,
                                      duration=duration,
                                      speed=speed)

        if block:
            return self.blockUntilPaused()
        return True
Exemplo n.º 4
0
    def execute(self, userdata):
        if MasterClass.PAUSE_BETWEEN_STATES:
           pause_func(self)

        rospy.loginfo('Moving to receptacle')
        # move to receptacle with object

        currPose = tfx.pose(self.ravenArm.getGripperPose())
        receptaclePose = tfx.pose(userdata.receptaclePose)    
        #ignore orientation
        receptaclePose.orientation = currPose.orientation

        print 'getting trajectory'
        endPoseTraj = self.ravenPlanner.getTrajectoryFromPose(self.ravenArm.name, receptaclePose)
        print 'got receptacle trajectory', endPoseTraj is None

        if endPoseTraj != None:
            self.ravenArm.executePoseTrajectory(endPoseTraj)
        

        rospy.loginfo('Opening the gripper')
        # open gripper (consider not all the way)
        #self.ravenArm.openGripper(duration=self.gripperOpenCloseDuration)
        self.ravenArm.setGripper(0.75)
        return 'success'
def main():
    """Convert a rosbag of PSM poses and gripper angles into a pickled trajectory.

    Usage: script <input_bag> <output_file>

    Pairs each cartesian pose with the most recent gripper angle for the
    right (psm1) and left (psm2) arms, then pickles a tuple
    (left_trajectory, right_trajectory) where each sample is
    [x, y, z, qx, qy, qz, qw, stamp_seconds, gripper_angle].
    """
    bag = rosbag.Bag(sys.argv[1])

    output_bag_name = sys.argv[2]

    pose_topic_R = '/dvrk_psm1/joint_position_cartesian'
    gripper_angle_topic_R = '/dvrk_psm1/gripper_position'

    pose_topic_L = '/dvrk_psm2/joint_position_cartesian'
    gripper_angle_topic_L = '/dvrk_psm2/gripper_position'

    # Lists keep samples in arrival order; the previous dict-keyed-by-counter
    # approach relied on dict.values() ordering, which is not guaranteed.
    traj_R = []
    traj_L = []
    curr_angle_R = None
    curr_pose_R = None
    curr_angle_L = None
    curr_pose_L = None

    for topic, msg, t in bag.read_messages(topics=[pose_topic_R, gripper_angle_topic_R,
                                                   pose_topic_L, gripper_angle_topic_L]):
        if topic == gripper_angle_topic_R:
            curr_angle_R = msg.data
        elif topic == pose_topic_R:
            curr_pose_R = tfx.pose(msg)
        elif topic == gripper_angle_topic_L:
            curr_angle_L = msg.data
        else:
            curr_pose_L = tfx.pose(msg)

        # Emit a sample once both a pose and an angle have been seen, then
        # reset so each message is consumed at most once. Identity checks:
        # tfx poses may overload ==, so `!= None` is unsafe.
        if curr_pose_R is not None and curr_angle_R is not None:
            traj_R.append([curr_pose_R.position.x, curr_pose_R.position.y, curr_pose_R.position.z,
                           curr_pose_R.rotation.x, curr_pose_R.rotation.y, curr_pose_R.rotation.z,
                           curr_pose_R.rotation.w, curr_pose_R.stamp.seconds, curr_angle_R])
            curr_angle_R = None
            curr_pose_R = None

        if curr_pose_L is not None and curr_angle_L is not None:
            traj_L.append([curr_pose_L.position.x, curr_pose_L.position.y, curr_pose_L.position.z,
                           curr_pose_L.rotation.x, curr_pose_L.rotation.y, curr_pose_L.rotation.z,
                           curr_pose_L.rotation.w, curr_pose_L.stamp.seconds, curr_angle_L])
            traj_L and None  # no-op placeholder removed below
            curr_angle_L = None
            curr_pose_L = None

    # Interactive inspection checkpoint left in by the original author.
    IPython.embed()
    traj = (traj_L, traj_R)
    # Close the output file deterministically (the original leaked the handle).
    with open(output_bag_name, 'wb') as out_file:
        pickle.dump(traj, out_file)

    bag.close()
Exemplo n.º 6
0
def deltaPose(currPose, desPose, posFrame=None, rotFrame=None):
    """
    Returns the delta pose desPose - currPose.

    The position difference is computed in posFrame (when given and the
    input poses carry frames); the rotation difference in rotFrame. The
    result's orientation is the rotation taking currPose's orientation
    to desPose's.

    :param currPose: current pose (anything tfx.pose accepts)
    :param desPose: desired pose
    :param posFrame: frame in which to take the position delta, or None
    :param rotFrame: frame in which to take the rotation delta, or None
    :return: tfx.pose holding (desired - current)
    """

    currPose, desPose = tfx.pose(currPose), tfx.pose(desPose)
    currPoseFrame, desPoseFrame = currPose.frame, desPose.frame
    
    currPos, desPos = currPose.position, desPose.position
    currRot, desRot = currPose.orientation, desPose.orientation

    # Re-express both positions in posFrame before subtracting.
    if posFrame is not None and currPoseFrame is not None:
        tf_currPos_to_posFrame = tfx.lookupTransform(posFrame, currPoseFrame, wait=10)
        currPos = tf_currPos_to_posFrame * currPos

        tf_desPos_to_posFrame = tfx.lookupTransform(posFrame, desPoseFrame, wait=10)
        desPos = tf_desPos_to_posFrame * desPos

    # Re-express both orientations in rotFrame before comparing.
    if rotFrame is not None and currPoseFrame is not None:
        tf_currRot_to_rotFrame = tfx.lookupTransform(rotFrame, currPoseFrame, wait=10)
        currRot = tf_currRot_to_rotFrame * currRot

        tf_desRot_to_rotFrame = tfx.lookupTransform(rotFrame, desPoseFrame, wait=10)
        desRot = tf_desRot_to_rotFrame * desRot

    deltaPosition = desPos.array - currPos.array
    
    # Relative rotation as a quaternion: inverse(current) * desired.
    currQuat, desQuat = tfx.tb_angles(currRot).quaternion, tfx.tb_angles(desRot).quaternion
    deltaQuat = tft.quaternion_multiply(tft.quaternion_inverse(currQuat), desQuat)

    deltaPose = tfx.pose(deltaPosition, deltaQuat)

    return deltaPose
 def raiseArms(self):
     """Raise both arms to the RIGHT_UP / LEFT_UP positions.

     Sets self.valid according to whether the right-arm IK succeeded and
     returns early (without moving the left arm) on right-arm failure.

     :return: self
     """
     rightArmPose = self.rightArm.get_pose()
     leftArmPose = self.leftArm.get_pose()
     newRightArmPose = tfx.pose(rightArmPose)  # Create new pose
     newLeftArmPose = tfx.pose(leftArmPose)  # Create new pose
     newRightArmPose.position = (
         RIGHT_UP
     )  # either update by adding to xyz offset array "+ [0, -pos_step, 0]" or assign new position, defualt keeps position the same
     newLeftArmPose.position = LEFT_UP
     # IK returns None for unreachable poses.
     newRightJoints = self.rightArm.ik(newRightArmPose)  # Updates arm pose if valid
     newLeftJoints = self.leftArm.ik(newLeftArmPose)
     if newRightJoints is not None:
         self.rightArm.go_to_joints(newRightJoints)
         print ("new_pose: {0}".format(newRightArmPose))
         self.valid = True
     else:
         rospy.loginfo("Invalid Right Arm Pose")
         print newRightArmPose
         self.valid = False
         return self
     rospy.sleep(0.01)
     if newLeftJoints is not None:
         self.leftArm.go_to_joints(newLeftJoints)
     else:
         rospy.loginfo("Invalid Left Arm Pose")
     rospy.sleep(0.01)
     return self
Exemplo n.º 8
0
    def add_point_to_point_with_lift(self,
                                     name,
                                     start_pos,
                                     end_pos,
                                     orientation,
                                     height,
                                     arm=None,
                                     duration=None,
                                     speed=None):
        """Add a stage interpolating start_pos -> end_pos with a sinusoidal z lift.

        Both endpoints share the given orientation. When duration is None it
        is derived from the endpoint distance and speed (the default speed is
        used when speed is also None).
        """
        pose_start = tfx.pose(start_pos, orientation)
        pose_end = tfx.pose(end_pos, orientation)
        if duration is None:
            chosen_speed = self.default_speed if speed is None else speed
            duration = pose_end.position.distance(pose_start.position) / chosen_speed

        def stage_fn(cmd, t):
            # Interpolate between the endpoints, lifting z along a half sine.
            interp = pose_start.interpolate(pose_end, t)
            interp.position.z += sin(t * pi) * height
            TrajectoryPlayer.add_arm_pose_cmd(cmd, self._check(arm), interp.msg.Pose())

        self.add_stage(name, duration, stage_fn)
Exemplo n.º 9
0
 def segment_through_pixel(self, pixel, start_dist=None):
     """
     Returns segment from camera origin through pixel
     
     :param pixel: 2d list/np.array of (row, col) image coordinates
     :param start_dist: distance along the ray where the segment starts;
         defaults to self.min_range
     :return geometry3d.Segment (with endpoints in frame 'base_link')
     """
     # assert (0 <= pixel[0] <= self.height) and (0 <= pixel[1] <= self.width)
     start_dist = start_dist if start_dist is not None else self.min_range
     
     # Center the pixel about the image midpoint (principal point assumed there).
     pixel = np.array(pixel)
     pixel_centered = pixel - np.array([self.height/2., self.width/2.])
     
     # Back-project to 3D in the camera frame at the near and far distances
     # (pinhole model: column index with fx, row index with fy).
     pixel3d_centered_m_min = start_dist*np.array([pixel_centered[1]/self.fx,
                                                   pixel_centered[0]/self.fy,
                                                   1])
     pixel3d_centered_m_max = self.max_range*np.array([pixel_centered[1]/self.fx,
                                                      pixel_centered[0]/self.fy,
                                                      1])
     
     # Transform both endpoints out of the camera frame via the current pose.
     transform = self.get_pose().as_tf()
     p0 = (transform*tfx.pose(pixel3d_centered_m_min)).position.array 
     p1 = (transform*tfx.pose(pixel3d_centered_m_max)).position.array        
     
     return geometry3d.Segment(p0, p1)
Exemplo n.º 10
0
def test_config1():
    """Return a reference configuration: (jl, pl, gl, jr, pr, gr).

    Joint angles (given in degrees, converted to radians), tool pose in
    frame '/0_link', and gripper angle in radians for each arm.
    """
    left_joints = jointDeg2rad({0: 20.6,
                                1: 79.0,
                                2: -.112,
                                4: -84.3,
                                5: 29.5,
                                6: 32.8,
                                7: 29.2})
    left_pose = tfx.pose((0.008, -0.001, -0.131),
                         tfx.tb_angles(24.7, 62.2, 179.6),
                         frame='/0_link')
    left_grasp = 61.994 * pi / 180.

    right_joints = jointDeg2rad({0: 6.3,
                                 1: 107.1,
                                 2: -.085,
                                 4: 20.0,
                                 5: -2.8,
                                 6: -41.9,
                                 7: -15.1})
    right_pose = tfx.pose((-0.109, 0.018, -0.098),
                          tfx.tb_angles(54.3, 71.0, 165.0),
                          frame='/0_link')
    right_grasp = 57.009 * pi / 180.
    return left_joints, left_pose, left_grasp, right_joints, right_pose, right_grasp
Exemplo n.º 11
0
    def goToPose(self, end, start=None, duration=None, speed=None):
        """Queue a 'goToPose' stage interpolating from start to end.

        :param end: target pose
        :param start: starting pose; defaults to the current pose (aborts with
            a log message if no pose has been received yet)
        :param duration: stage duration; when None it is computed from the
            distance between the poses and speed (default pose speed if None)
        """
        # Identity comparison: pose objects may overload ==, so `== None` is unsafe.
        if start is None:
            start = self.currentPose
            if start is None:
                rospy.loginfo('Have not received currentPose yet, aborting goToPose')
                return

        start = tfx.pose(start)
        end = tfx.pose(end)

        if duration is None:
            if speed is None:
                speed = self.defaultPoseSpeed
            duration = end.position.distance(start.position) / speed

        # Defaults bind start/end/arm at definition time so later reassignment
        # of the outer names cannot affect the queued stage.
        def fn(cmd, t, start=start, end=end, arm=self.arm):
            # Interpolated tool pose at fraction t of the stage.
            pose = start.interpolate(end, t)
            toolPose = pose.msg.Pose()
            # not sure if correct
            cmd.controller = Constants.CONTROLLER_CARTESIAN_SPACE
            RavenController.addArmPoseCmd(cmd, arm, toolPose)

        self.addStage('goToPose', duration, fn)
Exemplo n.º 12
0
    def execute(self, userdata):
        """Derive the cut point from the right gripper pose and publish it.

        Applies a sequence of fixed rotations and positional offsets
        (hand-tuned for this task -- TODO confirm calibration) to the current
        right gripper pose, converts it to '/one_remote_center_link',
        publishes it, and stores it in userdata.cutPoint.

        :return: 'success' always
        """
        print "State: IdentifyCutPoint"

        # rospy.loginfo('Enter to Identity Cut Point')
        # raw_input()

        currPoseRight = self.davinciArmRight.getGripperPose()
        # Compose a 180-deg roll flip and a -75-deg pitch onto the gripper pose.
        currPoseRight = currPoseRight.as_tf() * tfx.pose(
            tfx.tb_angles(180, 0, 0)).as_tf() * tfx.pose(
                tfx.tb_angles(0, -75, 0))
        # Hand-tuned positional offsets -- presumably meters; verify.
        currPoseRight.position.y += 0.009
        currPoseRight.position.z += -0.03
        currPoseRight.position.x += 0.004
        # Flip 180 deg again to reach the final cut-point orientation.
        currPoseRight = currPoseRight.as_tf() * tfx.pose(
            tfx.tb_angles(180, 0, 0))
        # Clear the stamp -- presumably so convertToFrame uses the latest
        # transform rather than one at the pose's original time; confirm.
        currPoseRight.stamp = None
        cutPointCurr = tfx.convertToFrame(currPoseRight,
                                          '/one_remote_center_link')
        self.cut_point_pub.publish(cutPointCurr.msg.PoseStamped())
        # rospy.loginfo('Check cut point')
        # raw_input()

        userdata.cutPoint = cutPointCurr

        return 'success'
Exemplo n.º 13
0
 def __init__(self):
     """Initialize with hard-coded home poses for both arms, then wait 5 s."""
     self.outcomes = None
     # Home poses recorded as [x, y, z] position + (x, y, z, w) quaternion.
     left_position = [-0.04907726751924995, 0.027288735500984575, -0.1211606501908539]
     left_orientation = (0.9835887535507493, -0.09932464655036198, -0.14884734393715604, 0.023070472014753367)
     right_position = [0.061241857051286236, 0.014307808069346816, -0.10446866837544996]
     right_orientation = (-0.9689889616996428, 0.1939060552483166, -0.1474787309756946, 0.04136251626876463)
     self.homePoseLeft = tfx.pose(left_position, left_orientation)
     self.homePoseRight = tfx.pose(right_position, right_orientation)
     # Give ROS connections time to come up.
     rospy.sleep(5)
Exemplo n.º 14
0
def test_image_rays():
    """Cast rays through a downsampled grid of camera pixels and find the
    closest collisions in an OpenRAVE PR2 test environment.

    Opens a qtcoin viewer and ends in an IPython shell, so this is an
    interactive/visual test rather than an automated one.
    """
    env = rave.Environment()
    env.Load('../envs/pr2-test.env.xml')
    env.SetViewer('qtcoin')
    env.GetViewer().SendCommand('SetFiguresInCamera 1') # also shows the figures in the image
    time.sleep(1)
    robot = env.GetRobots()[0]
    
    # Camera and depth sensors attached to the robot's head.
    cam = robot.GetAttachedSensor('head_cam').GetSensor()
    type = rave.Sensor.Type.Camera
    cam_geom = cam.GetSensorGeometry(type)
    
    depth = robot.GetAttachedSensor('head_depth').GetSensor()
    type = rave.Sensor.Type.Laser
    depth_geom = depth.GetSensorGeometry(type)
    
    #cam.Configure(rave.Sensor.ConfigureCommand.PowerOn)
    #cam.Configure(rave.Sensor.ConfigureCommand.RenderDataOn)
    
    #cam_pose = tfx.pose(cam.GetTransform())
    #cam_pose.position.z += .32
    
    # Camera pose in the optical frame; the image plane sits 1 cm in front.
    cam_pose = tfx.pose([0,0,0.05], frame='wide_stereo_optical_frame')
    cam_pose_world = tfx.pose(utils.openraveTransformFromTo(robot, cam_pose.matrix, cam_pose.frame, 'world'))
    img_plane_center = cam_pose + [0, 0, .01]
    
    global handles
    img_plane_world = tfx.pose(utils.openraveTransformFromTo(robot, img_plane_center.matrix, cam_pose.frame, 'world'))
    #handles.append(utils.plot_point(env, img_plane_world.position.array, size=.0005))
    
    # Physical image-plane size from the pinhole model (f in pixels, F in meters).
    height, width, _ = cam_geom.imagedata.shape
    f = cam_geom.KK[0,0]
    F = .01 # real focal length in meters
    
    W = F*(width/f)
    H = F*(height/f)
    
    # Downsample the image plane to a 64x48 grid of ray targets.
    width_offsets = np.linspace(-W/2.0, W/2.0, 64)
    height_offsets = np.linspace(-H/2.0, H/2.0, 48)
    
    directions = np.zeros((len(width_offsets)*len(height_offsets), 3))
    
    index = 0
    for w_offset in width_offsets:
        for h_offset in height_offsets:
            # Ray from the camera origin through this image-plane point,
            # normalized and scaled to 5 m length.
            p = img_plane_center + [w_offset, h_offset, 0]
            p_world = tfx.pose(utils.openraveTransformFromTo(robot, p.matrix, p.frame, 'world'))
            direction = (p_world.position.array - cam_pose_world.position.array)
            direction = 5 * direction/np.linalg.norm(direction)
            directions[index,:] = direction
            index += 1
            
            #closest_collision(env, cam_pose_world.position.array, direction, plot=False)
            #handles.append(utils.plot_point(env, p_world.position.array, size=.0001))
    # Time the batched collision query over all rays.
    start_time = time.time()
    closest_collisions(env, cam_pose_world.position.array, directions, plot=False)
    print('Total time: {0}'.format(time.time() - start_time))
    
    IPython.embed()
    rave.RaveDestroy()
Exemplo n.º 15
0
    def predictSinglePose(self, armName, curPose, prevPose, dt=1.0):
        """Predict corrected poses for a single (current, previous) pose pair.

        Builds 4x4 homogeneous matrices for both poses, computes the relative
        motion prevPose^-1 * curPose, scales its translation by 1/dt, and
        feeds the pair to self.predict.

        :param armName: arm identifier forwarded to self.predict
        :param curPose: current pose (with .position / .orientation fields)
        :param prevPose: previous pose
        :param dt: time between the poses; must be > 0
        :return: (gp_pose, sys_pose) as tfx poses carrying curPose's frame
            and stamp, or None when dt <= 0
        """
        if dt <= 0:
            print 'Error: Illegal timestamp'
            return None

        # Convert pose to numpy matrix
        curTrans = tft.translation_matrix([curPose.position.x, curPose.position.y, curPose.position.z])
        curRot = tft.quaternion_matrix([curPose.orientation.x, curPose.orientation.y ,curPose.orientation.z, curPose.orientation.w])
        curPoseMatrix = np.dot(curTrans, curRot)

        prevTrans = tft.translation_matrix([prevPose.position.x, prevPose.position.y, prevPose.position.z])
        prevRot = tft.quaternion_matrix([prevPose.orientation.x, prevPose.orientation.y ,prevPose.orientation.z, prevPose.orientation.w])
        prevPoseMatrix = np.dot(prevTrans, prevRot)
        
        # Relative motion between the two poses.
        deltaPoseMatrix = np.linalg.inv(prevPoseMatrix).dot(curPoseMatrix)
        deltaAngles = euler_from_matrix(deltaPoseMatrix[:3,:3])
        deltaPos = deltaPoseMatrix[:3,3]

        # Only the translation is scaled by 1/dt; the rotation-rate scaling
        # below is disabled -- NOTE(review): confirm this is intentional.
        #deltaAngles = np.array([a / dt for a in deltaAngles])
        deltaPos = deltaPos / dt
        #deltaPoseMatrix = euler_matrix(deltaAngles[0], deltaAngles[1], deltaAngles[2])
        deltaPoseMatrix[:3,3] = deltaPos

        gpList, sysList = self.predict(armName, [curPoseMatrix], [deltaPoseMatrix])
        return tfx.pose(gpList[0], frame=curPose.frame, stamp=curPose.stamp), tfx.pose(sysList[0], frame=curPose.frame, stamp=curPose.stamp)
Exemplo n.º 16
0
    def left_image_callback(self, msg):
        if rospy.is_shutdown():
            return
        self.left_image = self.bridge.imgmsg_to_cv2(msg, "bgr8")
        self.left_points = self.process_image(self.left_image)

        poses = []
        if self.right_points is not None and self.left_points is not None:
            self.left_points.sort(key=lambda x: x[0])
            self.right_points.sort(key=lambda x: x[0])
            disparities = self.assign_disparities(self.left_points,
                                                  self.right_points)
            for i in range(len(self.left_points)):
                x = self.left_points[i][0]
                y = self.left_points[i][1]
                disparity = disparities[i]
                print x, y, disparity
                pt = Util.convertStereo(x, y, disparity, self.info)
                pt = tfx.point(pt)
                pt = tfx.convertToFrame(pt, '/two_remote_center_link')
                pose = tfx.pose(pt)
                pose = pose.as_tf() * tfx.pose(tfx.tb_angles(
                    -90, 0, 180)).as_tf() * tfx.pose(tfx.tb_angles(90, 0, 0))
                poses.append(pose)

        print poses

        pose_array = PoseArray()
        pose_array.header = poses[0].msg.PoseStamped().header
        for pose in poses:
            pose_array.poses.append(pose)
        self.grasp_poses_pub.publish(pose_array)
Exemplo n.º 17
0
def move(arm, pos, rot, speed='Slow'):
    """ Move the arm to (pos, rot) at one of the supported speeds.

    Parameters
    ----------
    arm: [dvrk arm]
        The current DVRK arm.
    pos: [list]
        The desired position; pos[2] (the z-coordinate) must be >= -0.17.
    rot: [list]
        The desired rotation, in list form with yaw, pitch, and roll.
    speed: [String]
        'Slow', 'Medium', or 'Fast', case insensitive.

    Raises
    ------
    ValueError
        If the desired z-coordinate is unsafe or the speed is unrecognized.
    """
    if pos[2] < -0.17:
        raise ValueError("Desired z-coord of {} is not safe! Terminating!".format(pos[2]))
    if speed.lower() == 'slow':
        arm.move_cartesian_frame_linear_interpolation(
                tfx.pose(pos, tfx.tb_angles(rot[0], rot[1], rot[2])), 0.03
        )
    elif speed.lower() == 'medium':
        arm.move_cartesian_frame_linear_interpolation(
                tfx.pose(pos, tfx.tb_angles(rot[0], rot[1], rot[2])), 0.06
        )
    elif speed.lower() == 'fast':
        arm.move_cartesian_frame(tfx.pose(pos, tfx.tb_angles(rot[0], rot[1], rot[2])))
    else:
        # Name the bad value instead of raising an empty ValueError.
        raise ValueError("Unrecognized speed: {}".format(speed))
    def alignGrippers(self):
        rightArmPose = self.rightArm.get_pose()
        leftArmPose = self.leftArm.get_pose()
        newLeftArmPose = tfx.pose(leftArmPose.position, tfx.tb_angles(0.0, 0.0, 0.0))  # Create new pose

        newRightArmPose = tfx.pose(rightArmPose.position, tfx.tb_angles(0.0, 0.0, 0.0))
        newRightJoints = self.rightArm.ik(newRightArmPose)  # Updates arm pose if valid
        newLeftJoints = self.leftArm.ik(newLeftArmPose)
        if newRightJoints is not None:
            self.rightArm.go_to_joints(newRightJoints)
            print ("new_pose: {0}".format(newRightArmPose))
            print "Aligning right gripper..."
            self.rightArm.open_gripper()
            self.valid = True
        else:
            rospy.loginfo("Invalid Right Arm Pose")
            print "RIGHT ALIGNMENT FAILED"
            self.valid = False
            return self
        rospy.sleep(0.5)
        if newLeftJoints is not None:
            self.leftArm.go_to_joints(newLeftJoints)
            print "Aligning left gripper..."
            self.leftArm.open_gripper()
        else:
            rospy.loginfo("Invalid Left Arm Pose")
            print "LEFT ALIGNMENT FAILED"
        rospy.sleep(0.5)
Exemplo n.º 19
0
    def execute(self, userdata):
        """Plan a trajectory from the gripper to the object.

        If the gripper is already within transBound of the object, close the
        gripper and report success; otherwise plan a pose trajectory toward
        the object.

        :return: 'reachedObject', 'notReachedObject', or 'failure' when
            planning produced no trajectory
        """
        if MasterClass.PAUSE_BETWEEN_STATES:
            pause_func(self)

        rospy.loginfo('Planning trajectory from gripper to object')

        objectPose = tfx.pose(userdata.objectPose)
        gripperPose = tfx.pose(userdata.gripperPose)

        #objectPose.orientation = gripperPose.orientation

        # Only position is checked: the rotation bound is infinite.
        transBound = .006
        rotBound = float("inf")
        if Util.withinBounds(gripperPose, objectPose, transBound, rotBound, self.transFrame, self.rotFrame):
            rospy.loginfo('Closing the gripper')
            self.ravenArm.closeGripper(duration=self.gripperOpenCloseDuration)
            userdata.vertAmount = .04
            return 'reachedObject'

        deltaPose = tfx.pose(Util.deltaPose(gripperPose, objectPose, self.transFrame, self.rotFrame))

        endPose = Util.endPose(self.ravenArm.getGripperPose(), deltaPose)
        # Step count scales with the remaining distance (always at least one).
        n_steps = int(self.stepsPerMeter * deltaPose.position.norm) + 1
        poseTraj = self.ravenPlanner.getTrajectoryFromPose(self.ravenArm.name, endPose, n_steps=n_steps)

        rospy.loginfo('deltaPose')
        rospy.loginfo(deltaPose)
        rospy.loginfo('n_steps')
        rospy.loginfo(n_steps)

        # Identity comparison: trajectory objects may overload ==.
        if poseTraj is None:
            return 'failure'

        userdata.poseTraj = poseTraj
        return 'notReachedObject'
Exemplo n.º 20
0
def deltaPose(currPose, desPose, posFrame=None, rotFrame=None):
    """Return the pose difference (desPose - currPose) as a tfx.pose.

    Positions are optionally re-expressed in posFrame and orientations in
    rotFrame (only when the input poses carry a frame). The resulting
    orientation is the relative rotation from currPose to desPose.
    """
    pose_curr = tfx.pose(currPose)
    pose_des = tfx.pose(desPose)
    frame_curr = pose_curr.frame
    frame_des = pose_des.frame

    pos_curr = pose_curr.position
    pos_des = pose_des.position
    rot_curr = pose_curr.orientation
    rot_des = pose_des.orientation

    if posFrame is not None and frame_curr is not None:
        # Bring both positions into posFrame before subtracting.
        pos_curr = tfx.lookupTransform(posFrame, frame_curr, wait=10) * pos_curr
        pos_des = tfx.lookupTransform(posFrame, frame_des, wait=10) * pos_des

    if rotFrame is not None and frame_curr is not None:
        # Bring both orientations into rotFrame before comparing.
        rot_curr = tfx.lookupTransform(rotFrame, frame_curr, wait=10) * rot_curr
        rot_des = tfx.lookupTransform(rotFrame, frame_des, wait=10) * rot_des

    position_delta = pos_des.array - pos_curr.array

    # Relative rotation as a quaternion: inverse(current) * desired.
    quat_curr = tfx.tb_angles(rot_curr).quaternion
    quat_des = tfx.tb_angles(rot_des).quaternion
    quat_delta = tft.quaternion_multiply(tft.quaternion_inverse(quat_curr), quat_des)

    return tfx.pose(position_delta, quat_delta)
Exemplo n.º 21
0
    def succesorState(self):
        ''' Updates current state to the next succesor state.

        By default it just returns the current state; arm poses are only
        updated when IK yields a valid joint solution.
        '''
        rightArmPose = self.rightArm.get_pose()
        leftArmPose = self.leftArm.get_pose()
        newRightArmPose = tfx.pose(rightArmPose)  # Create new pose
        newLeftArmPose = tfx.pose(leftArmPose)  # Create new pose
        ''' ------ CHANGE CODE BELOW THIS POINT ------ '''

        newRightArmPose.position = [
            0.632, -0.740, 0.772
        ]  # either update by adding to xyz offset array "+ [0, -pos_step, 0]" or assign new position, defualt keeps position the same
        newLeftArmPose.position = [0.632, 0.177, 0.772]
        ''' ------ CHANGE CODE ABOVE THIS POINT ------ '''

        # IK returns None for unreachable poses.
        newRightJoints = self.rightArm.ik(
            newRightArmPose)  # Updates arm pose if valid
        newLeftJoints = self.leftArm.ik(newLeftArmPose)
        if newRightJoints is not None:
            self.rightArm.go_to_joints(newRightJoints)
            print('new_pose: {0}'.format(newRightArmPose))
        else:
            rospy.loginfo('Invalid Right Arm Pose')
        rospy.sleep(.01)
        if newLeftJoints is not None:
            self.leftArm.go_to_joints(newLeftJoints)
        else:
            rospy.loginfo('Invalid Left Arm Pose')
        rospy.sleep(.01)
        return self  # Modify to generate a valid succesor states, does not update state if pose is invalid and instead reverts to last valid state
 def raiseArms(self):
     rightArmPose = self.rightArm.get_pose()
     leftArmPose = self.leftArm.get_pose()
     newRightArmPose = tfx.pose(rightArmPose)  # Create new pose
     newLeftArmPose = tfx.pose(leftArmPose)  # Create new pose
     newRightArmPose.position = RIGHT_UP  # either update by adding to xyz offset array "+ [0, -pos_step, 0]" or assign new position, defualt keeps position the same
     newLeftArmPose.position = LEFT_UP
     newRightJoints = self.rightArm.ik(
         newRightArmPose)  # Updates arm pose if valid
     newLeftJoints = self.leftArm.ik(newLeftArmPose)
     if newRightJoints is not None:
         self.rightArm.go_to_joints(newRightJoints)
         print('new_pose: {0}'.format(newRightArmPose))
         self.valid = True
     else:
         rospy.loginfo('Invalid Right Arm Pose')
         print newRightArmPose
         self.valid = False
         return self
     rospy.sleep(.01)
     if newLeftJoints is not None:
         self.leftArm.go_to_joints(newLeftJoints)
     else:
         rospy.loginfo('Invalid Left Arm Pose')
     rospy.sleep(.01)
     return self
Exemplo n.º 23
0
 def teleop(self):
     """Interactive keyboard teleop for this arm; 'q' quits.

     a/d/w/x/+/- translate by 1 cm steps; o/p/k/l/n/m adjust
     yaw/pitch/roll by 2 degree steps.
     """
     print('{0} arm teleop'.format(self.lrlong))
     
     pos_step = .01
     delta_position = {'a' : [0, pos_step, 0],
                       'd' : [0, -pos_step, 0],
                       'w' : [pos_step, 0, 0],
                       'x' : [-pos_step, 0, 0],
                       '+' : [0, 0, pos_step],
                       '-' : [0, 0, -pos_step]}
     
     angle_step = 2.0
     delta_angle = {'o' : [angle_step, 0, 0],
                    'p' : [-angle_step, 0, 0],
                    'k' : [0, angle_step, 0],
                    'l' : [0, -angle_step, 0],
                    'n' : [0, 0, angle_step],
                    'm' : [0, 0, -angle_step]}
     
     char = ''
     while char != 'q':
         char = utils.Getch.getch()
         pose = self.get_pose()
         new_pose = tfx.pose(pose)
         # 'in' instead of the removed dict.has_key (works on Python 2 and 3).
         if char in delta_position:
             new_pose.position = pose.position.array + delta_position[char]
         ypr = np.array([pose.tb_angles.yaw_deg, pose.tb_angles.pitch_deg, pose.tb_angles.roll_deg])
         if char in delta_angle:
             ypr += np.array(delta_angle[char])
             new_pose = tfx.pose(pose.position, tfx.tb_angles(ypr[0], ypr[1], ypr[2]))
         self.set_pose(new_pose)
         time.sleep(.01)
         
     print('{0} arm end teleop'.format(self.lrlong))
    def alignGrippers(self):
        """Zero out both grippers' orientation (yaw/pitch/roll = 0) and open them.

        The right arm is handled first; if its IK fails, self.valid is set
        False and the method returns early without touching the left arm.

        :return: self on right-arm failure, None otherwise
        """
        rightArmPose = self.rightArm.get_pose()
        leftArmPose = self.leftArm.get_pose()
        # Keep positions, reset orientations to identity.
        newLeftArmPose = tfx.pose(leftArmPose.position,
                                  tfx.tb_angles(0.0, 0.0,
                                                0.0))  # Create new pose

        newRightArmPose = tfx.pose(rightArmPose.position,
                                   tfx.tb_angles(0.0, 0.0, 0.0))
        newRightJoints = self.rightArm.ik(
            newRightArmPose)  # Updates arm pose if valid
        newLeftJoints = self.leftArm.ik(newLeftArmPose)
        if newRightJoints is not None:
            self.rightArm.go_to_joints(newRightJoints)
            print('new_pose: {0}'.format(newRightArmPose))
            print "Aligning right gripper..."
            self.rightArm.open_gripper()
            self.valid = True
        else:
            rospy.loginfo('Invalid Right Arm Pose')
            print "RIGHT ALIGNMENT FAILED"
            self.valid = False
            return self
        rospy.sleep(0.5)
        if newLeftJoints is not None:
            self.leftArm.go_to_joints(newLeftJoints)
            print "Aligning left gripper..."
            self.leftArm.open_gripper()
        else:
            rospy.loginfo('Invalid Left Arm Pose')
            print "LEFT ALIGNMENT FAILED"
        rospy.sleep(0.5)
def test_servo():
    """Servo the gripper toward a target pose, looping until within bounds.

    NOTE(review): relies on module-level globals `arm` and `armDot` (the
    desired pose, w.r.t. Link0) defined elsewhere in this file -- verify.
    """
    rospy.init_node('gripper_control',anonymous=True)
    rospy.sleep(2)
    listener = tf.TransformListener()
    gripperControl = GripperControlClass(arm, tf.TransformListener())
    rospy.sleep(2)

    gripperControl.start()

    rospy.loginfo('Press enter')
    raw_input()

    currPose = tfx.pose(gripperControl.getGripperPose(MyConstants.Frames.Link0))
    
    # w.r.t. Link0
    desPose = armDot

    # Loop tolerances: 1 cm translation, 25 deg rotation.
    transBound = .01
    rotBound = 25
    
    rate = rospy.Rate(1)

    while not Util.withinBounds(currPose, desPose, transBound, rotBound) and not rospy.is_shutdown():
        rospy.loginfo('LOOP!!!!!!!!!')
        
        # Pause the controller before issuing a new delta command.
        gripperControl.pause()

        currPose = tfx.pose(gripperControl.getGripperPose(MyConstants.Frames.Link0))
    
        deltaPose = Util.deltaPose(currPose, desPose)
    
        gripperControl.goToGripperPoseDelta(gripperControl.getGripperPose(MyConstants.Frames.Link0), deltaPose)
        
        rate.sleep()
Exemplo n.º 26
0
    def succesorState(self):
        ''' Updates current state to the next succesor state.

        Waypoint sequence for grasping the green target: move to
        TARGET_GREEN; once within tolerance, move to GRASP_GREEN; once
        there, close the gripper, raise the arms, and head to DEST_GREEN.
        If the previous IK attempt failed (self.valid is False), prompts
        the user for a custom "x y z" pose instead. Returns self; the arm
        is only moved when IK succeeds.
        '''
        rightArmPose = self.rightArm.get_pose()
        leftArmPose = self.leftArm.get_pose()
        newRightArmPose = tfx.pose(rightArmPose)  # Create new pose
        newLeftArmPose = tfx.pose(leftArmPose)  # Create new pose
        ''' ------ CHANGE CODE BELOW THIS POINT ------ '''
        if self.valid:
            x1, y1, z1 = TARGET_GREEN
            x2, y2, z2 = GRASP_GREEN
            currentX, currentY, currentZ = rightArmPose.position.array
            # Per-axis tolerance for "waypoint reached" -- presumably meters; verify.
            acceptableAbsError = 0.05
            if abs(x1 - currentX) <= acceptableAbsError and abs(
                    y1 - currentY) <= acceptableAbsError and abs(
                        z1 - currentZ) <= acceptableAbsError:
                # At the approach point: descend to the grasp pose.
                newRightArmPose.position = GRASP_GREEN
            elif abs(x2 - currentX) <= acceptableAbsError and abs(
                    y2 - currentY) <= acceptableAbsError and abs(
                        z2 - currentZ) <= acceptableAbsError:
                # At the grasp pose: grab, lift, and carry to the destination.
                self.rightArm.close_gripper()
                print "Grasping..."
                rospy.sleep(1)
                self.raiseArms()
                newRightArmPose.position = DEST_GREEN
            else:
                newRightArmPose.position = TARGET_GREEN  # either update by adding to xyz offset array "+ [0, -pos_step, 0]" or assign new position, defualt keeps position the same

        else:
            print "The last succesor state was invalid"
            print "Enter a custom pose x y z"
            coord = raw_input().split()
            newRightArmPose.position = [float(x) for x in coord]

        newLeftArmPose.position = leftArmPose.position
        ''' ------ CHANGE CODE ABOVE THIS POINT ------ '''

        # The right arm is always commanded with identity orientation.
        newRightArmPose = tfx.pose(newRightArmPose.position,
                                   tfx.tb_angles(0.0, 0.0, 0.0))
        newRightJoints = self.rightArm.ik(
            newRightArmPose)  # Updates arm pose if valid
        newLeftJoints = self.leftArm.ik(newLeftArmPose)
        if newRightJoints is not None:
            self.rightArm.go_to_joints(newRightJoints)
            print('new_pose: {0}'.format(newRightArmPose))
            self.valid = True
        else:
            rospy.loginfo('Invalid Right Arm Pose')
            print newRightArmPose
            self.valid = False
            return self
        rospy.sleep(.01)
        if newLeftJoints is not None:
            self.leftArm.go_to_joints(newLeftJoints)
        else:
            rospy.loginfo('Invalid Left Arm Pose')
        rospy.sleep(.01)
        return self  # Modify to generate a valid succesor states, does not update state if pose is invalid and instead reverts to last valid state
Exemplo n.º 27
0
def testSwitchPlaces(show=True):
    """Command each Raven arm to a pose offset +/-1cm along x and optionally
    replay the planned pose trajectories in an OpenRAVE viewer.

    show -- when True, open the qtcoin viewer and step through the planned
            pose trajectories, waiting for <enter> between steps.
    """
    #trajoptpy.SetInteractive(True)
    
    rospy.init_node('testSwitchPlaces',anonymous=True)
    rp = RavenPlanner([MyConstants.Arm.Left,MyConstants.Arm.Right], thread=True)
    
    #rightCurrPose = tfx.pose([-0.068,-0.061,-0.129],tfx.tb_angles(-139.6,88.5,111.8),frame=MyConstants.Frames.Link0)
    #leftCurrPose = tfx.pose([-.072,-.015,-.153],tfx.tb_angles(43.9,78.6,100.9),frame=MyConstants.Frames.Link0)
    
    # current tool poses, re-expressed in the Link0 frame
    leftCurrPose = Util.convertToFrame(tfx.pose([0,0,0],frame=rp.toolFrame[MyConstants.Arm.Left]),MyConstants.Frames.Link0)
    rightCurrPose = Util.convertToFrame(tfx.pose([0,0,0],frame=rp.toolFrame[MyConstants.Arm.Right]),MyConstants.Frames.Link0)

    #rp.getTrajectoryPoseToPose(MyConstants.Arm.Left, leftCurrPose, rightCurrPose, n_steps=50)
    #rp.getTrajectoryPoseToPose(MyConstants.Arm.Right, rightCurrPose, leftCurrPose, n_steps=50)
    
    rospy.loginfo('Press enter to get poses')
    raw_input()
    
    # request 50-step trajectories to poses offset +/-1cm along x
    rp.getPoseTrajectory(MyConstants.Arm.Left, leftCurrPose+[0.01,0,0], n_steps=50)
    rp.getPoseTrajectory(MyConstants.Arm.Right, rightCurrPose-[0.01,0,0], n_steps=50)
    
    #IPython.embed()
    
    print 'waiting'
    rp.waitForTrajReady()  # blocks until both trajectory requests complete
    print 'woooooooo'
    
    if rospy.is_shutdown():
        return
    
    #IPython.embed()
    
    if not show:
        return
    rp.env.SetViewer('qtcoin')
    
    leftPoseTraj = rp.poseTraj[MyConstants.Arm.Left]
    rightPoseTraj = rp.poseTraj[MyConstants.Arm.Right]
    
    # step the OpenRAVE model through both pose trajectories in lockstep
    for left, right in zip(leftPoseTraj,rightPoseTraj):
        if rospy.is_shutdown():
            break
        rp.updateOpenraveJoints('L', rp.getJointsFromPose('L', left, rp.getCurrentGrasp('L')), grasp=rp.getCurrentGrasp('L'))
        rp.updateOpenraveJoints('R', rp.getJointsFromPose('R', right, rp.getCurrentGrasp('R')), grasp=rp.getCurrentGrasp('R'))
        rospy.loginfo('Press enter to go to next step')
        raw_input()
        
    return
    
    # NOTE(review): everything below the bare `return` above is unreachable
    # dead code (a joint-trajectory replay variant, kept for reference).
    leftTraj = rp.jointTraj[MyConstants.Arm.Left]
    rightTraj = rp.jointTraj[MyConstants.Arm.Right]
    
    for left, right in zip(leftTraj,rightTraj):
        if rospy.is_shutdown():
            break
        rp.updateOpenraveJoints('L', left)
        rp.updateOpenraveJoints('R', right)
        rospy.loginfo('Press enter to go to next step')
        raw_input()
Exemplo n.º 28
0
    def run(self):
        """Main tracker loop: broadcast static calibration frames, poll the
        PhaseSpace (OWL) server for markers, and publish gripper /
        registration poses while ``self.running`` is set.
        """
        while(self.running):
            if True:  # NOTE(review): placeholder condition -- always taken
                # Re-broadcast the static chessboard and screw frames
                # relative to 0_link on every cycle.
                self.broadcaster.sendTransform(CHESSBOARD_TRANSLATION, tfx.tb_to_quat(CHESSBOARD_ROLL_0LINK, CHESSBOARD_PITCH_0LINK, CHESSBOARD_YAW_0LINK),
                    rospy.Time.now(), CHESSBOARD_FRAME, '0_link')
                
                self.broadcaster.sendTransform(SCREW_TRANSLATION, tfx.tb_to_quat(SCREW_ROLL_0LINK, SCREW_PITCH_0LINK, SCREW_YAW_0LINK),
                    rospy.Time.now(), SCREW_FRAME, '0_link')
                

                # get markers and accompanying metadata
                frame = owlGetIntegerv(OWL_FRAME_NUMBER)[0]
                timestamp = owlGetIntegerv(OWL_TIMESTAMP)
                if len(timestamp) > 0:
                    # assumes timestamp[1] is seconds and timestamp[2] is
                    # microseconds -- TODO confirm against the OWL API docs
                    timeInSeconds = timestamp[1]
                    timeInSeconds += timestamp[2] * 1e-6
                    self.messageTime = rospy.Time.from_sec(timeInSeconds)
                
                markers = owlGetMarkers()
                
                if owlGetError() != OWL_NO_ERROR:
                    # a failed marker read is treated as fatal: log and exit
                    rospy.loginfo('Getting markers failed...')
                    sys.exit(0)
                    pass
                
                gripperMarkers = []
                registrationMarkers = []
                
                # find the correct markers
                for i in range(markers.size()):
                    m = markers[i]
                    
                    if m.cond > 0:  # presumably cond > 0 means the marker was tracked this frame -- confirm
                        if m.id in GRIPPER_MARKER_IDS:
                            gripperMarkers.append(m)
                        elif m.id in REGISTRATION_MARKER_IDS:
                            registrationMarkers.append(m)
    
                        if self.verbose:
                            print("marker %06d %d: %f %f %f %f" % (frame, m.id, m.cond, m.x, m.y, m.z))
                            
                # get the gripper pose (in phasespace basis)
                if len(gripperMarkers) == 3:  # require all three gripper markers
                    gTrans, gRot = self.poseFromMarkers2(gripperMarkers, GRIPPER_MARKER_IDS, rotate=True)
                    
                    gripperPosePhasespace = tfx.pose(gTrans, gRot, frame=PHASESPACE_FRAME, stamp=self.messageTime)
                    self.gripper_pose_pub.publish(gripperPosePhasespace.msg.PoseStamped())
                    
                    self.broadcaster.sendTransform(gTrans, gRot, self.messageTime, PHASESPACE_GRIPPER_FRAME, PHASESPACE_FRAME)
                
                # get the registration marker pose (in phasespace basis)
                if self.register and len(registrationMarkers) == 3:
                    rTrans, rRot = self.poseFromMarkers(registrationMarkers, REGISTRATION_MARKER_IDS, rotate=False)
                    screwPosePhasespaceFrame = tfx.pose(rTrans, rRot)
                    # invert to express the phasespace frame in the screw frame
                    phasespacePoseScrewFrame = screwPosePhasespaceFrame.inverse()

                    self.broadcaster.sendTransform(phasespacePoseScrewFrame.translation, phasespacePoseScrewFrame.rotation, self.messageTime, PHASESPACE_FRAME, CHESSBOARD_FRAME)
                
            self.finished = True
Exemplo n.º 29
0
 def _estimatePoseCallback(msg):
     """Debug callback: dump the estimated gripper pose and its timestamp lag.

     NOTE(review): defined without a `self` parameter -- presumably registered
     as a standalone/static callback; confirm against the subscriber setup.
     """
     rospy.loginfo('Received estimated gripper pose in MoveTowardsReceptacle')
     print 'pose'
     print tfx.pose(msg)
     print 'time'
     print tfx.pose(msg).stamp.seconds
     print 'now - msg time'
     # message age: wall-clock now minus the pose's stamp, in seconds
     print rospy.Time.now().to_sec() - tfx.pose(msg).stamp.seconds
Exemplo n.º 30
0
 def _avg_handle_poses_callback(self, msg):
     """Store the handle pose closest to the origin (None when *msg* is empty)."""
     closest = None
     closest_dist = np.inf
     for raw_pose in msg.poses:
         d = tfx.pose(raw_pose).position.norm
         if d < closest_dist:
             closest_dist = d
             closest = tfx.pose(raw_pose, header=msg.header)
     self.handle_pose = closest
 def _estimatePoseCallback(msg):
     """Debug callback: print the received gripper pose estimate and how far
     behind wall-clock time its stamp is.

     NOTE(review): no `self` parameter -- presumably used as a standalone
     callback; verify against where it is subscribed.
     """
     rospy.loginfo('Received estimated gripper pose in MoveTowardsReceptacle')
     print 'pose'
     print tfx.pose(msg)
     print 'time'
     print tfx.pose(msg).stamp.seconds
     print 'now - msg time'
     # message age in seconds (now minus the pose's stamp)
     print rospy.Time.now().to_sec() - tfx.pose(msg).stamp.seconds
Exemplo n.º 32
0
    def predictSinglePose(self, pose, arm_side):
        """Run the predictor on one pose and return the (GP, systematic)
        predictions as tfx poses carrying the input's frame and stamp.
        """
        pos = pose.position
        ori = pose.orientation
        # build a 4x4 homogeneous matrix: translation composed with rotation
        trans_mat = tft.translation_matrix([pos.x, pos.y, pos.z])
        rot_mat = tft.quaternion_matrix([ori.x, ori.y, ori.z, ori.w])
        homogeneous = np.dot(trans_mat, rot_mat)

        gp_out, sys_out = self.predict([homogeneous], arm_side)
        gp_pose = tfx.pose(gp_out[0], frame=pose.frame, stamp=pose.stamp)
        sys_pose = tfx.pose(sys_out[0], frame=pose.frame, stamp=pose.stamp)
        return gp_pose, sys_pose
Exemplo n.º 33
0
 def _avg_handle_poses_callback(self, msg):
     """Remember whichever pose in *msg* lies nearest the origin, or None."""
     best = None
     best_norm = np.inf
     for p in msg.poses:
         norm = tfx.pose(p).position.norm
         if norm < best_norm:
             best_norm, best = norm, tfx.pose(p, header=msg.header)
     self.handle_pose = best
Exemplo n.º 34
0
    def __init__(self):
        """Initialize the outcome placeholder and both arms' fixed home poses."""
        self.outcomes = None

        # Hard-coded home poses: [x, y, z] position plus orientation quaternion.
        left_position = [-0.05009142015689537, 0.03540081898889297, -0.14583058800170023]
        left_quat = (-0.01797605558439532, -0.9838454461982115, 0.03728902700169569,
                     0.17416810237794897)
        self.homePoseLeft = tfx.pose(left_position, left_quat)

        right_position = [0.061241857051286236, 0.014307808069346816, -0.10446866837544996]
        right_quat = (-0.9689889616996428, 0.1939060552483166, -0.1474787309756946,
                      0.04136251626876463)
        self.homePoseRight = tfx.pose(right_position, right_quat)
Exemplo n.º 35
0
 def __init__(self, armName, rand=False, x=.03, y=.05, z=.004, testAngles=False, maxPoses=1000, speed=0.01):
     """Set up a pose-sampling grid around the arm's home pose.

     armName  -- 'R' or 'L'; selects the per-arm hard-coded home pose
     rand     -- flag stored for randomized sampling (used elsewhere)
     x, y, z  -- grid extents per axis
     maxPoses -- upper bound on poses to visit
     speed    -- default pose speed handed to RavenArm
     """
     self.ravenArm = RavenArm(armName, defaultPoseSpeed=speed)
     self.arm = armName
     # per-arm home pose: same orientation, different x offset
     if armName == 'R':
         self.homePose = tfx.pose([-.12,-.02,-.135],tfx.tb_angles(-90,90,0))
     else:
         self.homePose = tfx.pose([-.03,-.02,-.135],tfx.tb_angles(-90,90,0))
     self.random = rand
     print self.homePose
     
     # sanity-check that the home pose has an inverse-kinematics solution
     self.grasp = 1.2
     joints_dict = kinematics.invArmKin(self.arm, self.homePose, self.grasp)
     print joints_dict
     
     # grid resolution per axis
     self.x_levels = 5
     self.y_levels = 5
     self.z_levels = 7
     
     self.x = x
     self.y = y
     self.z = z
     
     self.maxPoses = maxPoses
     
     # generate all angle combinations in order to sample the rotations as well
     self.angles = []
     
     self.angles.append(tfx.tb_angles(-90,90,0))
     
     if False:  # disabled: extra orientation perturbations around the nominal angle
         self.angles.append(tfx.tb_angles(-90,80,0))
         self.angles.append(tfx.tb_angles(-90,100,0))
         
         self.angles.append(tfx.tb_angles(-80,90,0))
         self.angles.append(tfx.tb_angles(-80,80,0))
         self.angles.append(tfx.tb_angles(-80,100,0))
         
         self.angles.append(tfx.tb_angles(-100,90,0))
         self.angles.append(tfx.tb_angles(-100,80,0))
         self.angles.append(tfx.tb_angles(-100,100,0))
     
     # rectangular perimeter of poses offset from the home pose
     # NOTE(review): the y-direction legs iterate over x_levels, not y_levels,
     # and divide by y_levels -- looks unintentional; confirm the grid shape.
     self.grid = []
     self.grid += [self.homePose + [float(i)*(x/self.x_levels),0,0] for i in xrange(self.x_levels)]
     self.grid += [self.homePose + [x, float(i)*(-y/self.y_levels),0] for i in xrange(self.x_levels)]
     self.grid += [self.homePose + [-float(i)*(x/self.x_levels),-y,0] for i in xrange(self.x_levels)]
     self.grid += [self.homePose + [0, -float(i)*(-y/self.y_levels),0] for i in xrange(self.x_levels)]
     
     """
     self.grid = []
     self.grid += [tfx.pose([x/self.x_levels,0,0]) for _ in xrange(self.x_levels)]
     self.grid += [tfx.pose([0,-y/self.y_levels,0]) for _ in xrange(self.y_levels)]
     self.grid += [tfx.pose([-x/self.x_levels,0,0]) for _ in xrange(self.x_levels)]
     self.grid += [tfx.pose([0,y/self.y_levels,0]) for _ in xrange(self.y_levels)]
     """
     
     """
def testRotation():
    """Debug routine: compare the detected object normal against the gripper
    orientation and print several candidate rotation corrections.
    """
    rospy.init_node('ar_image_detection')

    imageDetector = ARImageDetector()
    listener = tf.TransformListener()
    tf_br = tf.TransformBroadcaster()
    

    while not rospy.is_shutdown():
          if imageDetector.hasFoundGripper(Constants.Arm.Left):
                obj = tfx.pose([0,0,0], imageDetector.normal).msg.PoseStamped()
                gripper = imageDetector.getGripperPose(Constants.Arm.Left)

                print('gripper')
                print(gripper)

                # obj_tb = tfx.tb_angles(obj.pose.orientation)
                gripper_tb = tfx.tb_angles(gripper.pose.orientation)
                print "gripper ori", gripper_tb
                obj_tb = tfx.tb_angles(imageDetector.normal)
                print "obj ori", obj_tb
                # broadcast a desired pose: gripper position + object normal
                pt = gripper.pose.position
                ori = imageDetector.normal
                tf_br.sendTransform((pt.x, pt.y, pt.z), (ori.x, ori.y, ori.z, ori.w),
                                    gripper.header.stamp, '/des_pose', Constants.AR.Frames.Base)
                
                
                between = Util.angleBetweenQuaternions(ori, gripper_tb.msg)
                print('Angle between')
                print(between)

                # quaternion taking the object orientation onto the gripper's
                quat = tft.quaternion_multiply(gripper_tb.quaternion, tft.quaternion_inverse(obj_tb.quaternion))
                print 'new', tfx.tb_angles(quat)

                #rot = gripper_tb.rotation_to(ori)
                rot = gripper_tb.rotation_to(obj_tb)
                print('Rotation from gripper to obj')
                print(rot)

                deltaPoseTb = tfx.pose(Util.deltaPose(gripper, obj)).orientation
                print('deltaPose')
                print(deltaPoseTb)

                # re-express the delta rotation in the tool frame
                deltaPose = tfx.pose([0,0,0], deltaPoseTb).msg.PoseStamped()
                time = listener.getLatestCommonTime('0_link', 'tool_L')
                deltaPose.header.stamp = time
                deltaPose.header.frame_id = '0_link'
                deltaPose = listener.transformPose('tool_L', deltaPose)
                print "transformed", tfx.tb_angles(deltaPose.pose.orientation)

                endQuatMat = gripper_tb.matrix * rot.matrix
                print 'desOri', tfx.tb_angles(endQuatMat)
                

          rospy.sleep(1)
Exemplo n.º 37
0
    def _handles_loop(self):
        """
        For each handle in HandleListMsg,
        calculate average pose

        Runs until shutdown: waits for a fresh HandleListMsg + camera pose
        pair, transforms and filters each handle's cylinder poses into the
        base_link frame, then publishes all filtered poses plus one averaged
        pose per handle.
        """
        while not rospy.is_shutdown():
            
            # block until both a handle list and a camera pose have arrived
            self.handle_list_msg = None
            self.camera_pose = None
            while not rospy.is_shutdown() and (self.handle_list_msg is None or self.camera_pose is None):
                rospy.sleep(.01)
            handle_list_msg = self.handle_list_msg
            camera_pose = self.camera_pose
            
            pose_array = gm.PoseArray()
            pose_array.header.frame_id = 'base_link'
            pose_array.header.stamp = rospy.Time.now()
            
            avg_pose_array = gm.PoseArray()
            avg_pose_array.header.frame_id = 'base_link'
            avg_pose_array.header.stamp = rospy.Time.now()
    
            # camera -> base_link transform (identity when the message is
            # already expressed in base_link)
            if handle_list_msg.header.frame_id.count('base_link') > 0:
                cam_to_base = np.eye(4)
            else:
                cam_to_base = camera_pose.matrix #tfx.lookupTransform('base_link', handle_list_msg.header.frame_id).matrix
            # axis permutation applied to each cylinder orientation
            switch = np.matrix([[0, 1, 0],
                                [1, 0, 0],
                                [0, 0, 1]])        
            for handle in handle_list_msg.handles:
                all_poses = [tfx.pose(cylinder.pose, stamp=rospy.Time.now(), frame=handle_list_msg.header.frame_id) for cylinder in handle.cylinders]
                
                rotated_poses = [tfx.pose(p.position, tfx.tb_angles(p.orientation.matrix*switch)) for p in all_poses]
                filtered_poses = list()
                for rot_pose in rotated_poses:
                    # keep poses whose rotated x-axis faces forward in base;
                    # flip ones whose z-axis points up by a 180-degree yaw
                    r_base = cam_to_base[:3,:3]*rot_pose.orientation.matrix
                    if r_base[0,0] > 0:
                        if r_base[2,2] > 0:
                            rot_pose.orientation = tfx.tb_angles(rot_pose.orientation.matrix*tfx.tb_angles(0,0,180).matrix) 
                        filtered_poses.append(rot_pose)
                
                # express in base_link and clip to the configured position box
                filtered_poses = [tfx.pose(cam_to_base*pose.matrix, frame='base_link') for pose in filtered_poses]
                filtered_poses = filter(lambda pose: min(self.min_positions < pose.position.array) and min(pose.position.array < self.max_positions), filtered_poses)
                pose_array.poses += [pose.msg.Pose() for pose in filtered_poses]
                
                if len(filtered_poses) > 0:
                    # component-wise mean of positions and quaternions
                    # NOTE(review): arithmetic quaternion averaging is only
                    # approximate -- confirm this is acceptable here.
                    avg_position = sum([p.position.array for p in filtered_poses])/float(len(filtered_poses))
                    avg_quat = sum([p.orientation.quaternion for p in filtered_poses])/float(len(filtered_poses))
                    avg_pose_array.poses.append(tfx.pose(avg_position, avg_quat).msg.Pose())
                

            self.handles_pose_pub.publish(pose_array)
            self.avg_handles_pose_pub.publish(avg_pose_array)
            
            self.logger_pub.publish('handles {0}'.format(len(avg_pose_array.poses)))
    def generateTraj(self, deltaPose):
        """Plan a joint trajectory that moves the gripper by *deltaPose*.

        The goal pose is the current gripper pose composed with deltaPose;
        planning starts from a fixed, hard-coded joint configuration.
        Returns the joint trajectory from the Raven planner.
        """
        startPose = tfx.pose(self.ravenArm.getGripperPose())
        deltaPose = tfx.pose(deltaPose)
        
        #code.interact(local=locals())
        # fixed start joint configuration (radians, keyed by joint index)
        startJoints = {0:0.51091998815536499,
                       1:1.6072717905044558,
                       2:-0.049991328269243247,
                       4:0.14740140736103061,
                       5:0.10896652936935426,
                       8:-0.31163200736045837}
        

        # NOTE(review): endJoints is unused by the live code path -- it is
        # only referenced by the disabled experiment in the string block below.
        endJoints = {0:0.45221099211619786,
                     1:2.2917657932075581,
                     2:-0.068851854076958902,
                     4:0.44096283117933965,
                     5:0.32085205361054148,
                     8:-0.82079765727524379}
        
        endPose = Util.endPose(startPose, deltaPose)
        #endPose = startPose
        
        #IPython.embed()
        
        jointTraj = self.ravenPlanner.getTrajectoryFromPose(endPose, startJoints=startJoints, n_steps=15)
        # the triple-quoted block below is a disabled direct-trajopt experiment
        """
        n_steps = 15
        
        self.ravenPlanner.updateOpenraveJoints(startJoints)
        
        endJointPositions = endJoints.values()
        request = Request.joints(n_steps, self.ravenPlanner.manip.GetName(), endJointPositions)
        
        # convert dictionary into json-formatted string
        s = json.dumps(request)
        # create object that stores optimization problem
        prob = trajoptpy.ConstructProblem(s, self.ravenPlanner.env)
        # do optimization
        result = trajoptpy.OptimizeProblem(prob)

        # check trajectory safety
        from trajoptpy.check_traj import traj_is_safe
        prob.SetRobotActiveDOFs()
        if not traj_is_safe(result.GetTraj(), self.ravenPlanner.robot):
            rospy.loginfo('Trajopt trajectory is not safe. Trajopt failed!')
            rospy.loginfo('Still returning trajectory')
            #return

        IPython.embed()

        return self.ravenPlanner.trajoptTrajToDicts(result.GetTraj())
        """
        return jointTraj
Exemplo n.º 39
0
    def touch(self):
        """
        Position the gripper facing downwards and move down 10cm,
        logging to a material-specific bag file via execute_experiment.
        """
        # The pose was previously wrapped twice -- tfx.pose(tfx.pose(...)) --
        # which is redundant; the outer call has been removed.
        home_pose = tfx.pose([0.54, 0.2, 0.85], tfx.tb_angles(0, 90, 0), frame='base_link')
        home_joints = [0.57, 0.1233, 1.288, -1.58564, 1.695, -1.85322, 14.727]
        delta_pos = [0, 0, -0.10]  # descend 10cm straight down
        speed = 0.02
        # renamed from `file` to avoid shadowing the builtin
        bag_file = '../data/touch_{0}.bag'.format(self.object_material)

        self.execute_experiment(bag_file, home_joints, home_pose, delta_pos, speed=speed)
Exemplo n.º 40
0
    def execute_raster_single_row_reverse(self, n, pick_up_tool=True):
        """ Repeatedly passes n times over a single row of a tissue brick in the reverse direction"""

        steps = 12
        poses = []  # NOTE(review): never populated -- appears vestigial

        # tissue frame: origin plus orientation axes (columns u, v, w)
        origin = np.hstack(np.array(self.tissue_pose.position))
        frame = np.array(self.tissue_pose.orientation.matrix)

        u, v, w = frame.T[0], frame.T[1], frame.T[2]

        # probe orientation: swap u/v and point the tool along -w
        rotation_matrix = np.array([v, u, -w]).transpose()

        dy = self.tissue_length/(steps)
        z = self.probe_offset

        # pick up tool
        if pick_up_tool:
            self.pick_up_tool()

        self.probe_stop_reset()
        # NOTE(review): only the i == 3 row is ever rastered; the i == 0 skip
        # and the steps-3 bound look hand-tuned -- confirm intent.
        for i in range(steps-3):
            if i == 0:
                continue
            if i == 3:
                for _ in range(n):
                    # hover above the far end of the row
                    offset = np.dot(frame, np.array([i*dy, self.tissue_width*0.95, z+0.02]))
                    pose = tfx.pose(origin+offset, rotation_matrix, frame=self.tissue_pose.frame)
                    self.psm1.move_cartesian_frame_linear_interpolation(pose, 0.01, False)

                    # reverse pass over the row
                    offset = np.dot(frame, np.array([i*dy, self.tissue_width*0.95, z]))
                    pose = tfx.pose(origin+offset, rotation_matrix, frame=self.tissue_pose.frame)
                    self.psm1.move_cartesian_frame_linear_interpolation(pose, 0.01, False)

                    # start recording data
                    rospy.sleep(0.2)
                    self.probe_start()

                    # sweep back to the row start while recording
                    offset = np.dot(frame, np.array([i*dy, 0.0, z]))
                    pose = tfx.pose(origin+offset, rotation_matrix, frame=self.tissue_pose.frame)
                    self.psm1.move_cartesian_frame_linear_interpolation(pose, 0.01, False)

                    # pause recording data
                    rospy.sleep(0.2)
                    self.probe_pause()

                    # lift off the tissue before the next pass
                    offset = np.dot(frame, np.array([i*dy, 0.0, z+0.02]))
                    pose = tfx.pose(origin+offset, rotation_matrix, frame=self.tissue_pose.frame)
                    self.psm1.move_cartesian_frame_linear_interpolation(pose, 0.01, False)


        self.probe_save("probe_data.p")
Exemplo n.º 41
0
def move(arm, pos, SPEED_CLASS):
    """ Handles the different speeds we're using.

    arm         -- robot arm interface exposing the move_cartesian_frame* API
    pos         -- target position, combined with the global TFX_ROTATION
    SPEED_CLASS -- one of 'Slow', 'Medium', 'Fast'

    Raises ValueError (with the offending value) for an unknown SPEED_CLASS.
    """
    if SPEED_CLASS == 'Slow':
        arm.move_cartesian_frame_linear_interpolation(
            tfx.pose(pos, TFX_ROTATION), 0.03)
    elif SPEED_CLASS == 'Medium':
        arm.move_cartesian_frame_linear_interpolation(
            tfx.pose(pos, TFX_ROTATION), 0.06)
    elif SPEED_CLASS == 'Fast':
        arm.move_cartesian_frame(tfx.pose(pos, TFX_ROTATION))
    else:
        # previously a bare ValueError() with no diagnostic
        raise ValueError('Unknown SPEED_CLASS: {0!r}'.format(SPEED_CLASS))
 def plot_stable_pose(mesh, stable_pose, T_table_world=stf.SimilarityTransform3D(from_frame='world', to_frame='table'), d=0.5, style='wireframe', color=(0.5,0.5,0.5)):
     """Render *mesh* resting on a table in *stable_pose* and return the
     world transform of the object (inverse of T_world_obj).

     NOTE(review): the T_table_world default is constructed once at function
     definition time and shared across calls -- fine if never mutated, but
     worth confirming.
     """
     # rotate the mesh into the stable-pose orientation
     T_mesh_stp = stf.SimilarityTransform3D(pose=tfx.pose(stable_pose.r))
     mesh_tf = mesh.transform(T_mesh_stp)
     # drop the mesh so its lowest point touches the table plane (z = 0)
     mn, mx = mesh_tf.bounding_box()
     z = mn[2]
     x0 = np.array([0,0,-z])
     T_table_obj = stf.SimilarityTransform3D(pose=tfx.pose(stable_pose.r, x0),
                                              from_frame='obj', to_frame='table')
     T_world_obj = T_table_world.inverse().dot(T_table_obj)
     MayaviVisualizer.plot_mesh(mesh, T_world_obj.inverse(), style=style, color=color)
     MayaviVisualizer.plot_table(T_table_world, d=d)
     return T_world_obj.inverse()
Exemplo n.º 43
0
Arquivo: probe.py Projeto: yjen/probe
    def record_status_callback(self, data):
        """Handle recording-control messages.

        On START, reset the palpation and pose buffers. On STOP, truncate the
        two buffers to a common length, persist them (CSV for palpation,
        pickle for poses and the combined pairs), estimate the vein location,
        and publish the start/end poses.
        """
        self.status = data.data
        rospy.loginfo(rospy.get_caller_id() + "I heard %s", data.data)
        
        if self.status == START:
            # begin a fresh recording session
            self.palpation_data = []
            self.pose_data = []
        elif self.status == STOP:
            print(self.palpation_data)
            print(self.pose_data)
            print(str(len(self.palpation_data)) + " palp")
            print(str(len(self.pose_data)) + " pose")
            if len(self.palpation_data) != len(self.pose_data):
                print("WARNING: lengths of data arrays do not match")

            # Truncate both streams to a common length. The previous
            # per-element trimming loops had an off-by-one (slicing pose_data
            # to len(palpation)-1) that could leave the arrays unequal and
            # crash the pairing below; min() makes them match exactly.
            common_len = min(len(self.palpation_data), len(self.pose_data))
            self.palpation_data = self.palpation_data[:common_len]
            self.pose_data = self.pose_data[:common_len]
            print(str(len(self.palpation_data)) + " palp")
            print(str(len(self.pose_data)) + " pose")

            # context managers guarantee the files are closed even on error
            with open('palpation_data_.csv', 'w') as palp_data_file:
                for d in self.palpation_data:
                    palp_data_file.write(str(d) + ',')
            with open('pose_data_.p', 'wb+') as pose_data_file:
                pickle.dump(self.pose_data, pose_data_file)

            # pair each palpation sample with its pose sample
            combined_data = list(zip(self.palpation_data, self.pose_data))
            with open('combined_data_.p', 'wb+') as combined_data_file:
                pickle.dump(combined_data, combined_data_file)

            start_end_points = pp.estimate_vein_location(self.palpation_data, self.pose_data, False)
            print(start_end_points)
            start_pose = tfx.pose(start_end_points[0])  # x, y, z
            end_pose = tfx.pose(start_end_points[1])
            self.publisher_start.publish(start_pose.msg.PoseStamped())
            self.publisher_end.publish(end_pose.msg.PoseStamped())
def testSwitchPlaces(armNames=None, fixedPose=False):
    """Plan trajectories that make the two Raven arms swap tool poses, then
    replay the resulting joint trajectories in an OpenRAVE viewer.

    armNames  -- arms to use; defaults to [Left, Right]. (Previously a list
                 default evaluated at import time -- the None sentinel avoids
                 the mutable-default pitfall and the import-time dependence
                 on MyConstants.)
    fixedPose -- if True, plan between hard-coded start poses instead of the
                 arms' current tool poses.
    """
    if armNames is None:
        armNames = [MyConstants.Arm.Left, MyConstants.Arm.Right]

    rospy.init_node('testSwitchPlaces',anonymous=True)
    rp = RavenPlanner(armNames)
    rospy.sleep(2)
    
    leftStartJoints = None
    rightStartJoints = None
    
    if fixedPose:
        # hard-coded start poses (Link0 frame) and their IK joint solutions
        rightCurrPose = tfx.pose([-0.068,-0.061,-0.129],tfx.tb_angles(-139.6,88.5,111.8),frame=MyConstants.Frames.Link0)
        
        leftCurrPose = tfx.pose([-.072,-.015,-.153],tfx.tb_angles(43.9,78.6,100.9),frame=MyConstants.Frames.Link0)
    
        leftStartJoints = rp.getJointsFromPose(MyConstants.Arm.Left, leftCurrPose, grasp=0)
        rightStartJoints = rp.getJointsFromPose(MyConstants.Arm.Right, rightCurrPose, grasp=0)
    else:
        # use each arm's current tool pose, expressed in Link0
        leftCurrPose = Util.convertToFrame(tfx.pose([0,0,0],frame=rp.toolFrame[MyConstants.Arm.Left]),MyConstants.Frames.Link0)
        rightCurrPose = Util.convertToFrame(tfx.pose([0,0,0],frame=rp.toolFrame[MyConstants.Arm.Right]),MyConstants.Frames.Link0)
    
    #trajoptpy.SetInteractive(True)
    
    # each arm plans toward the other arm's pose (i.e. they switch places)
    if fixedPose:
        rp.getTrajectoryFromPose(MyConstants.Arm.Left, rightCurrPose, endGrasp=0, startJoints=leftStartJoints, debug=True)
        rp.getTrajectoryFromPose(MyConstants.Arm.Right, leftCurrPose, endGrasp=0, startJoints=rightStartJoints, debug=True)
    else:
        rp.getTrajectoryFromPose(MyConstants.Arm.Left, rightCurrPose, leftStartJoints, debug=True)
        rp.getTrajectoryFromPose(MyConstants.Arm.Right, leftCurrPose, rightStartJoints, debug=True)
    
    rp.getTrajectoryFromPoseThread(once=True)
        
    leftTraj = rp.jointTraj[MyConstants.Arm.Left]
    rightTraj = rp.jointTraj[MyConstants.Arm.Right]
    
    rp.env.SetViewer('qtcoin')
    
    # step through both joint trajectories in lockstep
    for left, right in zip(leftTraj,rightTraj):
        rospy.loginfo('Press enter to go to next step')
        raw_input()
        rp.updateOpenraveJoints('L', left)
        rp.updateOpenraveJoints('R', right)
Exemplo n.º 45
0
    def __init__(self):
        """Record the outcome placeholder and the two arms' fixed home poses."""
        self.outcomes = None

        # home pose components: xyz position and orientation quaternion
        lpos = [-0.05009142015689537, 0.03540081898889297, -0.14583058800170023]
        lrot = (-0.01797605558439532, -0.9838454461982115, 0.03728902700169569,
                0.17416810237794897)
        rpos = [0.061241857051286236, 0.014307808069346816, -0.10446866837544996]
        rrot = (-0.9689889616996428, 0.1939060552483166, -0.1474787309756946,
                0.04136251626876463)

        self.homePoseLeft = tfx.pose(lpos, lrot)
        self.homePoseRight = tfx.pose(rpos, rrot)
Exemplo n.º 46
0
    def grasp_point_callback(self, msg):
        """Cache candidate grasp poses from *msg* and, on the first message,
        select and latch a single grasp point.

        NOTE(review): self.allGraspPoints is reset even when a grasp point was
        already found -- preserved as-is; confirm this is intentional.
        """
        self.allGraspPoints = {}
        if not self.graspPointFound:
            # enumerate replaces the previous hand-rolled graspCount counter
            for index, graspPose in enumerate(msg.poses):
                graspPose = tfx.pose(graspPose)
                graspPose.stamp = msg.header.stamp
                graspPose.frame = msg.header.frame_id
                self.allGraspPoints[index] = graspPose

            self.graspPoint = tfx.pose(self.get_grasp_point(self.allGraspPoints.values()), copy = True)
            self.graspPointFound = True
Exemplo n.º 47
0
def test_config3():
    """Return a fixed test configuration:
    (left joints, left pose, left grasp, right joints, right pose, right grasp).
    """
    left_joints = {0: .517, 1: 1.621, 2: -.04995, 4: .084, 5: .123, 6: .946, 7: .977}
    left_pose = tfx.pose((-.015, -.015, -.069),
                         tfx.tb_angles(-151.1, 76.0, -73.8),
                         frame='/0_link')
    left_grasp = 1.923

    right_joints = {0: .509, 1: 1.609, 2: -.04984, 4: .111, 5: .117, 6: -.637, 7: -.604}
    right_pose = tfx.pose((-.136, -.017, -.068),
                          tfx.tb_angles(-67.6, 68.8, 39.1),
                          frame='/0_link')
    right_grasp = 1.240

    return left_joints, left_pose, left_grasp, right_joints, right_pose, right_grasp
Exemplo n.º 48
0
def test_config2():
    """Return a fixed test configuration:
    (left joints, left pose, left grasp, right joints, right pose, right grasp).
    """
    left_joints = {0: .517, 1: 1.623, 2: -.05, 4: .16, 5: .161, 6: .99, 7: 1.023}
    left_pose = tfx.pose((-.015, -.014, -.069),
                         tfx.tb_angles(-160.6, 75.7, -87.2),
                         frame='/0_link')
    left_grasp = 2.013

    right_joints = {0: .511, 1: 1.607, 2: -.05, 4: .109, 5: .110, 6: -.652, 7: -.634}
    right_pose = tfx.pose((-.136, -.017, -.068),
                          tfx.tb_angles(-66.3, 68.8, 40.3),
                          frame='/0_link')
    right_grasp = 1.286

    return left_joints, left_pose, left_grasp, right_joints, right_pose, right_grasp
Exemplo n.º 49
0
def setup_environment(obs_type, M=1000, lr='r', zero_seed=True):
    """Build the eye-in-hand simulation and an initial particle cloud.

    Loads the PR2 test environment, puts both arms in the 'mantis'
    posture, samples M points uniformly in the box above the table, and
    sorts them by distance to the mug.

    :param obs_type: observation model passed through to EihSystem
    :param M: number of particles to sample
    :param lr: 'l' or 'r' — which arm/kinect pair to use
    :param zero_seed: seed random with 0 for reproducibility
    :return: (eih_sys, particles)
    """
    if zero_seed:
        random.seed(0)

    brett = pr2_sim.PR2('../envs/pr2-test.env.xml')
    env = brett.env

    larm = brett.larm
    rarm = brett.rarm
    l_kinect = brett.l_kinect
    r_kinect = brett.r_kinect

    larm.set_posture('mantis')
    rarm.set_posture('mantis')

    table = env.GetKinBody('table')
    base = table.GetLink('base')
    extents = base.Geometry.GetBoxExtents(base.GetGeometries()[0])

    # Sampling box: the table footprint in x/y, and a 0.2 m slab starting
    # at the table surface in z. Assumes the table orientation is np.eye(3).
    table_pose = tfx.pose(table.GetTransform())
    x_min, x_max = table_pose.position.x - extents[0], table_pose.position.x + extents[0]
    y_min, y_max = table_pose.position.y - extents[1], table_pose.position.y + extents[1]
    z_min, z_max = table_pose.position.z + extents[2], table_pose.position.z + extents[2] + .2

    mug = env.GetKinBody('mug')
    mug_pose = tfx.pose(mug.GetTransform())

    particles = [
        tfx.point([random_within(x_min, x_max),
                   random_within(y_min, y_max),
                   random_within(z_min, z_max)])
        for _ in xrange(M)
    ]
    # Closest-to-mug first.
    particles = sorted(particles, key=lambda p: (p - mug_pose.position).norm)

    arm = larm if lr == 'l' else rarm
    kinect = l_kinect if lr == 'l' else r_kinect

    eih_sys = EihSystem(env, arm, kinect, obs_type)
    kinect.render_on()
    time.sleep(1)

    return eih_sys, particles
Exemplo n.º 50
0
    def teleop(self):
        """Keyboard teleoperation loop for this arm.

        w/x, a/d, +/- translate the gripper along x, y, z; o/p, k/l, n/m
        step yaw/pitch/roll; 'q' quits. Each keypress produces a target
        pose that is solved with IK and executed when a solution exists.
        """
        rospy.loginfo('{0} arm teleop'.format(self.arm_name))

        pos_step = .05
        delta_position = {
            'a': [0, pos_step, 0],
            'd': [0, -pos_step, 0],
            'w': [pos_step, 0, 0],
            'x': [-pos_step, 0, 0],
            '+': [0, 0, pos_step],
            '-': [0, 0, -pos_step]
        }

        angle_step = 2.0
        delta_angle = {
            'o': [angle_step, 0, 0],
            'p': [-angle_step, 0, 0],
            'k': [0, angle_step, 0],
            'l': [0, -angle_step, 0],
            'n': [0, 0, angle_step],
            'm': [0, 0, -angle_step]
        }

        char = ''
        while char != 'q':
            char = utils.Getch.getch()
            pose = self.get_pose()
            print('pose: {0}'.format(pose))
            new_pose = tfx.pose(pose)
            if char in delta_position:
                print('delta_position: {0}'.format(delta_position[char]))
                new_pose.position = pose.position.array + delta_position[char]
            # Current orientation as yaw/pitch/roll in degrees.
            ypr = np.array([
                pose.tb_angles.yaw_deg, pose.tb_angles.pitch_deg,
                pose.tb_angles.roll_deg
            ])
            if char in delta_angle:
                print('delta_angle: {0}'.format(delta_angle[char]))
                ypr += np.array(delta_angle[char])
                new_pose = tfx.pose(pose.position,
                                    tfx.tb_angles(ypr[0], ypr[1], ypr[2]))
            print('new_pose: {0}'.format(new_pose))
            new_joints = self.ik(new_pose)
            # Fix: the original logged 'Invalid pose' on the *success*
            # branch; the message belongs where IK returned None.
            if new_joints is not None:
                self.go_to_joints(new_joints)
            else:
                rospy.loginfo('Invalid pose')
            rospy.sleep(.01)

        rospy.loginfo('{0} arm end teleop'.format(self.arm_name))
Exemplo n.º 51
0
    def predictSinglePose(self, pose, arm_side):
        """Run the error model on one pose.

        Converts the pose into a 4x4 homogeneous matrix, feeds it through
        self.predict, and returns the two predicted poses (gp, sys) stamped
        in the input pose's frame and time.
        """
        # Homogeneous transform = translation matrix times rotation matrix.
        translation = tft.translation_matrix(
            [pose.position.x, pose.position.y, pose.position.z])
        rotation = tft.quaternion_matrix([
            pose.orientation.x, pose.orientation.y, pose.orientation.z,
            pose.orientation.w
        ])
        input_pose = translation.dot(rotation)

        gpList, sysList = self.predict([input_pose], arm_side)

        gp_pose = tfx.pose(gpList[0], frame=pose.frame, stamp=pose.stamp)
        sys_pose = tfx.pose(sysList[0], frame=pose.frame, stamp=pose.stamp)
        return gp_pose, sys_pose
Exemplo n.º 52
0
    def grasp_point_callback(self, msg):
        """Record candidate grasp poses and latch onto a selection.

        Once graspPointFound is set, subsequent messages only clear the
        candidate dictionary without re-selecting.
        """
        self.allGraspPoints = {}
        if not self.graspPointFound:
            count = 0
            for raw in msg.poses:
                stamped = tfx.pose(raw)
                stamped.stamp = msg.header.stamp
                stamped.frame = msg.header.frame_id
                self.allGraspPoints[count] = stamped
                count += 1

            selection = self.get_grasp_point(self.allGraspPoints.values())
            self.graspPoint = tfx.pose(selection, copy=True)
            self.graspPointFound = True
Exemplo n.º 53
0
    def getGripperPose(self,frame=raven_constants.Frames.Link0):
        """
        Returns gripper pose w.r.t. frame

        geometry_msgs.msg.Pose

        NOTE(review): the live implementation converts the tool-frame
        origin into self.commandFrame and ignores the ``frame`` argument
        (kept for interface compatibility). An older fallback that read
        self.ravenController.currentPose sat unreachable after the return
        statement and has been removed.
        """
        pose = tfx.pose([0,0,0],frame=self.toolFrame)
        return tfx.convertToFrame(pose, self.commandFrame, pose.frame, wait=10)
 def successorState(self):
     """Advance to the next successor state and return self.

     Plans a joint trajectory for the right arm toward the current goal
     position. If planning fails, self.valid is cleared and the *next*
     call prompts the operator on stdin for a replacement "x y z"
     position instead of reusing the goal.
     """
     # Reuse the stored goal while the last attempt succeeded; otherwise
     # ask the operator for a new target position.
     if self.valid:
         newPosition = self.currentGoal
     else:
         print "The last successor state was invalid"
         print "Enter a custom pose x y z"
         coord = raw_input().split()
         newPosition = [float(x) for x in coord]
     newRightArmPose = tfx.pose(self.rightArm.get_pose()
                                )  # only want to change position not angles
     newRightArmPose.position = newPosition
     # Ask the grasp planner for a 10-step joint trajectory from the
     # current joints to the target position; None means no feasible plan.
     newRightJointsList = self.graspPlanner.get_grasp_joint_trajectory(
         self.rightArm.get_joints(), newRightArmPose.position, n_steps=10)
     print newRightJointsList
     if newRightJointsList is None:
         rospy.loginfo('Invalid Right Arm Pose')
         print newPosition
         self.valid = False
     else:
         # Log every waypoint, then execute the whole trajectory.
         for newRightJoints in newRightJointsList:
             writeToOutput(newRightJoints)
         self.rightArm.execute_joint_trajectory(newRightJointsList)
         self.valid = True
     rospy.sleep(.01)
     return self  # state does not change when the pose is invalid; reverts to last valid state
Exemplo n.º 55
0
    def ik_lookat(self, position, point):
        """
        Solve IK for a tool pose at ``position`` looking toward ``point``.

        :param position: desired position of tool_frame (tfx.point)
        :param point: desired point for tool_frame to point at (tfx.point)
        :return: joint solution from self.ik, or whatever self.ik returns
            when no solution exists
        """
        assert position.frame == 'base_link'

        position_world = self.sim.transform_from_to(position.array,
                                                    'base_link', 'world')
        direction_world = np.dot(
            self.sim.transform_from_to(np.eye(4), 'base_link',
                                       'world')[:3, :3],
            point.array - position.array)
        direction_world /= np.linalg.norm(direction_world)

        # Build an orthonormal basis: red is the (unit) look direction,
        # green and blue complete a right-handed frame.
        red = direction_world
        red /= np.linalg.norm(red)
        green = np.cross(red, np.array([0, 0, 1]))
        # Fix: original divided by |red| (== 1 here), leaving green
        # unnormalized and the matrix below not a proper rotation.
        green /= np.linalg.norm(green)
        blue = np.cross(red, green)
        blue /= np.linalg.norm(blue)

        pose_mat = np.eye(4)
        pose_mat[:3, 0] = red
        pose_mat[:3, 1] = green
        pose_mat[:3, 2] = blue
        pose_mat[:3, 3] = position_world

        # Express the target back in base_link before solving IK.
        pose = tfx.pose(self.sim.transform_from_to(pose_mat, 'world',
                                                   'base_link'),
                        frame='base_link')
        return self.ik(pose)
Exemplo n.º 56
0
def test_cd():
    """Visualize the mug's convex decomposition as wireframe triangles.

    Loads the PR2 test environment, decomposes the mug into convex hulls,
    replaces the mug with the hull trimesh, and draws every triangle edge
    (segments accumulate in the module-level ``handles`` list).
    """
    global handles
    env = rave.Environment()
    env.Load('../envs/pr2-test.env.xml')
    env.SetViewer('qtcoin')

    mug = env.GetKinBody('mug')
    mug_pos = tfx.pose(mug.GetTransform()).position.array

    mugcd = rave.databases.convexdecomposition.ConvexDecompositionModel(mug)
    if not mugcd.load():
        mugcd.autogenerate()
    mugcd_trimesh = mugcd.GenerateTrimeshFromHulls(mugcd.linkgeometry[0][0][1])

    # Stand-in body built from the decomposition (not added to the env).
    new_mug = rave.RaveCreateKinBody(env, '')
    new_mug.SetName('new_mug')
    new_mug.InitFromTrimesh(mugcd_trimesh)
    new_mug.SetTransform(mug.GetTransform())
    #env.Add(new_mug, True)
    env.Remove(mug)

    triangles, vertices = mugcd_trimesh.indices, mugcd_trimesh.vertices
    for tri in triangles:
        corners = [mug_pos + vertices[tri[k], :] for k in (0, 1, 2)]
        # Draw the three edges of this triangle.
        for a, b in ((corners[0], corners[1]),
                     (corners[1], corners[2]),
                     (corners[2], corners[0])):
            handles += utils.plot_segment(env, a, b)

    IPython.embed()
Exemplo n.º 57
0
 def fit(self, data):
     """Fit a rigid transform mapping input columns to output columns.

     Stacks the selected columns of ``data`` into point matrices, runs
     transformationEstimationSVD, and returns the resulting pose as a
     7-vector [x, y, z, qx, qy, qz, qw].
     """
     # Fix: np.vstack needs a sequence — passing a bare generator is
     # deprecated and rejected by modern NumPy, so use list comprehensions.
     A = np.vstack([data[:, i] for i in self.input_columns]).T
     B = np.vstack([data[:, i] for i in self.output_columns]).T
     tf = transformationEstimationSVD(A, B, self.translation_offset)
     pose = tfx.pose(tf)
     vec = np.r_[pose.position.list, pose.orientation.list].T
     return vec
Exemplo n.º 58
0
    def get_error(self, data, model):
        """Per-sample squared error of a rigid-transform model.

        ``model`` is a 7-vector (xyz translation + quaternion) converted
        to a 4x4 transform; each selected column of ``data`` holds a
        flattened 3x4 matrix. Returns one summed squared error per sample.
        """
        # convert model to transform
        pose = tfx.pose(model[:3], model[3:])
        tf = pose.matrix
        # Fix: np.vstack needs a sequence — a bare generator is deprecated
        # and rejected by modern NumPy, so use list comprehensions.
        A = np.vstack([data[:, i] for i in self.input_columns])
        B = np.vstack([data[:, i] for i in self.output_columns])

        # Unflatten each sample's column back into a 3x4 block, side by side.
        A_reshaped = np.zeros((3, 0))
        B_reshaped = np.zeros((3, 0))
        for i in range(A.shape[1]):
            A_reshaped = np.c_[A_reshaped, np.reshape(A[:, i], (3, 4))]
            B_reshaped = np.c_[B_reshaped, np.reshape(B[:, i], (3, 4))]

        # Homogenize: append a [0, 0, 0, 1] row block per 3x4 matrix.
        A_ext = np.r_[A_reshaped,
                      np.tile([0, 0, 0, 1], (1, A_reshaped.shape[1] / 4))]

        B_fit = tf.dot(A_ext)
        B_fit = B_fit[:3, :]

        err_per_point = np.sum(np.square(B_reshaped - B_fit), axis=0).T
        # .A1 flattens the np.matrix result (tf is a matrix) to a 1-D array.
        err_per_point = err_per_point.A1

        # Regroup the per-column errors into one summed error per sample.
        err_per_point = np.reshape(err_per_point,
                                   (err_per_point.shape[0] / 4, 4)).T
        err_per_point = np.sum(err_per_point, axis=0)
        return err_per_point
Exemplo n.º 59
0
    def getEstimatedPose(self):
        """Return (estimated_gripper_pose, time_since_detection).

        Uses a pending externally-supplied estimate when one exists;
        otherwise falls back to the TF transform from the tool frame into
        the camera frame. time_since_detection is the age (seconds) of the
        last detected pose relative to the estimate.
        """
        if self.estimated_gripper_pose is not None:
            estimated_gripper_pose = self.estimated_gripper_pose
            estimated_time = estimated_gripper_pose.stamp.seconds
        else:
            # No stored estimate: build one from TF — the tool-frame
            # origin expressed in the camera frame, stamped "now".
            pose = tfx.pose([0, 0, 0], [0, 0, 0, 1],
                            frame=self.tool_frame,
                            stamp=rospy.Time.now())
            tf_tool_to_cam = tfx.lookupTransform(self.camera_frame,
                                                 self.tool_frame)
            estimated_gripper_pose = tf_tool_to_cam * pose
            estimated_time = rospy.Time.now().secs

        #estimated_time = estimated_gripper_pose.stamp.seconds
        # No detection yet => -inf so any estimate counts as newer.
        detected_time = self.detected_gripper_pose.stamp.seconds if self.detected_gripper_pose is not None else -float(
            "inf")

        # When the detection is at least as new as the estimate, treat it
        # as current (age 0 in both sub-branches; the inf alternative was
        # deliberately disabled — see trailing comments).
        if not estimated_time > detected_time:
            if math.fabs(estimated_time - detected_time) < 1:
                time_since_detection = 0
            else:
                time_since_detection = 0  #float("inf")
        else:
            time_since_detection = estimated_time - detected_time
        #time_since_detection = (estimated_time - detected_time) if estimated_time > detected_time else float("inf")

        if self.print_messages:
            if self.detected_gripper_pose is None:
                rospy.loginfo('detected_gripper_pose is None')
            rospy.loginfo(
                'estimated_time - detected_time = {0}'.format(estimated_time -
                                                              detected_time))

        # Consume the pending estimate so the next call recomputes from TF
        # unless a fresh one arrives in the meantime.
        self.estimated_gripper_pose = None
        return estimated_gripper_pose, time_since_detection