def posesToDeltaPoses(self, poseList):
    """
    Converts p0 -> p1 -> p2 -> ... into delta(p0->p1) -> delta(p0->p2) -> delta(p0->p3) -> ...
    """
    startPose = poseList[0]
    return [raven_util.deltaPose(startPose, pose) for pose in poseList[1:]]
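# Minimal usage sketch (assumes, as elsewhere in this file, that raven_util.deltaPose(a, b)
# returns the pose of b relative to a, and that tfx.pose is available; the poses below are
# purely illustrative):
#
#   poses = [tfx.pose([0, 0, 0]), tfx.pose([0.01, 0, 0]), tfx.pose([0.02, 0, 0])]
#   deltas = self.posesToDeltaPoses(poses)
#   # deltas[0] is delta(p0 -> p1), deltas[1] is delta(p0 -> p2)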
def test():
    rospy.init_node('testGripperPoseEstimator', anonymous=True)
    rospy.sleep(2)
    arms = raven_constants.Arm.Both
    gpe = GripperPoseEstimator(arms)
    rospy.sleep(2)

    truthPub = dict()
    prevPoseAndIsEstimate = dict()
    for arm in arms:
        truthPub[arm] = rospy.Publisher('truth_gripper_pose_%s' % arm, PoseStamped)

    printEvery = raven_util.Timeout(1)
    printEvery.start()

    while not rospy.is_shutdown():
        for arm in arms:
            truthPose = gpe.truthPose.get(arm)
            estimatedPoseIsEstimate = gpe.estimatedPose.get(arm)

            if truthPose is not None:
                #print 'Publishing truthPose %s' % arm
                truthPub[arm].publish(truthPose.msg.PoseStamped())

            if estimatedPoseIsEstimate is not None:
                estimatedPose, isEstimate = estimatedPoseIsEstimate
                #print 'Publishing estimatedPose %s' % arm

                # when the estimator switches from estimate back to truth, print the
                # jump between the last estimate and the first new truth pose
                if arm in prevPoseAndIsEstimate:
                    prevPose, prevIsEstimate = prevPoseAndIsEstimate[arm]
                    if prevIsEstimate and not isEstimate:
                        deltaPose = raven_util.deltaPose(prevPose, estimatedPose)
                        print 'deltaPose between last estimate and current truth'
                        print deltaPose

                #print 'truthPose_{0}: {1}'.format(arm, truthPose)
                #print 'isEstimate: {0}'.format(isEstimate)
                #print 'estimatedPose_{0}: {1}'.format(arm, estimatedPose)

                prevPoseAndIsEstimate[arm] = (estimatedPose, isEstimate)

        rospy.sleep(.02)
def move(self, targetPose=None):
    poseErrors = np.zeros((0,6))
    deltaPose = tfx.pose([0,0,0])
    if targetPose is None:
        targetPose, deltaPose = self.sample()
        #cameraEndPose, dummy = self.sample()

    for j in range(self.numTries):
        if rospy.is_shutdown():
            return None
        try:
            # reset to the holding pose and set the gripper
            self.ravenArm.goToGripperPose(self.holdingPose)
            self.ravenArm.setGripper(self.grasp, duration=1.0)
            rospy.sleep(1)

            # wait for a valid Phasespace reading
            startPose = None
            while startPose is None:
                if rospy.is_shutdown():
                    return None
                startPose = self.getPhasespacePose()

            n_steps = 20

            robotStartPose = self.gripperPoseEstimator.getGripperPose(self.arm).copy()
            print robotStartPose

            # earlier hard-coded poses used for the error model
            #cameraStartPose = tfx.pose([-0.107, 0.9899, 0.0930, -0.0455, -0.2510, 0.0636, -0.9659, -0.019, -0.9621, -0.1267, 0.2416, -0.0949, 0, 0, 0, 1.0000])
            #cameraEndPose = tfx.pose([0.1263, 0.9422, 0.3103, 0.0063, -0.3207, 0.3348, -0.8860, -0.0304, -0.9387, 0.0124, 0.3445, -0.1530, 0, 0, 0, 1.0000])
            #startPose = tfx.pose([0.0008, 1.000, 0.0003, -0.0200, 0.0012, 0.0003, -1.0000, -0.0197, -1.000, 0.0008, -0.0012, -0.1051, 0, 0, 0, 1.0000])
            #endPose = tfx.pose([-0.0000, 1.0000, 0.0000, 0.0355, 0.0008, 0.0000, -1.0000, -0.0200, -1.000, -0.0000, -0.0008, -0.1580, 0, 0, 0, 1.0000])
            #cameraStartPose.frame = '0_link'
            #cameraEndPose.frame = '0_link'
            #startPose.frame = '0_link'
            #endPose.frame = '0_link'

            # good trajectory
            # NOTE: these hard-coded poses override the measured startPose above
            cameraStartPose = tfx.pose([-0.1075, 0.9868, 0.1208, -0.0468, -0.3099, 0.0822, -0.9472, -0.0201, -0.9447, -0.1393, 0.2970, -0.0968, 0, 0, 0, 1.0000])
            #cameraEndPose = tfx.pose([-0.1163, 0.9858, 0.1207, -0.1170, -0.2292, 0.0916, -0.9691, -0.0582, -0.9664, -0.1404, 0.2153, -0.1489, 0, 0, 0, 1.0000])
            #cameraEndPose = tfx.pose([0, 1, 0, -0.02982, 0, 0, -1, -0.07282, -1, 0, 0, -0.162, 0, 0, 0, 1.0000])
            #startPose = tfx.pose([-0.0003, 1.0000, -0.0001, -0.0200, 0.0003, -0.0001, -1.0000, -0.0200, -1.0000, -0.0003, -0.0003, -0.1050, 0, 0, 0, 1.0000])
            endPose = tfx.pose([-0.0001, 1.0000, -0.0000, -0.0755, -0.0019, -0.0000, -1.0000, -0.0605, -1.0000, -0.0001, 0.0019, -0.1698, 0, 0, 0, 1.0000])
            cameraEndPose = tfx.pose([-0.0825, -0.0603, -0.1487], [0.5, 0.5, -0.5, 0.5])
            startPose = tfx.pose([-0.0995, -0.0213, -0.1064], [0.4936, 0.5026, -0.5029, 0.5007])
            cameraStartPose.frame = '0_link'
            cameraEndPose.frame = '0_link'
            startPose.frame = '0_link'
            endPose.frame = '0_link'

            '''
            startPose = tfx.pose([-0.0210, 0.9996, -0.0188, -0.0204, 0.0127, -0.0191, -0.9997, -0.0209, -0.9997, -0.0208, 0.0131, -0.1053, 0, 0, 0, 1])
            #startPose.frame = '0_link'
            #[-0.0521, 0.9986, 0.0037, 0.0082, -0.0099, 0.0032, -0.9999, -0.0886, -0.9986, -0.0521, 0.0098, -0.1693, 0, 0, 0, 1.000]
            cameraStartPose = tfx.pose([-0.0943, 0.9917, 0.0874, -0.0458, -0.2127, 0.0657, -0.9749, -0.0188, -0.9726, -0.1105, 0.2047, -0.0950, 0, 0, 0, 1])
            cameraStartPose.frame = '0_link'
            cameraEndPose = tfx.pose([-0.0559, 0.9882, 0.1424, -0.0357, -0.3918, 0.1094, -0.9135, -0.0839, -0.9183, -0.1069, 0.3811, -0.1343, 0, 0, 0, 1])
            cameraEndPose.frame = '0_link'
            endPose = tfx.pose([0, 1.000, 0, 0.0025, 0, 0, -1.0, -0.0733, -1, 0, 0, -0.1510, 0, 0, 0, 1])
            endPose.frame = '0_link'
            #IPython.embed()
            '''

            # plan a delta-pose trajectory from the (hard-coded) start pose to the camera end pose
            (correctedStartPose, deltaPoseTraj) = self.ravenPlanner.getFullTrajectoryFromPose(
                self.ravenArm.name, cameraEndPose, startPose=startPose, n_steps=n_steps,
                approachDir=np.array([0, .1, .9]), speed=self.speed, correctTrajectory=True)
        except RuntimeError as e:
            rospy.loginfo(e)
            return 'IKFailure'

        print 'Executing trajectory attempt', j
        #self.startDataCollection()
        #rospy.sleep(1)
        #rospy.sleep(2)
        self.ravenArm.executeDeltaPoseTrajectory(deltaPoseTraj, block=True, startPose=correctedStartPose)
        #rospy.sleep(5)
        #self.stopDataCollection(self.filename % (self.trajNum), plot=False, targetPose=cameraEndPose)
        rospy.sleep(2)

        # compare where the gripper actually ended up (Phasespace) with the commanded end pose
        actualPose = self.getPhasespacePose()
        errorDeltaPose = raven_util.deltaPose(actualPose, cameraEndPose)
        translationError = errorDeltaPose.translation.array
        angleError = np.array(transformations.euler_from_matrix(errorDeltaPose.matrix[:3,:3]))

        print
        print 'ERROR'
        print translationError
        print angleError
        print

        errorVector = np.c_[[translationError], [angleError]]
        poseErrors = np.r_[poseErrors, errorVector]
        rospy.sleep(1)

    return (targetPose.matrix, poseErrors)
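# Minimal sketch of the error bookkeeping used in move() above, assuming only numpy and
# tf.transformations; the project-specific raven_util/tfx delta pose is stood in for by a
# plain 4x4 homogeneous matrix (variable names here are illustrative, not from this file):
#
#   import numpy as np
#   from tf import transformations
#
#   poseErrors = np.zeros((0, 6))                # rows of [tx, ty, tz, rx, ry, rz]
#   errorMatrix = np.eye(4)                      # stand-in for errorDeltaPose.matrix
#   translationError = errorMatrix[:3, 3]        # translation component of the delta
#   angleError = np.array(transformations.euler_from_matrix(errorMatrix[:3, :3]))
#   errorVector = np.c_[[translationError], [angleError]]   # shape (1, 6)
#   poseErrors = np.r_[poseErrors, errorVector]              # one row appended per trial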