class ControlViSP(Control):
    """Control element driven by the ViSP moving-edge tracker.

    The object pose perceived by the ROS visp_tracker node is converted
    into a robot position estimate (RobotPositionFromVisp), which feeds
    an ErrorEstimator comparing real and planned trajectories.
    """
    yaml_tag = u'visp'

    def __init__(self, motion, yamlData):
        # Validate every YAML key this constructor reads.
        # FIX: 'frame-name' was read below but never checked.
        checkDict('object-name', yamlData)
        checkDict('frame-name', yamlData)
        checkDict('position', yamlData)

        Control.__init__(self, motion, yamlData)
        self.robot = motion.robot
        self.objectName = yamlData['object-name']
        self.frameName = yamlData['frame-name']
        self.position = yamlData['position']

        obj = motion.environment.get(self.objectName)
        if not obj:
            raise RuntimeError('object does not exist')

        # id(yamlData) makes the entity name unique per YAML node.
        self.robotPositionFromVisp = RobotPositionFromVisp(
            'robotPositionFromViSP' + str(id(yamlData)))

        # Convert ViSP frame into usual dynamic-graph frame.
        self.robotPositionFromVisp.setSensorTransformation(
            (( 0., 0., 1., 0.),
             ( 0., -1., 0., 0.),
             (-1., 0., 0., 0.),
             ( 0., 0., 0., 1.))
            )

        # Reuse the motion-wide ROS export entity when available.
        if motion.ros:
            self.ros = motion.ros
        else:
            self.ros = RosExport('rosExport')
        # Subscribe to the stamped object pose topic.
        self.ros.add('matrixHomoStamped', self.objectName, self.position)

        self.robotPositionFromVisp.plannedObjectPosition.value = \
            obj.plannedPosition.dgRotationMatrix()

        # Perceived object pose (and its timestamp) -> position estimator.
        plug(self.ros.signal(self.objectName),
             self.robotPositionFromVisp.cMo)
        plug(self.ros.signal(self.objectName + 'Timestamp'),
             self.robotPositionFromVisp.cMoTimestamp)

        # Plug wMc/wMr to robotPositionFromVisp.
        plug(motion.robot.frames[self.frameName].position,
             self.robotPositionFromVisp.wMc)
        plug(motion.robot.dynamic.waist,
             self.robotPositionFromVisp.wMr)

    def start(self, name, feetFollowerWithCorrection):
        """Create and wire the error estimator; return it."""
        I = ((1., 0., 0., 0.),
             (0., 1., 0., 0.),
             (0., 0., 1., 0.),
             (0., 0., 0., 1.))
        self.estimator = ErrorEstimator(name)
        self.estimator.setReferenceTrajectory(
            feetFollowerWithCorrection.referenceTrajectory.name)
        plug(feetFollowerWithCorrection.referenceTrajectory.waist,
             self.estimator.planned)
        # Position estimate is already in the world frame: identity here.
        self.estimator.setSensorToWorldTransformation(I)
        # FIXME: we should change the reference point accordingly
        # with the current contact point.
        plug(self.robot.dynamic.signal('Jleft-ankle'),
             self.estimator.referencePointJacobian)
        self.estimator.plannedCommand.value = self.robot.device.state.value
        # On the simulated robot the real state is the commanded state;
        # on the real robot it comes from the encoders (robotState).
        if type(self.robot.device) == RobotSimu:
            self.estimator.realCommand.value = self.robot.device.state.value
        else:
            self.estimator.realCommand.value = \
                self.robot.device.robotState.value

        plug(self.robotPositionFromVisp.position,
             self.estimator.position)
        plug(self.robotPositionFromVisp.positionTimestamp,
             self.estimator.positionTimestamp)
        self.setupTrace(self.estimator)
        return self.estimator

    def interactiveStart(self, name, feetFollowerWithCorrection):
        """Block (prompting the user) until tracking is up, then start."""
        while len(self.ros.signals()) == 0:
            raw_input("Press enter after starting ROS visp_tracker node.")
        while len(self.ros.signal(self.objectName).value) < 1:
            raw_input("Tracking not started...")
        return self.start(name, feetFollowerWithCorrection)

    def canStart(self):
        """Return True when ROS signals exist and carry tracking data."""
        if not self.ros:
            return False
        if len(self.ros.signals()) == 0:
            return False
        if len(self.ros.signal(self.objectName).value) < 1:
            return False
        if len(self.ros.signal(self.objectName + 'Timestamp').value) < 1:
            return False
        return True

    def setupTrace(self, errorEstimator):
        """Register estimator, ROS and ViSP-conversion signals for tracing."""
        self.setupTraceErrorEstimator(self.estimator)
        for s in [self.objectName, self.objectName + 'Timestamp']:
            addTrace(self.robot, self.trace, self.ros.name, s)
        for s in ['cMo', 'cMoTimestamp', 'plannedObjectPosition',
                  'position', 'positionTimestamp',
                  'dbgcMo', 'dbgPosition', 'dbgrMc']:
            addTrace(self.robot, self.trace,
                     self.robotPositionFromVisp.name, s)

    def __str__(self):
        msg = "ViSP moving edge tracker control element (frame: {0}, object: {1})"
        return msg.format(self.frameName, self.objectName)
class ControlMocap(Control):
    """Control element using a motion capture system as localization.

    The perceived position of a tracked robot body is compared with its
    planned position to estimate the position error of the robot.
    """
    yaml_tag = u'mocap'
    # Name of the robot body whose planned position is used; filled from
    # the 'tracked-body' YAML key in __init__.
    trackedBody = None

    def __init__(self, motion, yamlData):
        checkDict('tracked-body', yamlData)
        checkDict('perceived-body', yamlData)
        Control.__init__(self, motion, yamlData)
        self.corba = motion.corba
        self.robot = motion.robot
        self.trackedBody = yamlData['tracked-body']
        self.perceivedBody = yamlData['perceived-body']

    def computeWorldTransformationFromFoot(self):
        """
        This methods makes the assumption that the robot is placed
        exactly at its starting position.

        By comparing the current localization with the starting
        position of the tracked body, it deduces the transformation
        between the motion capture system and the control framework.
        """
        # Force both signals to refresh before reading their values.
        self.corba.signal(self.perceivedBody).recompute(
            self.corba.signal(self.perceivedBody).time + 1)
        self.robot.dynamic.signal(
            self.trackedBody).recompute(self.robot.dynamic.signal(
                self.trackedBody).time + 1)

        # Perceived pose is (x, y, theta); lift it to a homogeneous matrix.
        mocapMfoot = XYThetaToHomogeneousMatrix(
            self.corba.signal(self.perceivedBody).value)
        sotMfoot = np.matrix(self.robot.dynamic.signal(
            self.trackedBody).value)
        # mocap position w.r.t sot frame
        sotMmocap = sotMfoot * np.linalg.inv(mocapMfoot)
        return matrixToTuple(sotMmocap)

    def start(self, name, feetFollowerWithCorrection):
        """Create and wire the error estimator; return it.

        Returns False when the mocap bridge is not running or the body
        is not currently tracked.
        """
        self.estimator = ErrorEstimator(name)
        self.estimator.setReferenceTrajectory(
            feetFollowerWithCorrection.referenceTrajectory.name)
        # FIXME: we use ankle position as foot position here
        # as Z does not matter.
        plug(feetFollowerWithCorrection.referenceTrajectory.signal(
            self.trackedBody), self.estimator.planned)

        # NOTE(review): 3 signals appears to be the entity's baseline
        # signal count before evart-to-client adds tracking signals —
        # confirm against CorbaServer.
        if len(self.corba.signals()) == 3:
            print ("evart-to-client not launched, abandon.")
            return False
        # A tracked body publishes an (x, y, theta) triple.
        if len(self.corba.signal(self.perceivedBody).value) != 3:
            print ("{0} not tracked, abandon.".format(self.perceivedBody))
            return False

        sMm = self.computeWorldTransformationFromFoot()
        self.estimator.setSensorToWorldTransformation(sMm)
        # FIXME: we should change the reference point accordingly
        # with the current contact point.
        plug(self.robot.dynamic.signal('Jleft-ankle'),
             self.estimator.referencePointJacobian)
        self.estimator.plannedCommand.value = self.robot.device.state.value
        # Simulation: real state is the commanded state; real robot:
        # read the measured robotState instead.
        if type(self.robot.device) == RobotSimu:
            self.estimator.realCommand.value = self.robot.device.state.value
        else:
            self.estimator.realCommand.value = \
                self.robot.device.robotState.value
        plug(self.corba.signal(self.perceivedBody),
             self.estimator.position)
        plug(self.corba.signal(
            self.perceivedBody + 'Timestamp'),
            self.estimator.positionTimestamp)
        self.setupTrace(self.estimator)
        return self.estimator

    def interactiveStart(self, name, feetFollowerWithCorrection):
        """Block (prompting the user) until tracking is up, then start."""
        while len(self.corba.signals()) == 3:
            raw_input("Press enter after starting evart-to-corba.")
        while len(self.corba.signal(self.perceivedBody).value) != 3:
            raw_input("Body not tracked...")
        return self.start(name, feetFollowerWithCorrection)

    def canStart(self):
        """Return True when the mocap bridge runs and the body is tracked."""
        if not self.corba:
            return False
        if len(self.corba.signals()) == 3:
            return False
        if len(self.corba.signal(self.perceivedBody).value) != 3:
            return False
        return True

    def setupTrace(self, errorEstimator):
        """Register estimator and mocap signals for tracing."""
        self.setupTraceErrorEstimator(self.estimator)
        for s in [self.perceivedBody, self.perceivedBody + 'Timestamp']:
            addTrace(self.robot, self.trace, self.corba.name, s)

    def __str__(self):
        return "motion capture control element" + \
            " (tracked: {0}, perceived: {1})".format(
                self.trackedBody, self.perceivedBody)
class ControlVirtualSensor(Control):
    """Control element simulating a sensor from known object positions.

    A VirtualSensor entity compares the expected and actual obstacle
    positions to synthesize a robot position measurement, which feeds
    an ErrorEstimator.
    """
    yaml_tag = u'virtual-sensor'

    def __init__(self, motion, yamlData):
        checkDict('object-name', yamlData)
        checkDict('position', yamlData)
        Control.__init__(self, motion, yamlData)
        self.robot = motion.robot
        self.objectName = yamlData['object-name']
        self.position = Pose6d(yamlData['position'])
        # id(yamlData) makes the entity name unique per YAML node.
        self.virtualSensor = VirtualSensor(
            'virtualSensor' + str(id(yamlData)))

        # FIXME: should be more generic.
        feetFollower = find(lambda e: type(e) == MotionWalk, motion.motion)
        if not feetFollower:
            # FIX: error message was garbled
            # ('control elements needs ... walk elelement').
            raise RuntimeError('control element needs at least one ' + \
                               'walk element to apply correction')

        plug(feetFollower.feetFollower.feetFollower.waist,
             self.virtualSensor.expectedRobotPosition)
        plug(motion.robot.dynamic.waist,
             self.virtualSensor.robotPosition)

        obj = motion.environment.get(self.objectName)
        if not obj:
            raise RuntimeError('object does not exist')
        self.virtualSensor.expectedObstaclePosition.value = \
            obj.plannedPosition.dgRotationMatrix()
        self.virtualSensor.obstaclePosition.value = \
            self.position.dgRotationMatrix()

        if motion.trace:
            addTrace(motion.robot, motion.trace,
                     self.virtualSensor.name, 'position')

    def start(self, name, feetFollowerWithCorrection):
        """Create and wire the error estimator; return it."""
        I = ((1., 0., 0., 0.),
             (0., 1., 0., 0.),
             (0., 0., 1., 0.),
             (0., 0., 0., 1.))
        self.estimator = ErrorEstimator(name)
        self.estimator.setReferenceTrajectory(
            feetFollowerWithCorrection.referenceTrajectory.name)
        # FIXME: not generic enough.
        plug(feetFollowerWithCorrection.referenceTrajectory.waist,
             self.estimator.planned)
        # Virtual sensor already works in the world frame: identity here.
        self.estimator.setSensorToWorldTransformation(I)
        # This does not matter as the real/planned command is the same.
        plug(self.robot.dynamic.signal('Jleft-ankle'),
             self.estimator.referencePointJacobian)
        self.estimator.plannedCommand.value = self.robot.device.state.value
        self.estimator.realCommand.value = self.robot.device.state.value
        plug(self.virtualSensor.position,
             self.estimator.position)
        plug(self.virtualSensor.positionTimestamp,
             self.estimator.positionTimestamp)
        self.setupTraceErrorEstimator(self.estimator)
        return self.estimator

    def interactiveStart(self, name, feetFollowerWithCorrection):
        """No external process to wait for: start immediately."""
        return self.start(name, feetFollowerWithCorrection)

    def __str__(self):
        return "virtual sensor control element" + \
            " (object: {0}, position: {1})".format(
                self.objectName, self.position)
class MotionCaptureErrorEstimationStrategy(ErrorEstimationStrategy):
    """
    Use the motion capture system as an external localization device.

    The last perceived position of a particular robot body is compared
    with its planned position to deduce a measurement of the position
    error.

    By default, this body is the left foot. It is preferable over other
    bodies such as the waist which position is modified by the
    stabilizer on robots such as HRP-2.

    The timestamp associated with the perception of the body position
    by the localization system is required to handle delays on the
    perception part.
    """

    # Body used for perception.
    #
    # The error is computed by comparing the body
    # localizationPlannedBody in robot.dynamic and the position given
    # by the localization system in
    # corba.signal(localizationPerceivedBody).
    #
    # Additionally, corba.signal(localizationPerceivedBodyTimestamp)
    # provides the timestamp associated with the current perception.
    #
    # Note: only used when enableLocalization is true.
    localizationPlannedBody = 'left-ankle'
    localizationPerceivedBody = 'left-foot'
    localizationPerceivedBodyTimestamp = 'left-footTimestamp'

    # CORBA server used to receive the perceived positions.
    corba = None

    def __init__(self, feetFollowerWithCorrection, robot, corba = None):
        ErrorEstimationStrategy.__init__(self,
                                         robot, feetFollowerWithCorrection)

        # Create CORBA server if required.
        if not corba:
            corba = CorbaServer('corba_server')
        self.corba = corba

        self.errorEstimator = ErrorEstimator('error_estimator')
        self.errorEstimator.setReferenceTrajectory(
            feetFollowerWithCorrection.referenceTrajectory.name)
        # FIXME: we use ankle position as foot position here
        # as Z does not matter.
        plug(feetFollowerWithCorrection.referenceTrajectory.signal
             (self.localizationPlannedBody),
             self.errorEstimator.planned)

    def __str__(self):
        return "error estimation using the motion capture system " + \
            " (tracked body: %s)" % self.localizationPerceivedBody

    def computeWorldTransformationFromFoot(self):
        """
        This methods makes the assumption that the robot is placed
        exactly at its starting position.

        By comparing the current localization with the starting
        position of the tracked body, it deduces the transformation
        between the motion capture system and the control framework.
        """
        # Force both signals to refresh before reading their values.
        self.corba.signal(self.localizationPerceivedBody).recompute(
            self.corba.signal(self.localizationPerceivedBody).time + 1)
        # FIX: recompute the signal actually read below (the planned
        # body, e.g. left-ankle) instead of the unrelated waist signal,
        # which left the value read here potentially stale. This
        # matches the refactored copy of this method in ControlMocap.
        self.robot.dynamic.signal(self.localizationPlannedBody).recompute(
            self.robot.dynamic.signal(self.localizationPlannedBody).time + 1)

        # Perceived pose is (x, y, theta); lift it to a homogeneous matrix.
        mocapMfoot = XYThetaToHomogeneousMatrix(
            self.corba.signal(self.localizationPerceivedBody).value)
        sotMfoot = np.matrix(self.robot.dynamic.signal(
            self.localizationPlannedBody).value)
        # mocap position w.r.t sot frame
        sotMmocap = sotMfoot * np.linalg.inv(mocapMfoot)
        return matrixToTuple(sotMmocap)

    def start(self):
        """
        Initialize the motion capture system.

        Before calling this function, make sure:
        a. evart-to-client is launched and successfully track the body
           used to compute the error,
        b. the robot is placed at its starting position.
        """
        # NOTE(review): 3 signals appears to be the entity's baseline
        # signal count before evart-to-client adds tracking signals —
        # confirm against CorbaServer.
        if len(self.corba.signals()) == 3:
            print ("evart-to-client not launched, abandon.")
            return False
        if len(self.corba.signal(self.localizationPerceivedBody).value) != 3:
            # FIX: message said "waist" while the check is on the
            # perceived body (left-foot by default); name the body.
            print ("%s not tracked, abandon."
                   % self.localizationPerceivedBody)
            return False

        self.sMm = self.computeWorldTransformationFromFoot()
        print("World transformation:")
        print(HomogeneousMatrixToXYZTheta(self.sMm))
        self.errorEstimator.setSensorToWorldTransformation(self.sMm)
        plug(self.corba.signal(self.localizationPerceivedBody),
             self.errorEstimator.position)
        plug(self.corba.signal(
            self.localizationPerceivedBody + 'Timestamp'),
            self.errorEstimator.positionTimestamp)

        print ("Initial error:")
        print (self.errorEstimator.error.value)
        return True

    def interactiveStart(self):
        """Block (prompting the user) until tracking is up, then start."""
        while len(self.corba.signals()) == 3:
            raw_input("Press enter after starting evart-to-corba.")
        # FIX: use the configurable attribute instead of the hard-coded
        # 'left-foot' so overriding localizationPerceivedBody works.
        while len(self.corba.signal(
                self.localizationPerceivedBody).value) != 3:
            raw_input("Body not tracked...")
        return self.start()