def getFrameToOriginTransform(self, t):
    """Return a copy of frame t re-expressed relative to the polaris origin frame.

    The input transform is not modified; a copy is post-multiplied by the
    inverse of the origin frame's transform.
    """
    tCopy = transformUtils.copyFrame(t)
    tCopy.PostMultiply()
    tCopy.Concatenate(self.polaris.originFrame.transform.GetLinearInverse())
    # Parenthesized print is valid on both Python 2 and 3 for a single argument.
    print(transformUtils.poseFromTransform(tCopy))
    return tCopy
def moveHandModelToFrame(model, frame):
    """Pose the floating-base hand model at the given vtk frame by setting
    its six base joints (xyz + roll/pitch/yaw)."""
    position, quaternion = transformUtils.poseFromTransform(frame)
    rollPitchYaw = transformUtils.quaternionToRollPitchYaw(quaternion)
    baseJoints = ['base_x', 'base_y', 'base_z', 'base_roll', 'base_pitch', 'base_yaw']
    model.model.setJointPositions(np.hstack((position, rollPitchYaw)), baseJoints)
def onSpawnSphere(self):
    """Create a sphere affordance at the current spawn frame."""
    description = dict(classname='SphereAffordanceItem',
                       Name='sphere',
                       uuid=newUUID(),
                       pose=transformUtils.poseFromTransform(self.getSpawnFrame()))
    return self.affordanceManager.newAffordanceFromDescription(description)
def spawnValveAffordance(self):
    """Spawn a green capsule-ring 'valve' affordance placed relative to the
    robot's current feet mid-point, using the stored relative grasp and
    stance transforms to locate the valve in world.

    NOTE(review): the exact chaining below (grasp -> stance -> inverse ->
    re-concatenate) encodes a calibrated relationship; statement order
    matters and should not be rearranged.
    """
    radius = 0.10        # valve ring radius (meters)
    tubeRadius = 0.02    # capsule tube radius (meters)
    position = [0, 0, 1.2]  # nominal valve offset used to seed the chain
    rpy = [0, 0, 0]
    t_feet_mid = self.footstepPlanner.getFeetMidPoint(self.robotModel)
    t = transformUtils.frameFromPositionAndRPY(position, rpy)
    t_grasp = self.computeRelativeGraspTransform()
    t_grasp.Concatenate(t)
    t_stance = self.computeRobotStanceFrame(t_grasp, self.computeRelativeStanceTransform())
    t_valve = t_stance.GetInverse()
    # This is necessary to get the inversion to actually happen. We don't know why.
    t_valve.GetMatrix()
    t_valve.Concatenate(t)
    t_valve.Concatenate(t_feet_mid)
    pose = transformUtils.poseFromTransform(t_valve)
    desc = dict(classname='CapsuleRingAffordanceItem', Name='valve', uuid=newUUID(),
                pose=pose, Color=[0, 1, 0], Radius=float(radius), Segments=20)
    desc['Tube Radius'] = tubeRadius
    # Imported locally (presumably to avoid a circular import -- confirm).
    import affordancepanel
    obj = affordancepanel.panel.affordanceFromDescription(desc)
    obj.params = dict(radius=radius)
def spawnValveAffordance(self):
    """Spawn a green capsule-ring 'valve' affordance placed relative to the
    robot's current feet mid-point, using the stored relative grasp and
    stance transforms to locate the valve in world.

    NOTE(review): the transform chaining below is order-sensitive; do not
    rearrange statements.
    """
    radius = 0.10        # valve ring radius (meters)
    tubeRadius = 0.02    # capsule tube radius (meters)
    position = [0, 0, 1.2]  # nominal valve offset used to seed the chain
    rpy = [0, 0, 0]
    t_feet_mid = self.footstepPlanner.getFeetMidPoint(self.robotModel)
    t = transformUtils.frameFromPositionAndRPY(position, rpy)
    t_grasp = self.computeRelativeGraspTransform()
    t_grasp.Concatenate(t)
    t_stance = self.computeRobotStanceFrame(t_grasp, self.computeRelativeStanceTransform())
    t_valve = t_stance.GetInverse()
    # This is necessary to get the inversion to actually happen. We don't know why.
    t_valve.GetMatrix()
    t_valve.Concatenate(t)
    t_valve.Concatenate(t_feet_mid)
    pose = transformUtils.poseFromTransform(t_valve)
    desc = dict(classname='CapsuleRingAffordanceItem', Name='valve', uuid=newUUID(),
                pose=pose, Color=[0, 1, 0], Radius=float(radius), Segments=20)
    desc['Tube Radius'] = tubeRadius
    # Imported locally (presumably to avoid a circular import -- confirm).
    import affordancepanel
    obj = affordancepanel.panel.affordanceFromDescription(desc)
    obj.params = dict(radius=radius)
def checkGraspFrame(inputGraspFrame, side):
    '''
    Return True if the given grasp frame matches the grasp frame of the
    teleop robot model's current pose, else False.
    '''
    pose = teleopJointController.q
    teleopGraspFrame = ikPlanner.newGraspToWorldFrame(
        pose, side, ikPlanner.newGraspToHandFrame(side))
    p1, q1 = transformUtils.poseFromTransform(inputGraspFrame)
    p2, q2 = transformUtils.poseFromTransform(teleopGraspFrame)
    # q and -q represent the same rotation (quaternion double cover);
    # align signs before the elementwise comparison below.
    if np.dot(q1, q2) < 0:
        q2 = -q2
    try:
        # atol guards components that are numerically ~0, where a purely
        # relative tolerance would reject tiny absolute differences.
        np.testing.assert_allclose(p1, p2, rtol=1e-3, atol=1e-6)
        np.testing.assert_allclose(q1, q2, rtol=1e-3, atol=1e-6)
        return True
    except AssertionError:
        return False
def onSpawnMesh(self):
    """Build a small arrow mesh, register it with the shared mesh manager,
    and spawn it as a mesh affordance at the spawn frame."""
    debug = DebugData()
    debug.addArrow((0, 0, 0), (0, 0, 0.3))
    arrowPolyData = debug.getPolyData()
    meshId = affordanceitems.MeshAffordanceItem.getMeshManager().add(arrowPolyData)
    description = dict(classname='MeshAffordanceItem',
                       Name='mesh',
                       Filename=meshId,
                       uuid=newUUID(),
                       pose=transformUtils.poseFromTransform(self.getSpawnFrame()))
    return self.affordanceFromDescription(description)
def onLocalPlaneFit():
    """Fit a plane to the point cloud around the picked point, show the
    inlier points, and spawn a box affordance aligned with the patch."""
    planePoints, normal = segmentation.applyLocalPlaneFit(
        pointCloudObj.polyData, pickedPoint, searchRadius=0.1, searchRadiusEnd=0.2)
    planeObj = vis.showPolyData(planePoints, 'local plane fit', color=[0, 1, 0])
    planeObj.setProperty('Point Size', 7)
    fields = segmentation.makePolyDataFields(planeObj.polyData)
    description = dict(classname='BoxAffordanceItem',
                       Name='local plane',
                       Dimensions=list(fields.dims),
                       pose=transformUtils.poseFromTransform(fields.frame))
    box = segmentation.affordanceManager.newAffordanceFromDescription(description)
def onSpawnMesh(self):
    """Build a small arrow mesh, register it with the shared mesh manager,
    and spawn it as a mesh affordance at the spawn frame."""
    debug = DebugData()
    debug.addArrow((0, 0, 0), (0, 0, 0.3))
    arrowPolyData = debug.getPolyData()
    meshId = affordanceitems.MeshAffordanceItem.getMeshManager().add(arrowPolyData)
    description = dict(classname='MeshAffordanceItem',
                       Name='mesh',
                       Filename=meshId,
                       uuid=newUUID(),
                       pose=transformUtils.poseFromTransform(self.getSpawnFrame()))
    return self.affordanceManager.newAffordanceFromDescription(description)
def checkGraspFrame(inputGraspFrame, side):
    '''
    Return True if the given grasp frame matches the grasp frame of the
    teleop robot model's current pose, else False.
    '''
    pose = teleopJointController.q
    teleopGraspFrame = ikPlanner.newGraspToWorldFrame(
        pose, side, ikPlanner.newGraspToHandFrame(side))
    p1, q1 = transformUtils.poseFromTransform(inputGraspFrame)
    p2, q2 = transformUtils.poseFromTransform(teleopGraspFrame)
    # q and -q represent the same rotation (quaternion double cover);
    # align signs before the elementwise comparison below.
    if np.dot(q1, q2) < 0:
        q2 = -q2
    try:
        # atol guards components that are numerically ~0, where a purely
        # relative tolerance would reject tiny absolute differences.
        np.testing.assert_allclose(p1, p2, rtol=1e-3, atol=1e-6)
        np.testing.assert_allclose(q1, q2, rtol=1e-3, atol=1e-6)
        return True
    except AssertionError:
        return False
def spawnBoxAffordanceAtFrame(self, boxFrame):
    """Spawn the green 'Switch Box' affordance centered on boxFrame.

    The frame is translated forward by half the box depth so the supplied
    frame lies on the front face of the box.  Note: boxFrame is mutated
    in place.
    """
    # Parenthesized print is valid on both Python 2 and 3 for a single argument.
    print('spawning switch box affordance')
    dimensions = [0.08, 0.19, 0.25]  # depth, width, height (meters)
    depth = dimensions[0]
    boxFrame.PreMultiply()
    boxFrame.Translate(depth / 2.0, 0.0, 0.0)
    pose = transformUtils.poseFromTransform(boxFrame)
    desc = dict(classname='BoxAffordanceItem', Name='Switch Box',
                Dimensions=dimensions, pose=pose, Color=[0, 1, 0])
    self.boxAffordance = segmentation.affordanceManager.newAffordanceFromDescription(desc)
    self.updateReachFrame()
def spawnBoxAffordanceAtFrame(self, boxFrame):
    """Spawn the green 'Switch Box' affordance centered on boxFrame.

    The frame is translated forward by half the box depth so the supplied
    frame lies on the front face of the box.  Note: boxFrame is mutated
    in place.
    """
    # Parenthesized print is valid on both Python 2 and 3 for a single argument.
    print('spawning switch box affordance')
    dimensions = [0.08, 0.19, 0.25]  # depth, width, height (meters)
    depth = dimensions[0]
    boxFrame.PreMultiply()
    boxFrame.Translate(depth / 2.0, 0.0, 0.0)
    pose = transformUtils.poseFromTransform(boxFrame)
    desc = dict(classname='BoxAffordanceItem', Name='Switch Box',
                Dimensions=dimensions, pose=pose, Color=[0, 1, 0])
    self.boxAffordance = segmentation.affordanceManager.newAffordanceFromDescription(desc)
    self.updateReachFrame()
def loadSDF(self, filename):
    """Load collision geometry from an SDF world file and create matching
    affordances.  Only box and cylinder geometry is supported.

    Articulated models (links with submodels) are skipped entirely;
    unsupported geometry types are reported on the console and skipped.
    """
    sdf = pysdf.SDF(file=filename)
    for model in sdf.world.models:
        for link in model.links:
            if hasattr(link, 'submodels') and len(link.submodels) > 0:
                print(model.name + ' - This is an articulated object - SKIPPING!')
                break
            for col in link.collisions:
                # Compose world <- model <- link <- collision transforms.
                t1 = transformUtils.getTransformFromNumpy(model.pose)
                t2 = transformUtils.getTransformFromNumpy(link.pose)
                t3 = transformUtils.getTransformFromNumpy(col.pose)
                t = t1
                t.PreMultiply()
                t.Concatenate(t2)
                t.PreMultiply()
                t.Concatenate(t3)
                p = transformUtils.poseFromTransform(t)
                # Display name: model[-link][-collision], only when distinct.
                name = model.name
                if len(link.name) > 0 and link.name != model.name:
                    name += '-' + link.name
                if len(col.name) > 0 and len(link.collisions) > 1:
                    name += '-' + col.name
                # The geometry types are mutually exclusive: dispatch once.
                if col.geometry_type == 'mesh':
                    print('Mesh geometry is unsupported - SKIPPING!')
                elif col.geometry_type == 'image':
                    print('image geometry is unsupported - SKIPPING!')
                elif col.geometry_type == 'height_map':
                    print('Height map geometry is unsupported - SKIPPING!')
                elif col.geometry_type == 'plane':
                    print('Plane geometry is unsupported - SKIPPING!')
                elif col.geometry_type == 'sphere':
                    print('Sphere geometry is unsupported - SKIPPING!')
                elif col.geometry_type == 'box':
                    # list(map(...)) keeps Dimensions a real list on Python 3
                    # (identical to map(...) on Python 2).
                    desc = dict(classname='BoxAffordanceItem', Name=name,
                                uuid=newUUID(), pose=p, Color=[0.8, 0.8, 0.8],
                                Dimensions=list(map(float, col.geometry_data['size'].split(' '))))
                    self.affordanceManager.newAffordanceFromDescription(desc)
                elif col.geometry_type == 'cylinder':
                    desc = dict(classname='CylinderAffordanceItem', Name=name,
                                uuid=newUUID(), pose=p, Color=[0.8, 0.8, 0.8],
                                Radius=float(col.geometry_data['radius']),
                                Length=float(col.geometry_data['length']))
                    self.affordanceManager.newAffordanceFromDescription(desc)
def __init__(self):
    # Identity pose. NOTE(review): appears unused after this line -- confirm.
    pose = transformUtils.poseFromTransform(vtk.vtkTransform())
    # Load the rehearsal course point cloud and show it as a movable object.
    self.pointcloud = ioUtils.readPolyData(ddapp.getDRCBaseDir() + '/software/models/rehearsal_pointcloud.vtp')
    self.pointcloudPD = vis.showPolyData(self.pointcloud, 'coursemodel', parent=None)
    segmentation.makeMovable(self.pointcloudPD, transformUtils.transformFromPose(array([0, 0, 0]), array([ 1.0, 0. , 0. , 0.0])))
    self.originFrame = self.pointcloudPD.getChildFrame()
    # Hand-calibrated walk-target frames, all parented to the course model.
    t = transformUtils.transformFromPose(array([-4.39364111, -0.51507392, -0.73125563]), array([ 0.93821625, 0. , 0. , -0.34604951]))
    self.valveWalkFrame = vis.updateFrame(t, 'ValveWalk', scale=0.2, visible=True, parent=self.pointcloudPD)
    t = transformUtils.transformFromPose(array([-3.31840048, 0.36408685, -0.67413123]), array([ 0.93449475, 0. , 0. , -0.35597691]))
    self.drillPreWalkFrame = vis.updateFrame(t, 'DrillPreWalk', scale=0.2, visible=True, parent=self.pointcloudPD)
    t = transformUtils.transformFromPose(array([-2.24553758, -0.52990939, -0.73255338]), array([ 0.93697004, 0. , 0. , -0.34940972]))
    self.drillWalkFrame = vis.updateFrame(t, 'DrillWalk', scale=0.2, visible=True, parent=self.pointcloudPD)
    t = transformUtils.transformFromPose(array([-2.51306835, -0.92994004, -0.74173541 ]), array([-0.40456572, 0. , 0. , 0.91450893]))
    self.drillWallWalkFarthestSafeFrame = vis.updateFrame(t, 'DrillWallWalkFarthestSafe', scale=0.2, visible=True, parent=self.pointcloudPD)
    t = transformUtils.transformFromPose(array([-2.5314524 , -0.27401861, -0.71302976]), array([ 0.98691519, 0. , 0. , -0.16124022]))
    self.drillWallWalkBackFrame = vis.updateFrame(t, 'DrillWallWalkBack', scale=0.2, visible=True, parent=self.pointcloudPD)
    t = transformUtils.transformFromPose(array([-1.16122318, 0.04723203, -0.67493468]), array([ 0.93163145, 0. , 0. , -0.36340451]))
    self.surprisePreWalkFrame = vis.updateFrame(t, 'SurprisePreWalk', scale=0.2, visible=True, parent=self.pointcloudPD)
    t = transformUtils.transformFromPose(array([-0.5176186 , -1.00151554, -0.70650799]), array([ 0.84226497, 0. , 0. , -0.53906374]))
    self.surpriseWalkFrame = vis.updateFrame(t, 'SurpriseWalk', scale=0.2, visible=True, parent=self.pointcloudPD)
    t = transformUtils.transformFromPose(array([-0.69100097, -0.43713269, -0.68495922]), array([ 0.98625075, 0. , 0. , -0.16525575]))
    self.surpriseWalkBackFrame = vis.updateFrame(t, 'SurpriseWalkBack', scale=0.2, visible=True, parent=self.pointcloudPD)
    t = transformUtils.transformFromPose(array([ 0.65827322, -0.08028796, -0.77370834]), array([ 0.94399977, 0. , 0. , -0.3299461 ]))
    self.terrainPreWalkFrame = vis.updateFrame(t, 'TerrainPreWalk', scale=0.2, visible=True, parent=self.pointcloudPD)
    t = transformUtils.transformFromPose(array([ 5.47126425, -0.09790393, -0.70504679]), array([ 1., 0., 0., 0.]))
    self.stairsPreWalkFrame = vis.updateFrame(t, 'StairsPreWalk', scale=0.2, visible=True, parent=self.pointcloudPD)
    # Keep every target frame rigidly attached to the (movable) course model:
    # moving the origin moves all targets; targets themselves ignore incoming edits.
    self.frameSync = vis.FrameSync()
    self.frameSync.addFrame(self.originFrame)
    self.frameSync.addFrame(self.pointcloudPD.getChildFrame(), ignoreIncoming=True)
    self.frameSync.addFrame(self.valveWalkFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.drillPreWalkFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.drillWalkFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.drillWallWalkFarthestSafeFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.drillWallWalkBackFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.surprisePreWalkFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.surpriseWalkFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.surpriseWalkBackFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.terrainPreWalkFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.stairsPreWalkFrame, ignoreIncoming=True)
def __init__(self):
    # Identity pose. NOTE(review): appears unused after this line -- confirm.
    pose = transformUtils.poseFromTransform(vtk.vtkTransform())
    # Load the rehearsal course point cloud and show it as a movable object.
    self.pointcloud = ioUtils.readPolyData(ddapp.getDRCBaseDir() + '/software/models/rehearsal_pointcloud.vtp')
    self.pointcloudPD = vis.showPolyData(self.pointcloud, 'coursemodel', parent=None)
    segmentation.makeMovable(self.pointcloudPD, transformUtils.transformFromPose(array([0, 0, 0]), array([ 1.0, 0. , 0. , 0.0])))
    self.originFrame = self.pointcloudPD.getChildFrame()
    # Hand-calibrated walk-target frames, all parented to the course model.
    t = transformUtils.transformFromPose(array([-4.39364111, -0.51507392, -0.73125563]), array([ 0.93821625, 0. , 0. , -0.34604951]))
    self.valveWalkFrame = vis.updateFrame(t, 'ValveWalk', scale=0.2, visible=True, parent=self.pointcloudPD)
    t = transformUtils.transformFromPose(array([-3.31840048, 0.36408685, -0.67413123]), array([ 0.93449475, 0. , 0. , -0.35597691]))
    self.drillPreWalkFrame = vis.updateFrame(t, 'DrillPreWalk', scale=0.2, visible=True, parent=self.pointcloudPD)
    t = transformUtils.transformFromPose(array([-2.24553758, -0.52990939, -0.73255338]), array([ 0.93697004, 0. , 0. , -0.34940972]))
    self.drillWalkFrame = vis.updateFrame(t, 'DrillWalk', scale=0.2, visible=True, parent=self.pointcloudPD)
    t = transformUtils.transformFromPose(array([-2.51306835, -0.92994004, -0.74173541 ]), array([-0.40456572, 0. , 0. , 0.91450893]))
    self.drillWallWalkFarthestSafeFrame = vis.updateFrame(t, 'DrillWallWalkFarthestSafe', scale=0.2, visible=True, parent=self.pointcloudPD)
    t = transformUtils.transformFromPose(array([-2.5314524 , -0.27401861, -0.71302976]), array([ 0.98691519, 0. , 0. , -0.16124022]))
    self.drillWallWalkBackFrame = vis.updateFrame(t, 'DrillWallWalkBack', scale=0.2, visible=True, parent=self.pointcloudPD)
    t = transformUtils.transformFromPose(array([-1.16122318, 0.04723203, -0.67493468]), array([ 0.93163145, 0. , 0. , -0.36340451]))
    self.surprisePreWalkFrame = vis.updateFrame(t, 'SurprisePreWalk', scale=0.2, visible=True, parent=self.pointcloudPD)
    t = transformUtils.transformFromPose(array([-0.5176186 , -1.00151554, -0.70650799]), array([ 0.84226497, 0. , 0. , -0.53906374]))
    self.surpriseWalkFrame = vis.updateFrame(t, 'SurpriseWalk', scale=0.2, visible=True, parent=self.pointcloudPD)
    t = transformUtils.transformFromPose(array([-0.69100097, -0.43713269, -0.68495922]), array([ 0.98625075, 0. , 0. , -0.16525575]))
    self.surpriseWalkBackFrame = vis.updateFrame(t, 'SurpriseWalkBack', scale=0.2, visible=True, parent=self.pointcloudPD)
    t = transformUtils.transformFromPose(array([ 0.65827322, -0.08028796, -0.77370834]), array([ 0.94399977, 0. , 0. , -0.3299461 ]))
    self.terrainPreWalkFrame = vis.updateFrame(t, 'TerrainPreWalk', scale=0.2, visible=True, parent=self.pointcloudPD)
    t = transformUtils.transformFromPose(array([ 5.47126425, -0.09790393, -0.70504679]), array([ 1., 0., 0., 0.]))
    self.stairsPreWalkFrame = vis.updateFrame(t, 'StairsPreWalk', scale=0.2, visible=True, parent=self.pointcloudPD)
    # Keep every target frame rigidly attached to the (movable) course model:
    # moving the origin moves all targets; targets themselves ignore incoming edits.
    self.frameSync = vis.FrameSync()
    self.frameSync.addFrame(self.originFrame)
    self.frameSync.addFrame(self.pointcloudPD.getChildFrame(), ignoreIncoming=True)
    self.frameSync.addFrame(self.valveWalkFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.drillPreWalkFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.drillWalkFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.drillWallWalkFarthestSafeFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.drillWallWalkBackFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.surprisePreWalkFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.surpriseWalkFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.surpriseWalkBackFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.terrainPreWalkFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.stairsPreWalkFrame, ignoreIncoming=True)
def default(self, obj):
    """JSON-encode vtkTransforms as position/quaternion dicts and
    FieldContainers as ordered dicts; defer everything else to the
    NumpyEncoder base class."""
    if isinstance(obj, vtk.vtkTransform):
        position, quaternion = transformUtils.poseFromTransform(obj)
        return OrderedDict(position=position, quaternion=quaternion)
    if isinstance(obj, fieldcontainer.FieldContainer):
        encoded = OrderedDict()
        encoded['class'] = type(obj).__name__
        for fieldName in obj._fields:
            encoded[fieldName] = getattr(obj, fieldName)
        return encoded
    return nje.NumpyEncoder.default(self, obj)
def default(self, obj):
    """JSON-encode vtkTransforms as position/quaternion dicts and
    FieldContainers as ordered dicts; defer everything else to the
    NumpyEncoder base class."""
    if isinstance(obj, vtk.vtkTransform):
        position, quaternion = transformUtils.poseFromTransform(obj)
        return OrderedDict(position=position, quaternion=quaternion)
    if isinstance(obj, fieldcontainer.FieldContainer):
        encoded = OrderedDict()
        encoded['class'] = type(obj).__name__
        for fieldName in obj._fields:
            encoded[fieldName] = getattr(obj, fieldName)
        return encoded
    return nje.NumpyEncoder.default(self, obj)
def publishCorrection(self, channel='POSE_YAW_LOCK'):
    """Publish the current pelvis orientation estimate as an LCM pose
    message on the given channel.

    Only orientation is meaningful here; the position field is zeroed.
    """
    pelvisToWorld = self.getPelvisEstimate()
    # Position is intentionally discarded -- this channel carries orientation only.
    _, quat = transformUtils.poseFromTransform(pelvisToWorld)
    msg = lcmbot.pose_t()
    msg.utime = robotStateJointController.lastRobotStateMessage.utime
    msg.pos = [0.0, 0.0, 0.0]
    msg.orientation = quat.tolist()
    lcmUtils.publish(channel, msg)
def publishCorrection(self, channel='POSE_YAW_LOCK'):
    """Publish the current pelvis orientation estimate as an LCM pose
    message on the given channel.

    Only orientation is meaningful here; the position field is zeroed.
    """
    pelvisToWorld = self.getPelvisEstimate()
    # Position is intentionally discarded -- this channel carries orientation only.
    _, quat = transformUtils.poseFromTransform(pelvisToWorld)
    msg = lcmbot.pose_t()
    msg.utime = robotStateJointController.lastRobotStateMessage.utime
    msg.pos = [0.0, 0.0, 0.0]
    msg.orientation = quat.tolist()
    lcmUtils.publish(channel, msg)
def getPlanningFrame(self):
    """Compute self.planToWorld: a planning frame centered on the platform
    (half its dimensions) but carrying the feet mid-point's lateral (y)
    coordinate, with the platform's orientation."""
    platformToWorld = self.platform.getChildFrame().transform
    worldToPlatform = platformToWorld.GetLinearInverse()
    feetMidFrame = self.robotSystem.footstepsDriver.getFeetMidPoint(
        self.robotSystem.robotStateModel)
    footPosInPlatformFrame = worldToPlatform.TransformPoint(feetMidFrame.GetPosition())
    planFramePosInPlatformFrame = self.dimensions / 2.0
    planFramePosInPlatformFrame[1] = footPosInPlatformFrame[1]
    planFramePosInWorld = platformToWorld.TransformPoint(planFramePosInPlatformFrame)
    # Reuse the platform's orientation for the planning frame.
    _, quat = transformUtils.poseFromTransform(platformToWorld)
    self.planToWorld = transformUtils.transformFromPose(planFramePosInWorld, quat)
def getPlanningFrame(self):
    """Compute self.planToWorld: a planning frame centered on the platform
    (half its dimensions) but carrying the feet mid-point's lateral (y)
    coordinate, with the platform's orientation."""
    platformToWorld = self.platform.getChildFrame().transform
    worldToPlatform = platformToWorld.GetLinearInverse()
    feetMidFrame = self.robotSystem.footstepsDriver.getFeetMidPoint(
        self.robotSystem.robotStateModel)
    footPosInPlatformFrame = worldToPlatform.TransformPoint(feetMidFrame.GetPosition())
    planFramePosInPlatformFrame = self.dimensions / 2.0
    planFramePosInPlatformFrame[1] = footPosInPlatformFrame[1]
    planFramePosInWorld = platformToWorld.TransformPoint(planFramePosInPlatformFrame)
    # Reuse the platform's orientation for the planning frame.
    _, quat = transformUtils.poseFromTransform(platformToWorld)
    self.planToWorld = transformUtils.transformFromPose(planFramePosInWorld, quat)
def spawnRunningBoardAffordance(self):
    """Return the 'running board' box affordance under the robot's feet,
    creating it (centered half its thickness below the stance frame) if it
    does not already exist."""
    boxDimensions = [0.4, 1.0, 0.05]  # x, y, z extents (meters)
    stanceFrame = self.robotSystem.footstepsDriver.getFeetMidPoint(
        self.robotSystem.robotStateModel, useWorldZ=False)
    boxFrame = transformUtils.copyFrame(stanceFrame)
    boxFrame.PreMultiply()
    boxFrame.Translate(0.0, 0.0, -boxDimensions[2] / 2.0)
    box = om.findObjectByName('running board')
    if not box:
        description = dict(classname='BoxAffordanceItem',
                           Name='running board',
                           Dimensions=boxDimensions,
                           pose=transformUtils.poseFromTransform(boxFrame))
        box = self.robotSystem.affordanceManager.newAffordanceFromDescription(description)
    return box
def spawnRunningBoardAffordance(self):
    """Return the 'running board' box affordance under the robot's feet,
    creating it (centered half its thickness below the stance frame) if it
    does not already exist."""
    boxDimensions = [0.4, 1.0, 0.05]  # x, y, z extents (meters)
    stanceFrame = self.robotSystem.footstepsDriver.getFeetMidPoint(
        self.robotSystem.robotStateModel, useWorldZ=False)
    boxFrame = transformUtils.copyFrame(stanceFrame)
    boxFrame.PreMultiply()
    boxFrame.Translate(0.0, 0.0, -boxDimensions[2] / 2.0)
    box = om.findObjectByName('running board')
    if not box:
        description = dict(classname='BoxAffordanceItem',
                           Name='running board',
                           Dimensions=boxDimensions,
                           pose=transformUtils.poseFromTransform(boxFrame))
        box = self.robotSystem.affordanceManager.newAffordanceFromDescription(description)
    return box
def addCollisionObject(self, obj):
    """Create a semi-transparent block affordance mirroring obj's box child.

    No-op if an affordance for this object already exists under the
    'affordances' container.
    """
    name = obj.getProperty('Name') + ' affordance'
    if om.getOrCreateContainer('affordances').findChild(name):
        return  # Affordance has been created previously
    frame = obj.findChild(obj.getProperty('Name') + ' frame')
    (origin, quat) = transformUtils.poseFromTransform(frame.transform)
    (xaxis, yaxis, zaxis) = transformUtils.getAxesFromTransform(frame.transform)
    # TODO: move this into transformUtils as getAxisDimensions or so
    box = obj.findChild(obj.getProperty('Name') + ' box')
    box_np = vtkNumpy.getNumpyFromVtk(box.polyData, 'Points')
    # Per-axis extent of the box points; max >= min per axis, so the
    # differences are already the (non-negative) widths.  This replaces a
    # roundabout np.linalg.norm() of each scalar difference.
    xwidth, ywidth, zwidth = np.amax(box_np, 0) - np.amin(box_np, 0)
    boxAffordance = segmentation.createBlockAffordance(origin, xaxis, yaxis, zaxis,
                                                       xwidth, ywidth, zwidth,
                                                       name, parent='affordances')
    boxAffordance.setSolidColor(obj.getProperty('Color'))
    boxAffordance.setProperty('Alpha', 0.3)
def loadSDF(self, filename):
    """Load collision geometry from an SDF world file and create matching
    affordances.  Only box and cylinder geometry is supported.

    Articulated models (links with submodels) are skipped entirely;
    unsupported geometry types are reported on the console and skipped.
    """
    sdf = pysdf.SDF(file=filename)
    for model in sdf.world.models:
        for link in model.links:
            if hasattr(link, 'submodels') and len(link.submodels) > 0:
                print(model.name + ' - This is an articulated object - SKIPPING!')
                break
            for col in link.collisions:
                # Compose world <- model <- link <- collision transforms.
                t1 = transformUtils.getTransformFromNumpy(model.pose)
                t2 = transformUtils.getTransformFromNumpy(link.pose)
                t3 = transformUtils.getTransformFromNumpy(col.pose)
                t = t1
                t.PreMultiply()
                t.Concatenate(t2)
                t.PreMultiply()
                t.Concatenate(t3)
                p = transformUtils.poseFromTransform(t)
                # Display name: model[-link][-collision], only when distinct.
                name = model.name
                if len(link.name) > 0 and link.name != model.name:
                    name += '-' + link.name
                if len(col.name) > 0 and len(link.collisions) > 1:
                    name += '-' + col.name
                # The geometry types are mutually exclusive: dispatch once.
                if col.geometry_type == 'mesh':
                    print('Mesh geometry is unsupported - SKIPPING!')
                elif col.geometry_type == 'image':
                    print('image geometry is unsupported - SKIPPING!')
                elif col.geometry_type == 'height_map':
                    print('Height map geometry is unsupported - SKIPPING!')
                elif col.geometry_type == 'plane':
                    print('Plane geometry is unsupported - SKIPPING!')
                elif col.geometry_type == 'sphere':
                    print('Sphere geometry is unsupported - SKIPPING!')
                elif col.geometry_type == 'box':
                    # list(map(...)) keeps Dimensions a real list on Python 3
                    # (identical to map(...) on Python 2).
                    desc = dict(classname='BoxAffordanceItem', Name=name,
                                uuid=newUUID(), pose=p, Color=[0.8, 0.8, 0.8],
                                Dimensions=list(map(float, col.geometry_data['size'].split(' '))))
                    self.affordanceManager.newAffordanceFromDescription(desc)
                elif col.geometry_type == 'cylinder':
                    desc = dict(classname='CylinderAffordanceItem', Name=name,
                                uuid=newUUID(), pose=p, Color=[0.8, 0.8, 0.8],
                                Radius=float(col.geometry_data['radius']),
                                Length=float(col.geometry_data['length']))
                    self.affordanceManager.newAffordanceFromDescription(desc)
def _getCommands(self, commands, constraintNames, suffix):
    # Emit a MATLAB WorldQuatConstraint command for this link's orientation.
    # Appends the generated variable name to constraintNames and the command
    # string to commands (both lists are mutated in place).
    assert self.linkName
    varName = 'quat_constraint%s' % suffix
    constraintNames.append(varName)
    quat = self.quaternion
    # Accept either a raw quaternion or a vtkTransform carrying one.
    if isinstance(quat, vtk.vtkTransform):
        _, quat = poseFromTransform(quat)
    formatArgs = dict(varName=varName, robotArg=self.robotArg,
                      tspan=self.getTSpanString(),
                      linkName=self.linkName,
                      quat=self.toColumnVectorString(quat),
                      # Earlier tolerance formulation, kept for reference:
                      #tolerance=repr(math.sin(math.radians(self.angleToleranceInDegrees))**2))
                      tolerance=repr(math.radians(self.angleToleranceInDegrees)))
    commands.append('{varName} = WorldQuatConstraint({robotArg}, links.{linkName}, '
                    '{quat}, {tolerance}, {tspan});'
                    ''.format(**formatArgs))
def _getCommands(self, commands, constraintNames, suffix):
    # Emit a MATLAB WorldGazeOrientConstraint command for this link.
    # Appends the generated variable name to constraintNames and the command
    # string to commands (both lists are mutated in place).
    assert self.linkName
    varName = 'gaze_orient_constraint%s' % suffix
    constraintNames.append(varName)
    quat = self.quaternion
    # Accept either a raw quaternion or a vtkTransform carrying one.
    if isinstance(quat, vtk.vtkTransform):
        _, quat = poseFromTransform(quat)
    formatArgs = dict(varName=varName, robotArg=self.robotArg,
                      tspan=self.getTSpanString(),
                      linkName=self.linkName,
                      quat=self.toColumnVectorString(quat),
                      axis=self.toColumnVectorString(self.axis),
                      coneThreshold=repr(self.coneThreshold),
                      threshold=repr(self.threshold))
    commands.append('{varName} = WorldGazeOrientConstraint({robotArg}, links.{linkName}, {axis}, '
                    '{quat}, {coneThreshold}, {threshold}, {tspan});'
                    ''.format(**formatArgs))
def getFrameToOriginTransform(self, t):
    """Return a copy of frame t re-expressed relative to the polaris origin frame.

    The input transform is not modified; a copy is post-multiplied by the
    inverse of the origin frame's transform.
    """
    tCopy = transformUtils.copyFrame(t)
    tCopy.PostMultiply()
    tCopy.Concatenate(self.polaris.originFrame.transform.GetLinearInverse())
    # Parenthesized print is valid on both Python 2 and 3 for a single argument.
    print(transformUtils.poseFromTransform(tCopy))
    return tCopy
def __init__(self):
    # Subscribe to AprilTag detections used to localize the vehicle model.
    self.aprilTagSubsciber = lcmUtils.addSubscriber('APRIL_TAG_TO_CAMERA_LEFT', lcmbotcore.rigid_transform_t, self.onAprilTag)
    pose = transformUtils.poseFromTransform(vtk.vtkTransform())
    # Polaris vehicle mesh affordance; its child frame is the origin all
    # egress/driving frames are parented to.
    desc = dict(classname='MeshAffordanceItem', Name='polaris', Filename='software/models/polaris/polaris_cropped.vtp', pose=pose)
    self.pointcloudAffordance = segmentation.affordanceManager.newAffordanceFromDescription(desc)
    self.originFrame = self.pointcloudAffordance.getChildFrame()
    # offset for . . . who knows why
    self.originToAprilTransform = transformUtils.transformFromPose(
        np.array([-0.038508 , -0.00282131, -0.01000079]),
        np.array([ 9.99997498e-01, -2.10472556e-03, -1.33815696e-04, 7.46246794e-04]))
    # Hand-calibrated egress foot frames (superseded calibration values removed).
    t = transformUtils.transformFromPose(np.array([ 0.10873244, 0.93162364, 0.40509084]),
                                         np.array([ 0.32997378, 0.88498408, -0.31780588, 0.08318602]))
    self.leftFootEgressStartFrame = vis.updateFrame(t, 'left foot start', scale=0.2, visible=True, parent=self.pointcloudAffordance)
    t = transformUtils.transformFromPose(np.array([ 0.265, 0.874, 0.274]),
                                         np.array([ 0.35290731, 0.93443693, -0.04181263, 0.02314636]))
    self.leftFootEgressMidFrame = vis.updateFrame(t, 'left foot mid', scale=0.2, visible=True, parent=self.pointcloudAffordance)
    t = transformUtils.transformFromPose(np.array([ 0.54339115, 0.89436275, 0.26681047]),
                                         np.array([ 0.34635985, 0.93680077, -0.04152008, 0.02674412]))
    self.leftFootEgressOutsideFrame = vis.updateFrame(t, 'left foot outside', scale=0.2, visible=True, parent=self.pointcloudAffordance)
    # Steering wheel pose; earlier location/orientation calibrations removed.
    pose = [np.array([-0.78594663, 0.42026626, -0.23248139]),
            np.array([ 0.53047159, 0.46554963, -0.48086192, 0.52022615])]  # update orientation
    desc = dict(classname='CapsuleRingAffordanceItem', Name='Steering Wheel', uuid=newUUID(), pose=pose,
                Color=[1, 0, 0], Radius=float(0.18), Segments=20)
    self.steeringWheelAffordance = segmentation.affordanceManager.newAffordanceFromDescription(desc)
    # Accelerator pedal box.
    pose = [np.array([-0.05907324, 0.80460545, 0.45439687]),
            np.array([ 0.14288327, 0.685944 , -0.703969 , 0.11615873])]
    desc = dict(classname='BoxAffordanceItem', Name='pedal', Dimensions=[0.12, 0.33, 0.04], pose=pose, Color=[0,1,0])
    self.pedalAffordance = segmentation.affordanceManager.newAffordanceFromDescription(desc)
    # Driving-related foot frames (superseded calibration values removed).
    t = transformUtils.transformFromPose(np.array([ 0.0384547 , 0.89273742, 0.24140762]),
                                         np.array([ 0.26331831, 0.915796 , -0.28055337, 0.11519963]))
    self.leftFootPedalSwingFrame = vis.updateFrame(t, 'left foot pedal swing', scale=0.2, visible=True, parent=self.pointcloudAffordance)
    t = transformUtils.transformFromPose(np.array([-0.9012598 , -0.05709763, 0.34897024]),
                                         np.array([ 0.03879584, 0.98950919, 0.03820214, 0.13381721]))
    self.leftFootDrivingFrame = vis.updateFrame(t, 'left foot driving', scale=0.2, visible=True, parent=self.pointcloudAffordance)
    # NOTE(review): this first assignment reuses the 'left foot driving'
    # transform and is immediately overwritten below -- likely leftover
    # from calibration experiments; confirm before removing.
    self.leftFootDrivingKneeInFrame = vis.updateFrame(t, 'left foot driving knee in', scale=0.2, visible=True, parent=self.pointcloudAffordance)
    t = transformUtils.transformFromPose(np.array([-0.12702725, 0.92068409, 0.27209386]),
                                         np.array([ 0.2062255 , 0.92155886, -0.30781119, 0.11598529]))
    self.leftFootDrivingKneeInFrame = vis.updateFrame(t, 'left foot driving knee in', scale=0.2, visible=True, parent=self.pointcloudAffordance)
    t = transformUtils.transformFromPose(np.array([-0.13194951, 0.89871423, 0.24956246]),
                                         np.array([ 0.21589082, 0.91727326, -0.30088849, 0.14651633]))
    self.leftFootOnPedal = vis.updateFrame(t, 'left foot on pedal', scale=0.2, visible=True, parent=self.pointcloudAffordance)
    t = transformUtils.transformFromPose(np.array([ 0.17712239, 0.87619935, 0.27001509]),
                                         np.array([ 0.33484372, 0.88280787, -0.31946488, 0.08044963]))
    self.leftFootUpFrame = vis.updateFrame(t, 'left foot up frame', scale=0.2, visible=True, parent=self.pointcloudAffordance)
    t = transformUtils.transformFromPose(np.array([ 0.47214939, -0.04856998, 0.01375837]),
                                         np.array([ 6.10521653e-03, 4.18621358e-04, 4.65520611e-01, 8.85015882e-01]))
    self.rightHandGrabFrame = vis.updateFrame(t, 'right hand grab bar', scale=0.2, visible=True, parent=self.pointcloudAffordance)
    # Keep all frames/affordances rigidly attached to the vehicle model:
    # moving the origin moves everything; the others ignore incoming edits.
    self.frameSync = vis.FrameSync()
    self.frameSync.addFrame(self.originFrame)
    self.frameSync.addFrame(self.pointcloudAffordance.getChildFrame(), ignoreIncoming=True)
    self.frameSync.addFrame(self.leftFootEgressStartFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.leftFootEgressMidFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.leftFootEgressOutsideFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.steeringWheelAffordance.getChildFrame(), ignoreIncoming=True)
    self.frameSync.addFrame(self.pedalAffordance.getChildFrame(), ignoreIncoming=True)
    self.frameSync.addFrame(self.leftFootPedalSwingFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.leftFootDrivingFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.leftFootDrivingKneeInFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.leftFootOnPedal, ignoreIncoming=True)
    self.frameSync.addFrame(self.leftFootUpFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.rightHandGrabFrame, ignoreIncoming=True)
def toQuaternion(quat):
    """Return quat as a length-4 quaternion array; vtkTransforms are
    converted via poseFromTransform.  Any other shape trips the assert."""
    if isinstance(quat, vtk.vtkTransform):
        quat = poseFromTransform(quat)[1]
    assert quat.shape == (4,)
    return quat
def poseFromAffordance(aff):
    """Return a urdf.Pose (position + roll/pitch/yaw) for the affordance's
    child frame."""
    t = aff.getChildFrame().transform
    # Only the position is needed; orientation is taken as RPY below.
    position, _ = transformUtils.poseFromTransform(t)
    rpy = transformUtils.rollPitchYawFromTransform(t)
    return urdf.Pose(position, rpy)
def moveHandModelToFrame(model, frame):
    """Pose the floating-base hand model at the given vtk frame by setting
    its six base joints (xyz + roll/pitch/yaw)."""
    position, quaternion = transformUtils.poseFromTransform(frame)
    rollPitchYaw = transformUtils.quaternionToRollPitchYaw(quaternion)
    baseJoints = ['base_x', 'base_y', 'base_z', 'base_roll', 'base_pitch', 'base_yaw']
    model.model.setJointPositions(np.hstack((position, rollPitchYaw)), baseJoints)
def onSpawnCylinder(self):
    """Create a cylinder affordance at the current spawn frame."""
    description = dict(classname='CylinderAffordanceItem',
                       Name='cylinder',
                       uuid=newUUID(),
                       pose=transformUtils.poseFromTransform(self.getSpawnFrame()))
    return self.affordanceManager.newAffordanceFromDescription(description)
def onSpawnRing(self):
    """Create a capsule-ring affordance at the current spawn frame."""
    description = dict(classname='CapsuleRingAffordanceItem',
                       Name='ring',
                       uuid=newUUID(),
                       pose=transformUtils.poseFromTransform(self.getSpawnFrame()))
    return self.affordanceFromDescription(description)
def onSpawnBox(self):
    """Create a box affordance at the current spawn frame."""
    description = dict(classname='BoxAffordanceItem',
                       Name='box',
                       uuid=newUUID(),
                       pose=transformUtils.poseFromTransform(self.getSpawnFrame()))
    return self.affordanceFromDescription(description)
def onSpawnSphere(self):
    """Create a sphere affordance at the current spawn frame."""
    description = dict(classname='SphereAffordanceItem',
                       Name='sphere',
                       uuid=newUUID(),
                       pose=transformUtils.poseFromTransform(self.getSpawnFrame()))
    return self.affordanceFromDescription(description)
def onSpawnRing(self):
    """Create a capsule-ring affordance at the current spawn frame."""
    description = dict(classname='CapsuleRingAffordanceItem',
                       Name='ring',
                       uuid=newUUID(),
                       pose=transformUtils.poseFromTransform(self.getSpawnFrame()))
    return self.affordanceManager.newAffordanceFromDescription(description)
def poseFromAffordance(aff):
    """Return a urdf.Pose (position + roll/pitch/yaw) for the affordance's
    child frame."""
    t = aff.getChildFrame().transform
    # Only the position is needed; orientation is taken as RPY below.
    position, _ = transformUtils.poseFromTransform(t)
    rpy = transformUtils.rollPitchYawFromTransform(t)
    return urdf.Pose(position, rpy)
def moveHandModelToFrame(model, frame):
    """Pose the floating-base hand model at the given vtk frame by setting
    its six base joints (xyz + roll/pitch/yaw via botpy)."""
    position, quaternion = transformUtils.poseFromTransform(frame)
    rollPitchYaw = botpy.quat_to_roll_pitch_yaw(quaternion)
    baseJoints = ["base_x", "base_y", "base_z", "base_roll", "base_pitch", "base_yaw"]
    model.model.setJointPositions(np.hstack((position, rollPitchYaw)), baseJoints)
def getPose(self):
    """Return (position, quaternion) of this object's child frame, or the
    identity pose when there is no child frame."""
    childFrame = self.getChildFrame()
    if childFrame:
        return transformUtils.poseFromTransform(childFrame.transform)
    return transformUtils.poseFromTransform(vtk.vtkTransform())
def toPositionQuaternionString(pose):
    """Format a 7-vector (position + quaternion), or a vtkTransform, as a
    MATLAB column-vector string."""
    if isinstance(pose, vtk.vtkTransform):
        position, quaternion = poseFromTransform(pose)
        pose = np.hstack((position, quaternion))
    assert pose.shape == (7,)
    return ConstraintBase.toColumnVectorString(pose)
def getPose(self):
    """Return (position, quaternion) of this object's child frame, or the
    identity pose when there is no child frame."""
    childFrame = self.getChildFrame()
    if childFrame:
        return transformUtils.poseFromTransform(childFrame.transform)
    return transformUtils.poseFromTransform(vtk.vtkTransform())
def onFootstepStatus(self, msg):
    # Handle a footstep status message: track which foot made contact,
    # maintain the achieved/expected footstep lists, and when a new double
    # support is reached, compute the next double-support pose and request
    # a replan.  NOTE(review): reconstructed from collapsed source -- the
    # exact nesting of the trailing double-support block should be
    # confirmed against the original file.
    import ipab
    # Measured foot pose in world from the status message.
    x = msg.actual_foot_position_in_world[0]
    y = msg.actual_foot_position_in_world[1]
    z = msg.actual_foot_position_in_world[2]
    q1 = msg.actual_foot_orientation_in_world[0]
    q2 = msg.actual_foot_orientation_in_world[1]
    q3 = msg.actual_foot_orientation_in_world[2]
    q4 = msg.actual_foot_orientation_in_world[3]
    if msg.status == 1:
        tf_footStatus = transformUtils.transformFromPose([x, y, z], [q1, q2, q3, q4])
        # Express the foot pose in the robot frame: foot_in_robot = robot^-1 * foot.
        self.transforms_series[:] = []
        self.transforms_series.append(tf_footStatus)
        self.transforms_series.append(self.tf_robotStatus.GetInverse())
        tf_foot_robot = transformUtils.concatenateTransforms(self.transforms_series)
        self.footstep_index = self.footstep_index + 1
        # Check what foot is in contact
        [robot_pos, robot_ori] = transformUtils.poseFromTransform(self.tf_robotStatus)
        [current_pos, current_ori] = transformUtils.poseFromTransform(tf_foot_robot)
        # Positive y in the robot frame means the left foot.
        if (current_pos[1] > 0):
            current_left = True
        else:
            current_left = False
        # I want to take the first status for the LEFT foot
        if self.new_status and current_left:
            # left foot in contact (reached left single support)
            self.footStatus.append(Footstep(tf_footStatus, 0))
            self.new_first_double_supp = True
            self.new_status = False
            # right foot expected pose (from planning)
            if self.footstep_index + 1 < len(self.planned_footsteps):
                if (self.planned_footsteps[self.footstep_index + 1].is_right_foot):
                    self.footStatus.append(self.planned_footsteps[self.footstep_index + 1])
                else:
                    self.footStatus.append(self.planned_footsteps[self.footstep_index])
            else:
                # No more planned steps: fall back to the last achieved right step.
                self.footStatus.append(self.footStatus_right[len(self.footStatus_right) - 1])
        elif not current_left:
            # right foot in contact
            self.footStatus_right.append(Footstep(tf_footStatus, 1))
        else:
            self.new_status = True
        # First status after entering double support: compute the next
        # double-support pose and ask the planner to replan from it.
        if (len(self.footStatus) > 0 and self.new_first_double_supp):
            t1 = self.footStatus[len(self.footStatus) - 2].transform
            t2 = self.footStatus[len(self.footStatus) - 1].transform
            self.new_first_double_supp = False
            # getNextDoubleSupportPose expects (left, right): swap if needed.
            if self.footStatus[len(self.footStatus) - 2].is_right_foot:
                t1, t2 = t2, t1
            pose = self.getNextDoubleSupportPose(t1, t2)
            self.displayExpectedPose(pose)
            standingFootName = self.ikPlanner.rightFootLink if self.footStatus[len(self.footStatus) - 2].is_right_foot else self.ikPlanner.leftFootLink
            self.makeReplanRequest(standingFootName, removeFirstLeftStep = False, nextDoubleSupportPose=pose)