def getFrameToOriginTransform(self, t): tCopy = transformUtils.copyFrame(t) tCopy.PostMultiply() tCopy.Concatenate( self.polaris.originFrame.transform.GetLinearInverse()) print transformUtils.poseFromTransform(tCopy) return tCopy
def onSpawnBox(self):
    """Spawn a box affordance at the current spawn frame and return it."""
    spawnPose = transformUtils.poseFromTransform(self.getSpawnFrame())
    description = dict(classname='BoxAffordanceItem',
                       Name='box',
                       uuid=newUUID(),
                       pose=spawnPose)
    return self.affordanceManager.newAffordanceFromDescription(description)
def saveRegistrationResults(self, filename=None):
    """Save the pose of every affordance, in the first-camera-frame, to YAML.

    Existing entries in the registration-result file are loaded first so this
    merges with (rather than overwrites) prior results.

    :param filename: output YAML path; defaults to
        self.pathDict['registrationResult'].
    """
    registrationResultDict = dict()
    # start from the existing file, if any, so other objects' entries survive
    if os.path.isfile(self.pathDict['registrationResult']):
        registrationResultDict = utils.getDictFromYamlFilename(
            self.pathDict['registrationResult'])
    affordanceFolder = om.getOrCreateContainer('affordances')
    affordanceList = affordanceFolder.children()
    for affordance in affordanceList:
        objectName = affordance.getProperty('Name')
        modelToWorld = affordance.getChildFrame().transform
        # re-express the model pose relative to the first camera frame
        modelToFirstFrame = transformUtils.concatenateTransforms([
            modelToWorld, self.firstFrameToWorldTransform.GetLinearInverse()
        ])
        pose = transformUtils.poseFromTransform(modelToFirstFrame)
        # (position, quaternion) as plain lists so YAML serialization is clean
        poseAsList = [pose[0].tolist(), pose[1].tolist()]
        d = dict()
        meshFilename = affordance.getProperty('Filename')
        # store the mesh path relative to the data dir for portability
        relPathToDataDir = os.path.relpath(meshFilename,
                                           utils.getLabelFusionDataDir())
        d['filename'] = relPathToDataDir
        d['pose'] = poseAsList
        registrationResultDict[objectName] = d
    if filename is None:
        filename = self.pathDict['registrationResult']
    utils.saveDictToYaml(registrationResultDict, filename)
def saveTableFrame(self):
    """Write the table frame pose (position, quaternion lists) to the saved-transform YAML file."""
    pos, quat = transformUtils.poseFromTransform(self.tableFrame.transform)
    data = dict()
    data['table frame'] = [pos.tolist(), quat.tolist()]
    CorlUtils.saveDictToYaml(data, self.savedTransformFilename)
def loadSDF(self, filename):
    """Load collision geometry from an SDF file and spawn matching affordances.

    Links with submodels are treated as articulated and skipped (printed
    notice). Box and cylinder collision geometries become Box/Cylinder
    affordance items; mesh, image, height-map, plane and sphere geometries
    are skipped with a printed notice.
    """
    sdf = pysdf.SDF(file=filename)
    for model in sdf.world.models:
        for link in model.links:
            if hasattr(link, "submodels"):
                if len(link.submodels) > 0:
                    print model.name + " - This is an articulated object - SKIPPING!"
                    break
            for col in link.collisions:
                # compose model -> link -> collision poses into one transform
                t1 = transformUtils.getTransformFromNumpy(model.pose)
                t2 = transformUtils.getTransformFromNumpy(link.pose)
                t3 = transformUtils.getTransformFromNumpy(col.pose)
                t = t1
                t.PreMultiply()
                t.Concatenate(t2)
                t.PreMultiply()
                t.Concatenate(t3)
                p = transformUtils.poseFromTransform(t)
                name = ""  # model.name
                # default light-grey color when the SDF supplies none
                color = col.color[0:3] if col.color is not None else [
                    0.8, 0.8, 0.8
                ]
                if len(link.name) > 0:  # and link.name != model.name:
                    name += link.name
                if len(col.name) > 0 and len(link.collisions) > 1:
                    name += "-" + col.name
                if col.geometry_type == "mesh":
                    print "Mesh geometry is unsupported - SKIPPING!"
                if col.geometry_type == "image":
                    print "image geometry is unsupported - SKIPPING!"
                if col.geometry_type == "height_map":
                    print "Height map geometry is unsupported - SKIPPING!"
                if col.geometry_type == "plane":
                    print "Plane geometry is unsupported - SKIPPING!"
                if col.geometry_type == "sphere":
                    print "Sphere geometry is unsupported - SKIPPING!"
                if col.geometry_type == "box":
                    desc = dict(
                        classname="BoxAffordanceItem",
                        Name=name,
                        uuid=newUUID(),
                        pose=p,
                        Color=color,
                        Dimensions=map(
                            float, col.geometry_data["size"].split(" ")),
                    )
                    self.affordanceManager.newAffordanceFromDescription(
                        desc)
                if col.geometry_type == "cylinder":
                    desc = dict(
                        classname="CylinderAffordanceItem",
                        Name=name,
                        uuid=newUUID(),
                        pose=p,
                        Color=color,
                        Radius=float(col.geometry_data["radius"]),
                        Length=float(col.geometry_data["length"]),
                    )
                    self.affordanceManager.newAffordanceFromDescription(
                        desc)
def loadObjectMeshes(affordanceManager, registrationResultFilename,
                     firstFrameToWorldTransform):
    """
    Loads the object meshes from the registration_result.yaml file

    :param affordanceManager: affordance manager used to spawn the models
    :param registrationResultFilename: filename of registration_result.yaml,
        should be an absolute path
    :param firstFrameToWorldTransform: transform taking the first camera
        frame to the world frame
    :return: None
    """
    # Fix: use a context manager instead of the old file() call so the file
    # handle is closed even if YAML parsing raises.
    with open(registrationResultFilename) as stream:
        registrationResult = yaml.load(stream)

    for objName, data in registrationResult.iteritems():
        objectMeshFilename = data[
            'filename']  # should be relative to getLabelFusionDataDir()
        if len(objectMeshFilename) == 0:
            # fall back to the canonical mesh for this object name
            objectMeshFilename = getObjectMeshFilename(objName)
        else:
            objectMeshFilename = os.path.join(getLabelFusionDataDir(),
                                              objectMeshFilename)

        # the stored pose is object-to-first-camera-frame; compose with
        # firstFrameToWorld to obtain the world-frame pose
        objectToFirstFrame = transformUtils.transformFromPose(
            data['pose'][0], data['pose'][1])
        objectToWorld = transformUtils.concatenateTransforms(
            [objectToFirstFrame, firstFrameToWorldTransform])
        pose = transformUtils.poseFromTransform(objectToWorld)

        loadAffordanceModel(affordanceManager,
                            name=objName,
                            filename=objectMeshFilename,
                            pose=pose)
def rigidTransformMessageFromFrame(transform):
    '''
    Given a vtkTransform, returns a bot_core.rigid_transform_t message
    '''
    position, orientation = transformUtils.poseFromTransform(transform)
    msg = bot_core.rigid_transform_t()
    msg.trans = position
    msg.quat = orientation
    return msg
def moveHandModelToFrame(model, frame):
    # Convert the frame to a 6-dof (xyz + rpy) floating-base pose and apply
    # it to the hand model's base joints.
    position, quaternion = transformUtils.poseFromTransform(frame)
    rollPitchYaw = transformUtils.quaternionToRollPitchYaw(quaternion)
    jointPositions = np.hstack((position, rollPitchYaw))
    baseJointNames = ['base_x', 'base_y', 'base_z',
                      'base_roll', 'base_pitch', 'base_yaw']
    model.model.setJointPositions(jointPositions, baseJointNames)
def onSpawnSphere(self):
    """Spawn a sphere affordance at the current spawn frame and return it."""
    spawnPose = transformUtils.poseFromTransform(self.getSpawnFrame())
    description = dict(classname="SphereAffordanceItem",
                       Name="sphere",
                       uuid=newUUID(),
                       pose=spawnPose)
    return self.affordanceManager.newAffordanceFromDescription(description)
def addCameraPosesToDataFile(calibrationData, cameraPoses):
    """Attach the matched camera pose to each calibration entry in place.

    NOTE: camera poses are defined relative to the first camera frame, so the
    first entry will look uninitialized.
    """
    for entry in calibrationData:
        transform = cameraPoses.getCameraPoseAtUTime(entry['utime'])
        pos, quat = transformUtils.poseFromTransform(transform)
        cameraFrame = dict()
        cameraFrame['quaternion'] = dict(w=float(quat[0]),
                                         x=float(quat[1]),
                                         y=float(quat[2]),
                                         z=float(quat[3]))
        cameraFrame['translation'] = dict(x=float(pos[0]),
                                          y=float(pos[1]),
                                          z=float(pos[2]))
        entry['camera_frame'] = cameraFrame
    return calibrationData
def saveObjectPoses(self, imageFilename, cameraToCameraStart, baseName):
    """Write per-object pixel counts and camera-frame poses to <baseName>_poses.yaml.

    Reads the label image, counts pixels per label value, then for every
    non-background label with at least one pixel writes the object's label,
    pixel count, and pose expressed in the current camera frame.
    """
    # count pixels
    img = scipy.misc.imread(imageFilename)
    assert img.dtype == np.uint8
    # label images may arrive single-channel or RGB(A); keep the first channel
    if img.ndim in (3, 4):
        img = img[:, :, 0]
    else:
        assert img.ndim == 2
    labels, counts = np.unique(img, return_counts=True)
    labelToCount = dict(zip(labels, counts))
    # dense per-class pixel counts indexed by label value (0..max label)
    num_pixels_per_class = np.array([])
    for i in xrange(0, img.max() + 1):
        num_pixels = labelToCount.get(i, 0)
        num_pixels_per_class = np.append(num_pixels_per_class, num_pixels)
    pose_file_name = baseName + "_poses.yaml"
    target = open(pose_file_name, 'w')
    print 'writing:', pose_file_name
    # iterate through each class with 1 or more pixel and save pose...
    for index, val in enumerate(num_pixels_per_class):
        if index == 0:
            # don't save for background class
            continue
        if val > 0:
            cameraStartToCamera = cameraToCameraStart.GetLinearInverse()
            objectName = utils.getObjectName(self.objectData, index)
            target.write(objectName + ":")
            target.write("\n")
            target.write(" label: " + str(index))
            target.write("\n")
            target.write(" num_pixels: " + str(val))
            target.write("\n")
            # NOTE(review): self.objectToWorld is indexed here but treated as
            # object-to-camera-start -- presumably 'world' is the first camera
            # frame; confirm against the writer of objectToWorld.
            objToCameraStart = self.objectToWorld[objectName]
            objToCamera = transformUtils.concatenateTransforms(
                [objToCameraStart, cameraStartToCamera])
            pose = transformUtils.poseFromTransform(objToCamera)
            #Write T Matrix
            # NOTE(review): these keys look swapped relative to the BOP
            # convention -- 'cam_R_m2c' is written from pose[0] (the
            # translation) while 'cam_t_m2c' gets the quaternion's rotation
            # matrix. Verify against the consumer of this YAML before
            # changing the output format.
            target.write(" cam_R_m2c: " + str(pose[0].tolist()))
            target.write("\n")
            print "cam_R_m2c:\n"
            print pose
            q7 = Quaternion(pose[1])
            c = q7.rotation_matrix
            target.write(" cam_t_m2c: " + str(c.flatten().tolist()))
            target.write("\n")
            print "cam_t_m2c:\n"
            print pose
            print "\n"
    target.close()
def setRobotPoseFromOptitrack(robotToBodyOffset=[0.0, -0.035, 0.0, -90, -90.5, 0],
                              optitrackBodyName='Body 1'):
    """Set the robot state pose from the optitrack-tracked body, applying a fixed offset."""
    bodyToWorld = om.findObjectByName(optitrackBodyName + ' frame').transform
    # offset is [x, y, z, roll, pitch, yaw] of the robot relative to the body
    robotToBody = transformUtils.frameFromPositionAndRPY(robotToBodyOffset[:3],
                                                         robotToBodyOffset[3:])
    robotToWorld = transformUtils.concatenateTransforms([robotToBody, bodyToWorld])
    position, quaternion = transformUtils.poseFromTransform(robotToWorld)
    rollPitchYaw = transformUtils.quaternionToRollPitchYaw(quaternion)
    robotSystem.robotStateJointController.q[:6] = np.hstack((position, rollPitchYaw))
    robotSystem.robotStateJointController.push()
def makeBox():
    '''
    Create a box affordance at the identity pose; has property Dimensions.
    '''
    identityPose = transformUtils.poseFromTransform(vtk.vtkTransform())
    description = dict(classname='BoxAffordanceItem',
                       Name='box',
                       pose=identityPose)
    box = newAffordanceFromDescription(description)
    box.getChildFrame().setProperty('Scale', 0.1)
    return box
def onSpawnCylinder(self):
    """Spawn a cylinder affordance at the current spawn frame and return it."""
    spawnPose = transformUtils.poseFromTransform(self.getSpawnFrame())
    description = dict(classname="CylinderAffordanceItem",
                       Name="cylinder",
                       uuid=newUUID(),
                       pose=spawnPose)
    return self.affordanceManager.newAffordanceFromDescription(description)
def checkGraspFrame(inputGraspFrame, side):
    '''
    Return True if the given grasp frame matches the grasp frame of the
    teleop robot model's current pose, else False.
    '''
    q = teleopJointController.q
    teleopGraspFrame = ikPlanner.newGraspToWorldFrame(
        q, side, ikPlanner.newGraspToHandFrame(side))
    inputPos, inputQuat = transformUtils.poseFromTransform(inputGraspFrame)
    teleopPos, teleopQuat = transformUtils.poseFromTransform(teleopGraspFrame)
    try:
        np.testing.assert_allclose(inputPos, teleopPos, rtol=1e-3)
        np.testing.assert_allclose(inputQuat, teleopQuat, rtol=1e-3)
    except AssertionError:
        return False
    return True
def loadSDF(self, filename):
    """Load collision geometry from an SDF file and spawn matching affordances.

    Links with submodels are treated as articulated and skipped (printed
    notice). Box and cylinder collision geometries become Box/Cylinder
    affordance items; mesh, image, height-map, plane and sphere geometries
    are skipped with a printed notice.
    """
    sdf = pysdf.SDF(file=filename)
    for model in sdf.world.models:
        for link in model.links:
            if hasattr(link, 'submodels'):
                if len(link.submodels) > 0:
                    print model.name + ' - This is an articulated object - SKIPPING!'
                    break
            for col in link.collisions:
                # compose model -> link -> collision poses into one transform
                t1 = transformUtils.getTransformFromNumpy(model.pose)
                t2 = transformUtils.getTransformFromNumpy(link.pose)
                t3 = transformUtils.getTransformFromNumpy(col.pose)
                t = t1
                t.PreMultiply()
                t.Concatenate(t2)
                t.PreMultiply()
                t.Concatenate(t3)
                p = transformUtils.poseFromTransform(t)
                # build a unique-ish name from model/link/collision names
                name = model.name
                # default light-grey color when the SDF supplies none
                color = col.color[0:3] if col.color is not None else [
                    0.8, 0.8, 0.8
                ]
                if len(link.name) > 0 and link.name != model.name:
                    name += '-' + link.name
                if len(col.name) > 0 and len(link.collisions) > 1:
                    name += '-' + col.name
                if col.geometry_type == 'mesh':
                    print 'Mesh geometry is unsupported - SKIPPING!'
                if col.geometry_type == 'image':
                    print 'image geometry is unsupported - SKIPPING!'
                if col.geometry_type == 'height_map':
                    print 'Height map geometry is unsupported - SKIPPING!'
                if col.geometry_type == 'plane':
                    print 'Plane geometry is unsupported - SKIPPING!'
                if col.geometry_type == 'sphere':
                    print 'Sphere geometry is unsupported - SKIPPING!'
                if col.geometry_type == 'box':
                    desc = dict(classname='BoxAffordanceItem',
                                Name=name,
                                uuid=newUUID(),
                                pose=p,
                                Color=color,
                                Dimensions=map(
                                    float, col.geometry_data['size'].split(' ')))
                    self.affordanceManager.newAffordanceFromDescription(
                        desc)
                if col.geometry_type == 'cylinder':
                    desc = dict(classname='CylinderAffordanceItem',
                                Name=name,
                                uuid=newUUID(),
                                pose=p,
                                Color=color,
                                Radius=float(col.geometry_data['radius']),
                                Length=float(col.geometry_data['length']))
                    self.affordanceManager.newAffordanceFromDescription(
                        desc)
def onSpawnRing(self):
    """Spawn a capsule-ring affordance at the current spawn frame and return it."""
    spawnPose = transformUtils.poseFromTransform(self.getSpawnFrame())
    description = dict(classname="CapsuleRingAffordanceItem",
                       Name="ring",
                       uuid=newUUID(),
                       pose=spawnPose)
    return self.affordanceManager.newAffordanceFromDescription(description)
def loadMomapModel(name, meshFile):
    """Load a mesh affordance named `name` from the local momap objects repo."""
    objectsRepo = os.path.expanduser('~/catkin_ws/src/perception_deps/objects')
    meshPath = os.path.join(objectsRepo, meshFile)
    description = dict(classname='MeshAffordanceItem',
                       Name=name,
                       pose=transformUtils.poseFromTransform(vtk.vtkTransform()),
                       Filename=meshPath)
    return affordanceManager.newAffordanceFromDescription(description)
def onLocalPlaneFit():
    # Fit a plane to the picked point's neighborhood, show the inlier points,
    # and spawn a box affordance aligned with the fitted patch.
    planePoints, normal = segmentation.applyLocalPlaneFit(
        pointCloudObj.polyData, pickedPoint,
        searchRadius=0.1, searchRadiusEnd=0.2)
    obj = vis.showPolyData(planePoints, 'local plane fit', color=[0, 1, 0])
    obj.setProperty('Point Size', 7)
    fields = segmentation.makePolyDataFields(obj.polyData)
    boxPose = transformUtils.poseFromTransform(fields.frame)
    desc = dict(classname='BoxAffordanceItem',
                Name='local plane',
                Dimensions=list(fields.dims),
                pose=boxPose)
    box = segmentation.affordanceManager.newAffordanceFromDescription(desc)
def onFrameModified(obj):
    # Re-solve IK whenever the target frame moves: pin the left hand to the
    # frame's position and orientation.
    position, orientation = transformUtils.poseFromTransform(obj.transform)
    positionConstraint = ik.WorldPositionConstraint(
        robot, leftHandBodyId, np.zeros((3, )), position, position, tspan)
    quatConstraint = ik.WorldQuatConstraint(
        robot, leftHandBodyId, orientation, 0.0, tspan)
    solveAndDraw([qsc, positionConstraint, quatConstraint] + footConstraints)
def checkGraspFrame(inputGraspFrame, side):
    '''
    Return True if the given grasp frame matches the grasp frame of the
    teleop robot model's current pose, else False.
    '''
    currentPose = teleopJointController.q
    graspToHand = ikPlanner.newGraspToHandFrame(side)
    teleopGraspFrame = ikPlanner.newGraspToWorldFrame(currentPose, side,
                                                      graspToHand)
    posA, quatA = transformUtils.poseFromTransform(inputGraspFrame)
    posB, quatB = transformUtils.poseFromTransform(teleopGraspFrame)
    try:
        for expected, actual in ((posA, posB), (quatA, quatB)):
            np.testing.assert_allclose(expected, actual, rtol=1e-3)
        return True
    except AssertionError:
        return False
def spawnBoxAffordanceAtFrame(self, boxFrame): print 'spawning switch box affordance' dimensions = [0.08, 0.19, 0.25] depth = dimensions[0] boxFrame.PreMultiply() boxFrame.Translate(depth/2.0, 0.0, 0.0) pose = transformUtils.poseFromTransform(boxFrame) desc = dict(classname='BoxAffordanceItem', Name='Switch Box', Dimensions=dimensions, pose=pose, Color=[0,1,0]) self.boxAffordance = segmentation.affordanceManager.newAffordanceFromDescription(desc) self.updateReachFrame()
def onSpawnMesh(self):
    # Build a small arrow mesh, register it with the mesh manager, and spawn
    # it as a mesh affordance at the spawn frame.
    debugData = DebugData()
    debugData.addArrow((0, 0, 0), (0, 0, 0.3))
    polyData = debugData.getPolyData()
    meshId = affordanceitems.MeshAffordanceItem.getMeshManager().add(polyData)
    spawnPose = transformUtils.poseFromTransform(self.getSpawnFrame())
    desc = dict(classname='MeshAffordanceItem',
                Name='mesh',
                Filename=meshId,
                uuid=newUUID(),
                pose=spawnPose)
    return self.affordanceManager.newAffordanceFromDescription(desc)
def get_object_pose_relative_to_gripper(self):
    """Return T_gripper_object, the object's transform expressed in the gripper-link frame."""
    T_world_object = self._object.actor.GetUserTransform()
    T_world_gripper = self._robotStateModel.getLinkFrame(
        self._gripper_link_frame_name)
    T_gripper_world = T_world_gripper.GetLinearInverse()
    T_gripper_object = transformUtils.concatenateTransforms(
        [T_world_object, T_gripper_world])
    print(transformUtils.poseFromTransform(T_gripper_object))
    return T_gripper_object
def default(self, obj):
    # JSON encoder hook: vtkTransform serializes as position+quaternion,
    # FieldContainer as a class-tagged ordered dict; everything else falls
    # through to the numpy-aware base encoder.
    if isinstance(obj, vtk.vtkTransform):
        pos, quat = transformUtils.poseFromTransform(obj)
        return OrderedDict(position=pos, quaternion=quat)
    if isinstance(obj, fieldcontainer.FieldContainer):
        result = OrderedDict()
        result['class'] = type(obj).__name__
        for fieldName in obj._fields:
            result[fieldName] = getattr(obj, fieldName)
        return result
    return nje.NumpyConvertEncoder.default(self, obj)
def loadSDF(self, filename):
    """Load collision geometry from an SDF file and spawn matching affordances.

    Links with submodels are treated as articulated and skipped (printed
    notice). Box and cylinder collision geometries become Box/Cylinder
    affordance items with a fixed light-grey color; mesh, image, height-map,
    plane and sphere geometries are skipped with a printed notice.
    """
    sdf = pysdf.SDF(file=filename)
    for model in sdf.world.models:
        for link in model.links:
            if hasattr(link, "submodels"):
                if len(link.submodels) > 0:
                    print model.name + " - This is an articulated object - SKIPPING!"
                    break
            for col in link.collisions:
                # compose model -> link -> collision poses into one transform
                t1 = transformUtils.getTransformFromNumpy(model.pose)
                t2 = transformUtils.getTransformFromNumpy(link.pose)
                t3 = transformUtils.getTransformFromNumpy(col.pose)
                t = t1
                t.PreMultiply()
                t.Concatenate(t2)
                t.PreMultiply()
                t.Concatenate(t3)
                p = transformUtils.poseFromTransform(t)
                # build a unique-ish name from model/link/collision names
                name = model.name
                if len(link.name) > 0 and link.name != model.name:
                    name += "-" + link.name
                if len(col.name) > 0 and len(link.collisions) > 1:
                    name += "-" + col.name
                if col.geometry_type == "mesh":
                    print "Mesh geometry is unsupported - SKIPPING!"
                if col.geometry_type == "image":
                    print "image geometry is unsupported - SKIPPING!"
                if col.geometry_type == "height_map":
                    print "Height map geometry is unsupported - SKIPPING!"
                if col.geometry_type == "plane":
                    print "Plane geometry is unsupported - SKIPPING!"
                if col.geometry_type == "sphere":
                    print "Sphere geometry is unsupported - SKIPPING!"
                if col.geometry_type == "box":
                    desc = dict(
                        classname="BoxAffordanceItem",
                        Name=name,
                        uuid=newUUID(),
                        pose=p,
                        Color=[0.8, 0.8, 0.8],
                        Dimensions=map(float, col.geometry_data["size"].split(" ")),
                    )
                    self.affordanceManager.newAffordanceFromDescription(desc)
                if col.geometry_type == "cylinder":
                    desc = dict(
                        classname="CylinderAffordanceItem",
                        Name=name,
                        uuid=newUUID(),
                        pose=p,
                        Color=[0.8, 0.8, 0.8],
                        Radius=float(col.geometry_data["radius"]),
                        Length=float(col.geometry_data["length"]),
                    )
                    self.affordanceManager.newAffordanceFromDescription(desc)
def __init__(self):
    """Set up the rehearsal course model: a point cloud plus named walk-target frames.

    All frames are added to a FrameSync so they move rigidly with the point
    cloud when it is dragged.
    """
    # NOTE(review): `pose` is computed but never used below
    pose = transformUtils.poseFromTransform(vtk.vtkTransform())
    self.pointcloud = ioUtils.readPolyData(director.getDRCBaseDir() + '/software/models/rehearsal_pointcloud.vtp')
    self.pointcloudPD = vis.showPolyData(self.pointcloud, 'coursemodel', parent=None)
    segmentation.makeMovable(self.pointcloudPD, transformUtils.transformFromPose(array([0, 0, 0]), array([1.0, 0., 0., 0.0])))
    self.originFrame = self.pointcloudPD.getChildFrame()

    # named walking-target frames, parented to the point cloud
    t = transformUtils.transformFromPose(array([-4.39364111, -0.51507392, -0.73125563]), array([0.93821625, 0., 0., -0.34604951]))
    self.valveWalkFrame = vis.updateFrame(t, 'ValveWalk', scale=0.2, visible=True, parent=self.pointcloudPD)
    t = transformUtils.transformFromPose(array([-3.31840048, 0.36408685, -0.67413123]), array([0.93449475, 0., 0., -0.35597691]))
    self.drillPreWalkFrame = vis.updateFrame(t, 'DrillPreWalk', scale=0.2, visible=True, parent=self.pointcloudPD)
    t = transformUtils.transformFromPose(array([-2.24553758, -0.52990939, -0.73255338]), array([0.93697004, 0., 0., -0.34940972]))
    self.drillWalkFrame = vis.updateFrame(t, 'DrillWalk', scale=0.2, visible=True, parent=self.pointcloudPD)
    t = transformUtils.transformFromPose(array([-2.51306835, -0.92994004, -0.74173541]), array([-0.40456572, 0., 0., 0.91450893]))
    self.drillWallWalkFarthestSafeFrame = vis.updateFrame(t, 'DrillWallWalkFarthestSafe', scale=0.2, visible=True, parent=self.pointcloudPD)
    t = transformUtils.transformFromPose(array([-2.5314524, -0.27401861, -0.71302976]), array([0.98691519, 0., 0., -0.16124022]))
    self.drillWallWalkBackFrame = vis.updateFrame(t, 'DrillWallWalkBack', scale=0.2, visible=True, parent=self.pointcloudPD)
    t = transformUtils.transformFromPose(array([-1.16122318, 0.04723203, -0.67493468]), array([0.93163145, 0., 0., -0.36340451]))
    self.surprisePreWalkFrame = vis.updateFrame(t, 'SurprisePreWalk', scale=0.2, visible=True, parent=self.pointcloudPD)
    t = transformUtils.transformFromPose(array([-0.5176186, -1.00151554, -0.70650799]), array([0.84226497, 0., 0., -0.53906374]))
    self.surpriseWalkFrame = vis.updateFrame(t, 'SurpriseWalk', scale=0.2, visible=True, parent=self.pointcloudPD)
    t = transformUtils.transformFromPose(array([-0.69100097, -0.43713269, -0.68495922]), array([0.98625075, 0., 0., -0.16525575]))
    self.surpriseWalkBackFrame = vis.updateFrame(t, 'SurpriseWalkBack', scale=0.2, visible=True, parent=self.pointcloudPD)
    t = transformUtils.transformFromPose(array([0.65827322, -0.08028796, -0.77370834]), array([0.94399977, 0., 0., -0.3299461]))
    self.terrainPreWalkFrame = vis.updateFrame(t, 'TerrainPreWalk', scale=0.2, visible=True, parent=self.pointcloudPD)
    t = transformUtils.transformFromPose(array([5.47126425, -0.09790393, -0.70504679]), array([1., 0., 0., 0.]))
    self.stairsPreWalkFrame = vis.updateFrame(t, 'StairsPreWalk', scale=0.2, visible=True, parent=self.pointcloudPD)

    # keep every frame rigidly attached to the (movable) point cloud
    self.frameSync = vis.FrameSync()
    self.frameSync.addFrame(self.originFrame)
    self.frameSync.addFrame(self.pointcloudPD.getChildFrame(), ignoreIncoming=True)
    self.frameSync.addFrame(self.valveWalkFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.drillPreWalkFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.drillWalkFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.drillWallWalkFarthestSafeFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.drillWallWalkBackFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.surprisePreWalkFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.surpriseWalkFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.surpriseWalkBackFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.terrainPreWalkFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.stairsPreWalkFrame, ignoreIncoming=True)
def testGetCameraTransforms(self):
    """Print and visualize the camera transform of every managed view."""
    # reference frame at the world origin, drawn in the background view
    vis.updateFrame(vtk.vtkTransform(), "origin frame", view=self.views['background'])
    for viewType, view in self.views.iteritems():
        print "\n\nviewType: ", viewType
        cameraTransform = director_utils.getCameraTransform(view.camera())
        pos, quat = transformUtils.poseFromTransform(cameraTransform)
        print "pos: ", pos
        print "quat: ", quat
        # add/update a frame item named after the view
        vis.updateFrame(cameraTransform, viewType + "_frame")
def publishCorrection(self, channel='POSE_YAW_LOCK'):
    """Publish the estimated pelvis orientation (position zeroed) as a pose_t message."""
    pelvisToWorld = self.getPelvisEstimate()
    _, quat = transformUtils.poseFromTransform(pelvisToWorld)
    msg = lcmbot.pose_t()
    msg.utime = robotStateJointController.lastRobotStateMessage.utime
    # only orientation is corrected; position is intentionally zero
    msg.pos = [0.0, 0.0, 0.0]
    msg.orientation = quat.tolist()
    lcmUtils.publish(channel, msg)
def getPlanningFrame(self):
    # Build a planning frame with the platform's orientation, centered on the
    # platform except that its lateral (y) coordinate tracks the feet midpoint.
    platformToWorld = self.platform.getChildFrame().transform
    worldToPlatform = platformToWorld.GetLinearInverse()
    feetMid = self.robotSystem.footstepsDriver.getFeetMidPoint(
        self.robotSystem.robotStateModel)
    footPosInPlatform = worldToPlatform.TransformPoint(feetMid.GetPosition())
    planPosInPlatform = self.dimensions / 2.0
    planPosInPlatform[1] = footPosInPlatform[1]
    planPosInWorld = platformToWorld.TransformPoint(planPosInPlatform)
    # orientation comes straight from the platform frame
    _, quat = transformUtils.poseFromTransform(platformToWorld)
    self.planToWorld = transformUtils.transformFromPose(planPosInWorld, quat)
def onFrameModified(obj):
    """Solve IK so the left hand tracks the modified frame's pose."""
    targetPos, targetQuat = transformUtils.poseFromTransform(obj.transform)
    handConstraints = [
        qsc,
        ik.WorldPositionConstraint(robot, leftHandBodyId, np.zeros((3,)),
                                   targetPos, targetPos, tspan),
        ik.WorldQuatConstraint(robot, leftHandBodyId, targetQuat, 0.0, tspan),
    ]
    solveAndDraw(handConstraints + footConstraints)
def onSpawnMesh(self):
    """Spawn an arrow-shaped mesh affordance at the current spawn frame."""
    arrow = DebugData()
    arrow.addArrow((0, 0, 0), (0, 0, 0.3))
    meshManager = affordanceitems.MeshAffordanceItem.getMeshManager()
    meshId = meshManager.add(arrow.getPolyData())
    description = dict(classname='MeshAffordanceItem',
                       Name='mesh',
                       Filename=meshId,
                       uuid=newUUID(),
                       pose=transformUtils.poseFromTransform(self.getSpawnFrame()))
    return self.affordanceManager.newAffordanceFromDescription(description)
def spawnBoxAffordanceAtFrame(self, boxFrame):
    """Spawn the green 'Switch Box' affordance with its face at boxFrame."""
    print('spawning switch box affordance')
    dims = [0.08, 0.19, 0.25]
    # offset by half the box depth along x so the frame sits on the box face
    boxFrame.PreMultiply()
    boxFrame.Translate(dims[0] / 2.0, 0.0, 0.0)
    desc = dict(classname='BoxAffordanceItem',
                Name='Switch Box',
                Dimensions=dims,
                pose=transformUtils.poseFromTransform(boxFrame),
                Color=[0, 1, 0])
    self.boxAffordance = segmentation.affordanceManager.newAffordanceFromDescription(desc)
    self.updateReachFrame()
def spawnRunningBoardAffordance(self):
    """Return the 'running board' box affordance, creating it under the feet if absent."""
    boxDimensions = [0.4, 1.0, 0.05]
    stanceFrame = self.robotSystem.footstepsDriver.getFeetMidPoint(
        self.robotSystem.robotStateModel, useWorldZ=False)
    boxFrame = transformUtils.copyFrame(stanceFrame)
    boxFrame.PreMultiply()
    # drop by half the thickness so the top surface is at stance height
    boxFrame.Translate(0.0, 0.0, -boxDimensions[2] / 2.0)
    box = om.findObjectByName('running board')
    if not box:
        desc = dict(classname='BoxAffordanceItem',
                    Name='running board',
                    Dimensions=boxDimensions,
                    pose=transformUtils.poseFromTransform(boxFrame))
        box = self.robotSystem.affordanceManager.newAffordanceFromDescription(desc)
    return box
def onLocalPlaneFit():
    """Fit a local plane around the picked point and spawn a matching box affordance."""
    fitPoints, fitNormal = segmentation.applyLocalPlaneFit(
        pointCloudObj.polyData,
        pickedPoint,
        searchRadius=0.1,
        searchRadiusEnd=0.2)
    planeObj = vis.showPolyData(fitPoints, 'local plane fit', color=[0, 1, 0])
    planeObj.setProperty('Point Size', 7)
    fields = segmentation.makePolyDataFields(planeObj.polyData)
    desc = dict(classname='BoxAffordanceItem',
                Name='local plane',
                Dimensions=list(fields.dims),
                pose=transformUtils.poseFromTransform(fields.frame))
    box = segmentation.affordanceManager.newAffordanceFromDescription(desc)
def poseFromTransform(transform):
    """Convert a vtkTransform into nested dicts with 'translation' and 'quaternion' entries."""
    pos, quat = transformUtils.poseFromTransform(transform)
    pos = pos.tolist()
    quat = quat.tolist()
    return {
        'translation': {'x': pos[0], 'y': pos[1], 'z': pos[2]},
        'quaternion': {'w': quat[0], 'x': quat[1], 'y': quat[2], 'z': quat[3]},
    }
def positionMessageFromFrame(transform):
    '''
    Given a vtkTransform, returns an lcmdrc.position_t message
    '''
    position, wxyz = transformUtils.poseFromTransform(transform)
    translation = lcmdrc.vector_3d_t()
    translation.x, translation.y, translation.z = position
    rotation = lcmdrc.quaternion_t()
    rotation.w, rotation.x, rotation.y, rotation.z = wxyz
    message = lcmdrc.position_3d_t()
    message.translation = translation
    message.rotation = rotation
    return message
def loadSDF(self, filename):
    """Load collision geometry from an SDF file and spawn matching affordances.

    Links with submodels are treated as articulated and skipped (printed
    notice). Mesh, box and cylinder collision geometries become affordance
    items; image, height-map, plane and sphere geometries are skipped with a
    printed notice.
    """
    sdf = pysdf.SDF(file=filename)
    for model in sdf.world.models:
        for link in model.links:
            if hasattr(link, 'submodels'):
                if len(link.submodels)>0:
                    print model.name+' - This is an articulated object - SKIPPING!'
                    break
            for col in link.collisions:
                # compose model -> link -> collision poses into one transform
                t1=transformUtils.getTransformFromNumpy(model.pose)
                t2=transformUtils.getTransformFromNumpy(link.pose)
                t3=transformUtils.getTransformFromNumpy(col.pose)
                t=t1
                t.PreMultiply()
                t.Concatenate(t2)
                t.PreMultiply()
                t.Concatenate(t3)
                p = transformUtils.poseFromTransform(t)
                # build a unique-ish name from model/link/collision names
                name = model.name
                # default light-grey color when the SDF supplies none
                color = col.color[0:3] if col.color is not None else [0.8,0.8,0.8]
                if len(link.name)>0 and link.name != model.name:
                    name+='-'+link.name
                if len(col.name)>0 and len(link.collisions)>1:
                    name+='-'+col.name
                if col.geometry_type=='mesh':
                    desc = dict(classname='MeshAffordanceItem',
                                Name=name,
                                uuid=newUUID(),
                                pose=p,
                                Color=color,
                                Filename=col.geometry_data['uri'],
                                Scale=map(float, col.geometry_data['scale'].split(' ')))
                    self.affordanceManager.newAffordanceFromDescription(desc)
                if col.geometry_type=='image':
                    print 'image geometry is unsupported - SKIPPING!'
                if col.geometry_type=='height_map':
                    print 'Height map geometry is unsupported - SKIPPING!'
                if col.geometry_type=='plane':
                    print 'Plane geometry is unsupported - SKIPPING!'
                if col.geometry_type=='sphere':
                    print 'Sphere geometry is unsupported - SKIPPING!'
                if col.geometry_type=='box':
                    desc = dict(classname='BoxAffordanceItem',
                                Name=name,
                                uuid=newUUID(),
                                pose=p,
                                Color=color,
                                Dimensions=map(float, col.geometry_data['size'].split(' ')))
                    self.affordanceManager.newAffordanceFromDescription(desc)
                if col.geometry_type=='cylinder':
                    desc = dict(classname='CylinderAffordanceItem',
                                Name=name,
                                uuid=newUUID(),
                                pose=p,
                                Color=color,
                                Radius=float(col.geometry_data['radius']),
                                Length = float(col.geometry_data['length']))
                    self.affordanceManager.newAffordanceFromDescription(desc)
def _getCommands(self, commands, constraintNames, suffix):
    """Append a WorldQuatConstraint command string for this link's target quaternion.

    Mutates `commands` (appends the generated command) and `constraintNames`
    (appends the generated variable name).
    """
    assert self.linkName
    varName = 'quat_constraint%s' % suffix
    constraintNames.append(varName)
    quat = self.quaternion
    # the target may be given as a transform; extract its quaternion part
    if isinstance(quat, vtk.vtkTransform):
        _, quat = poseFromTransform(quat)
    formatArgs = dict(varName=varName,
                      robotArg=self.robotArg,
                      tspan=self.getTSpanString(),
                      linkName=self.linkName,
                      quat=self.toColumnVectorString(quat),
                      #tolerance=repr(math.sin(math.radians(self.angleToleranceInDegrees))**2))
                      tolerance=repr(math.radians(self.angleToleranceInDegrees)))
    commands.append(
        '{varName} = WorldQuatConstraint({robotArg}, links.{linkName}, '
        '{quat}, {tolerance}, {tspan});'
        ''.format(**formatArgs))
def _getCommands(self, commands, constraintNames, suffix):
    """Append a WorldGazeOrientConstraint command string for this link.

    Mutates `commands` (appends the generated command) and `constraintNames`
    (appends the generated variable name).
    """
    assert self.linkName
    varName = 'gaze_orient_constraint%s' % suffix
    constraintNames.append(varName)
    quat = self.quaternion
    # the target may be given as a transform; extract its quaternion part
    if isinstance(quat, vtk.vtkTransform):
        _, quat = poseFromTransform(quat)
    formatArgs = dict(varName=varName,
                      robotArg=self.robotArg,
                      tspan=self.getTSpanString(),
                      linkName=self.linkName,
                      quat=self.toColumnVectorString(quat),
                      axis=self.toColumnVectorString(self.axis),
                      coneThreshold=repr(self.coneThreshold),
                      threshold=repr(self.threshold))
    commands.append(
        '{varName} = WorldGazeOrientConstraint({robotArg}, links.{linkName}, {axis}, '
        '{quat}, {coneThreshold}, {threshold}, {tspan});'
        ''.format(**formatArgs))
def toPositionQuaternionString(pose):
    """Format a pose (vtkTransform or length-7 array) as a column-vector string."""
    if isinstance(pose, vtk.vtkTransform):
        position, quaternion = poseFromTransform(pose)
        pose = np.concatenate((position, quaternion))
    assert pose.shape == (7,)
    return ConstraintBase.toColumnVectorString(pose)
def getTargetPose(self):
    """Return the (position, quaternion) pose of the target frame."""
    targetTransform = self.targetFrame.transform
    return transformUtils.poseFromTransform(targetTransform)
def moveHandModelToFrame(model, frame):
    """Pose the hand model's 6-dof floating base at the given frame."""
    pos, quat = transformUtils.poseFromTransform(frame)
    rpy = transformUtils.quaternionToRollPitchYaw(quat)
    model.model.setJointPositions(
        np.hstack((pos, rpy)),
        ['base_x', 'base_y', 'base_z', 'base_roll', 'base_pitch', 'base_yaw'])
def getFrameToOriginTransform(self, t): tCopy = transformUtils.copyFrame(t) tCopy.PostMultiply() tCopy.Concatenate(self.polaris.originFrame.transform.GetLinearInverse()) print transformUtils.poseFromTransform(tCopy) return tCopy
def __init__(self):
    """Set up the polaris vehicle model, hand-calibrated egress/driving frames,
    and a FrameSync that keeps every frame rigidly attached to the origin frame.

    Fix vs. original: `self.leftFootDrivingKneeInFrame` was assigned twice via
    vis.updateFrame with the same frame name, the first time with a stale
    transform that was immediately overwritten; the dead first call is removed.
    Commented-out historical calibration poses were also removed.
    """
    # NOTE(review): attribute name 'aprilTagSubsciber' is misspelled but kept,
    # since external code may reference it.
    self.aprilTagSubsciber = lcmUtils.addSubscriber('APRIL_TAG_TO_CAMERA_LEFT', lcmbotcore.rigid_transform_t, self.onAprilTag)

    # Vehicle mesh affordance at the identity pose; its child frame is the
    # origin that every other frame below is expressed relative to.
    pose = transformUtils.poseFromTransform(vtk.vtkTransform())
    desc = dict(classname='MeshAffordanceItem', Name='polaris',
                Filename='software/models/polaris/polaris_cropped.vtp', pose=pose)
    self.pointcloudAffordance = segmentation.affordanceManager.newAffordanceFromDescription(desc)
    self.originFrame = self.pointcloudAffordance.getChildFrame()

    # offset for . . . who knows why
    self.originToAprilTransform = transformUtils.transformFromPose(
        np.array([-0.038508, -0.00282131, -0.01000079]),
        np.array([9.99997498e-01, -2.10472556e-03, -1.33815696e-04, 7.46246794e-04]))

    # Hand-calibrated egress footstep frames.
    t = transformUtils.transformFromPose(
        np.array([0.10873244, 0.93162364, 0.40509084]),
        np.array([0.32997378, 0.88498408, -0.31780588, 0.08318602]))
    self.leftFootEgressStartFrame = vis.updateFrame(t, 'left foot start', scale=0.2, visible=True, parent=self.pointcloudAffordance)

    t = transformUtils.transformFromPose(
        np.array([0.265, 0.874, 0.274]),
        np.array([0.35290731, 0.93443693, -0.04181263, 0.02314636]))
    self.leftFootEgressMidFrame = vis.updateFrame(t, 'left foot mid', scale=0.2, visible=True, parent=self.pointcloudAffordance)

    t = transformUtils.transformFromPose(
        np.array([0.54339115, 0.89436275, 0.26681047]),
        np.array([0.34635985, 0.93680077, -0.04152008, 0.02674412]))
    self.leftFootEgressOutsideFrame = vis.updateFrame(t, 'left foot outside', scale=0.2, visible=True, parent=self.pointcloudAffordance)

    # Steering wheel affordance (pose re-calibrated; see file history for
    # earlier values).
    pose = [np.array([-0.78594663, 0.42026626, -0.23248139]),
            np.array([0.53047159, 0.46554963, -0.48086192, 0.52022615])]
    desc = dict(classname='CapsuleRingAffordanceItem', Name='Steering Wheel', uuid=newUUID(), pose=pose,
                Color=[1, 0, 0], Radius=float(0.18), Segments=20)
    self.steeringWheelAffordance = segmentation.affordanceManager.newAffordanceFromDescription(desc)

    # Accelerator pedal affordance.
    pose = [np.array([-0.05907324, 0.80460545, 0.45439687]),
            np.array([0.14288327, 0.685944, -0.703969, 0.11615873])]
    desc = dict(classname='BoxAffordanceItem', Name='pedal', Dimensions=[0.12, 0.33, 0.04], pose=pose, Color=[0, 1, 0])
    self.pedalAffordance = segmentation.affordanceManager.newAffordanceFromDescription(desc)

    # Hand-calibrated driving footstep frames.
    t = transformUtils.transformFromPose(
        np.array([0.0384547, 0.89273742, 0.24140762]),
        np.array([0.26331831, 0.915796, -0.28055337, 0.11519963]))
    self.leftFootPedalSwingFrame = vis.updateFrame(t, 'left foot pedal swing', scale=0.2, visible=True, parent=self.pointcloudAffordance)

    t = transformUtils.transformFromPose(
        np.array([-0.9012598, -0.05709763, 0.34897024]),
        np.array([0.03879584, 0.98950919, 0.03820214, 0.13381721]))
    self.leftFootDrivingFrame = vis.updateFrame(t, 'left foot driving', scale=0.2, visible=True, parent=self.pointcloudAffordance)

    t = transformUtils.transformFromPose(
        np.array([-0.12702725, 0.92068409, 0.27209386]),
        np.array([0.2062255, 0.92155886, -0.30781119, 0.11598529]))
    self.leftFootDrivingKneeInFrame = vis.updateFrame(t, 'left foot driving knee in', scale=0.2, visible=True, parent=self.pointcloudAffordance)

    t = transformUtils.transformFromPose(
        np.array([-0.13194951, 0.89871423, 0.24956246]),
        np.array([0.21589082, 0.91727326, -0.30088849, 0.14651633]))
    self.leftFootOnPedal = vis.updateFrame(t, 'left foot on pedal', scale=0.2, visible=True, parent=self.pointcloudAffordance)

    t = transformUtils.transformFromPose(
        np.array([0.17712239, 0.87619935, 0.27001509]),
        np.array([0.33484372, 0.88280787, -0.31946488, 0.08044963]))
    self.leftFootUpFrame = vis.updateFrame(t, 'left foot up frame', scale=0.2, visible=True, parent=self.pointcloudAffordance)

    t = transformUtils.transformFromPose(
        np.array([0.47214939, -0.04856998, 0.01375837]),
        np.array([6.10521653e-03, 4.18621358e-04, 4.65520611e-01, 8.85015882e-01]))
    self.rightHandGrabFrame = vis.updateFrame(t, 'right hand grab bar', scale=0.2, visible=True, parent=self.pointcloudAffordance)

    # Keep every frame rigidly attached to the origin frame: edits to the
    # origin propagate outward, while edits to the attached frames are
    # ignored by the sync (ignoreIncoming=True).
    self.frameSync = vis.FrameSync()
    self.frameSync.addFrame(self.originFrame)
    self.frameSync.addFrame(self.pointcloudAffordance.getChildFrame(), ignoreIncoming=True)
    self.frameSync.addFrame(self.leftFootEgressStartFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.leftFootEgressMidFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.leftFootEgressOutsideFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.steeringWheelAffordance.getChildFrame(), ignoreIncoming=True)
    self.frameSync.addFrame(self.pedalAffordance.getChildFrame(), ignoreIncoming=True)
    self.frameSync.addFrame(self.leftFootPedalSwingFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.leftFootDrivingFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.leftFootDrivingKneeInFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.leftFootOnPedal, ignoreIncoming=True)
    self.frameSync.addFrame(self.leftFootUpFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.rightHandGrabFrame, ignoreIncoming=True)
def poseFromAffordance(aff):
    """Build a urdf.Pose (position + roll/pitch/yaw) from an affordance.

    The pose is taken from the affordance's child frame transform.
    Fix vs. original: the unused quaternion local has been removed; only the
    position is taken from poseFromTransform, the orientation comes from
    rollPitchYawFromTransform.
    """
    t = aff.getChildFrame().transform
    position, _ = transformUtils.poseFromTransform(t)
    rpy = transformUtils.rollPitchYawFromTransform(t)
    return urdf.Pose(position, rpy)
def getCropBoxFrame(self):
    """Return the (position, quaternion) pose of the crop box frame.

    Fix vs. original: this accessor computed and printed the pose but
    returned None, so callers could not use the value. It now returns the
    (pos, quat) tuple; the debug print is kept for backward compatibility.
    """
    transform = self.cube_vis.getChildFrame().transform
    pos, quat = transformUtils.poseFromTransform(transform)
    print (pos, quat)
    return pos, quat
def testIkPlan():
    """Interactive IK smoke test: pins both feet, keeps the robot
    quasi-static, and re-solves IK for the left hand every time the editable
    'left hand frame' is moved, displaying the solution as 'pose_end'.
    """
    ikPlanner = robotSystem.ikPlanner
    # Solve locally through pydrake-style bindings, not the matlab bridge.
    ikPlanner.pushToMatlab = False
    constraints = buildConstraints()
    poses = ce.getPlanPoses(constraints, ikPlanner)
    robot = loadRigidBodyTree(getIkUrdfFilename())
    #for i, body in enumerate(robot.bodies):
    #    print i, body.linkname

    # Single-knot-point problem: all constraints active at t = 1.0.
    tspan = np.array([1.0, 1.0])

    handLinkNames = [ikPlanner.getHandLink('left'), ikPlanner.getHandLink('right')]
    footLinkNames = [robotSystem.directorConfig['leftFootLink'], robotSystem.directorConfig['rightFootLink']]
    leftHandBodyId = robot.findLink(str(handLinkNames[0])).body_index
    rightHandBodyId = robot.findLink(str(handLinkNames[1])).body_index
    leftFootBodyId = robot.findLink(str(footLinkNames[0])).body_index
    rightFootBodyId = robot.findLink(str(footLinkNames[1])).body_index

    lfootContactPts, rfootContactPts = robotSystem.footstepsDriver.getContactPts()

    # Pin each foot at its current world pose (zero position tolerance, exact
    # orientation).
    footConstraints = []
    for linkName in footLinkNames:
        t = robotSystem.robotStateModel.getLinkFrame(linkName)
        pos, quat = transformUtils.poseFromTransform(t)
        pc = ik.WorldPositionConstraint(robot, robot.findLink(str(linkName)).body_index, np.zeros((3,)), pos, pos, tspan)
        qc = ik.WorldQuatConstraint(robot, robot.findLink(str(linkName)).body_index, quat, 0.0, tspan)
        footConstraints.append(pc)
        footConstraints.append(qc)

    # Keep the center of mass over the (shrunken) foot support polygon.
    qsc = ik.QuasiStaticConstraint(robot, tspan)
    qsc.setActive(True)
    qsc.setShrinkFactor(0.5)
    qsc.addContact([leftFootBodyId], lfootContactPts.transpose())
    qsc.addContact([rightFootBodyId], rfootContactPts.transpose())

    #q_seed = robot.getZeroConfiguration()
    q_seed = robotSystem.robotStateJointController.getPose('q_nom').copy()

    # todo, joint costs, and other options
    options = ik.IKoptions(robot)

    def solveAndDraw(constraints):
        # Solve a single IK and display the result as the 'pose_end' pose.
        results = ik.inverseKinSimple(robot, q_seed, q_seed, constraints, options)
        pose = results.q_sol
        robotSystem.robotStateJointController.setPose('pose_end', pose)

    def onFrameModified(obj):
        # Re-solve whenever the user drags the hand target frame: constrain
        # the left hand to the frame's pose, plus the standing constraints.
        pos, quat = transformUtils.poseFromTransform(obj.transform)
        pc = ik.WorldPositionConstraint(robot, leftHandBodyId, np.zeros((3,)), pos, pos, tspan)
        qc = ik.WorldQuatConstraint(robot, leftHandBodyId, quat, 0.0, tspan)
        solveAndDraw([qsc, pc, qc] + footConstraints)

    # Editable target frame, initialized at the current left hand pose.
    frameObj = vis.updateFrame(robotSystem.robotStateModel.getLinkFrame(ikPlanner.getHandLink('left')), 'left hand frame')
    frameObj.setProperty('Edit', True)
    frameObj.setProperty('Scale', 0.2)
    frameObj.connectFrameModified(onFrameModified)
def updateIKConstraints(self):
    """Rebuild self.constraintSet for a reach plan from the current UI state.

    Reads the base/back/feet/hand combo boxes and assembles quasi-static,
    posture, foot, and end-effector grasp constraints from 'reach_start' to
    'reach_end'.

    Fix vs. original: createSlidingFootConstraints was called twice (once for
    elements [:2], once for [2:]); it is now called once and extended in full.
    """
    startPoseName = 'reach_start'
    startPose = self.planningUtils.getPlanningStartPose()
    self.ikPlanner.addPose(startPose, startPoseName)

    constraints = []
    constraints.append(self.ikPlanner.createQuasiStaticConstraint())
    constraints.append(self.ikPlanner.createLockedNeckPostureConstraint(startPoseName))

    # Get base constraint
    if self.getComboText(self.ui.baseComboBox) == 'Fixed':
        constraints.append(self.ikPlanner.createLockedBasePostureConstraint(startPoseName, lockLegs=False))
        self.ikPlanner.setBaseLocked(True)
    elif self.getComboText(self.ui.baseComboBox) == 'XYZ only':
        constraints.append(self.ikPlanner.createXYZMovingBasePostureConstraint(startPoseName))
        constraints.append(self.ikPlanner.createKneePostureConstraint(self.kneeJointLimits))
    elif self.getComboText(self.ui.baseComboBox) == 'Limited':
        constraints.append(self.ikPlanner.createMovingBaseSafeLimitsConstraint())
        constraints.append(self.ikPlanner.createKneePostureConstraint(self.kneeJointLimits))
        self.ikPlanner.setBaseLocked(False)

    # Get back constraint
    if self.getComboText(self.ui.backComboBox) == 'Fixed':
        constraints.append(self.ikPlanner.createLockedBackPostureConstraint(startPoseName))
        self.ikPlanner.setBackLocked(True)
    elif self.getComboText(self.ui.backComboBox) == 'Limited':
        constraints.append(self.ikPlanner.createMovingBackLimitedPostureConstraint())
        self.ikPlanner.setBackLocked(False)

    # Get feet constraint
    if self.getComboText(self.ui.feetComboBox) == 'Fixed':
        # Pin both feet at their start pose (tight position and angle bounds).
        constraints.append(self.ikPlanner.createFixedLinkConstraints(startPoseName, self.ikPlanner.leftFootLink,
                                                                     tspan=[0.0, 1.0],
                                                                     lowerBound=-0.0001*np.ones(3),
                                                                     upperBound=0.0001*np.ones(3),
                                                                     angleToleranceInDegrees=0.1))
        constraints.append(self.ikPlanner.createFixedLinkConstraints(startPoseName, self.ikPlanner.rightFootLink,
                                                                     tspan=[0.0, 1.0],
                                                                     lowerBound=-0.0001*np.ones(3),
                                                                     upperBound=0.0001*np.ones(3),
                                                                     angleToleranceInDegrees=0.1))
    elif self.getComboText(self.ui.feetComboBox) == 'Sliding':
        constraints.extend(self.ikPlanner.createSlidingFootConstraints(startPoseName))

        # Ensure the end-pose's relative distance between two feet is the same as start pose
        [pos_left, quat_left] = transformUtils.poseFromTransform(self.robotStateModel.getLinkFrame(self.ikPlanner.leftFootLink))
        [pos_right, quat_right] = transformUtils.poseFromTransform(self.robotStateModel.getLinkFrame(self.ikPlanner.rightFootLink))
        dist = npla.norm(pos_left - pos_right)
        constraints.append(ikconstraints.PointToPointDistanceConstraint(bodyNameA=self.ikPlanner.leftFootLink,
                                                                        bodyNameB=self.ikPlanner.rightFootLink,
                                                                        lowerBound=np.array([dist - 0.0001]),
                                                                        upperBound=np.array([dist + 0.0001])))

    # Which hand(s) are reaching.
    sides = []
    if self.getReachHand() == 'Left':
        sides.append('left')
    elif self.getReachHand() == 'Right':
        sides.append('right')
    elif self.getReachHand() == 'Both':
        sides.append('left')
        sides.append('right')

    # Lock whichever arm is not reaching, if requested.
    if self.getComboText(self.ui.otherHandComboBox) == 'Fixed':
        if not 'left' in sides:
            self.ikPlanner.setArmLocked('left', True)
            constraints.append(self.ikPlanner.createLockedLeftArmPostureConstraint(startPoseName))
        if not 'right' in sides:
            self.ikPlanner.setArmLocked('right', True)
            constraints.append(self.ikPlanner.createLockedRightArmPostureConstraint(startPoseName))

    # Grasp constraints for each reaching hand, active at the end of the plan.
    for side in sides:
        linkName = self.ikPlanner.getHandLink(side)
        graspToHand = self.ikPlanner.newPalmOffsetGraspToHandFrame(side, self.palmOffsetDistance)
        graspToWorld = self.getGoalFrame(linkName)
        p, q = self.ikPlanner.createPositionOrientationGraspConstraints(side, graspToWorld, graspToHand)
        p.tspan = [1.0, 1.0]
        q.tspan = [1.0, 1.0]
        constraints.extend([p, q])
        constraints.append(self.ikPlanner.createActiveEndEffectorConstraint(linkName, self.ikPlanner.getPalmPoint(side)))

    self.constraintSet = ikplanner.ConstraintSet(self.ikPlanner, constraints, 'reach_end', startPoseName)
def onSpawnRing(self):
    """Spawn a capsule-ring affordance named 'ring' at the spawn frame."""
    spawnPose = transformUtils.poseFromTransform(self.getSpawnFrame())
    description = dict(classname='CapsuleRingAffordanceItem',
                       Name='ring',
                       uuid=newUUID(),
                       pose=spawnPose)
    return self.affordanceManager.newAffordanceFromDescription(description)
def toQuaternion(quat):
    """Return a quaternion of shape (4,); vtkTransforms are converted first."""
    if isinstance(quat, vtk.vtkTransform):
        quat = poseFromTransform(quat)[1]
    assert quat.shape == (4,)
    return quat
def onSpawnCylinder(self):
    """Spawn a cylinder affordance named 'cylinder' at the spawn frame."""
    spawnPose = transformUtils.poseFromTransform(self.getSpawnFrame())
    description = dict(classname='CylinderAffordanceItem',
                       Name='cylinder',
                       uuid=newUUID(),
                       pose=spawnPose)
    return self.affordanceManager.newAffordanceFromDescription(description)
def getPose(self):
    """Return (position, quaternion) of this object's child frame.

    Falls back to the identity pose when there is no child frame.
    """
    childFrame = self.getChildFrame()
    if childFrame:
        t = childFrame.transform
    else:
        t = vtk.vtkTransform()
    return transformUtils.poseFromTransform(t)