def processSnippet():
    """Fit footsteps to the stored terrain block snippet.

    Loads the snippet point cloud (stereo or lidar variant depending on the
    demo flag), finds horizontal surfaces, extracts block rectangles, and
    draws fitted footsteps relative to the standing (left) foot.
    """
    obj = om.getOrCreateContainer('continuous')
    om.getOrCreateContainer('cont debug', obj)

    if (continuouswalkingDemo.processContinuousStereo):
        polyData = ioUtils.readPolyData(os.path.join(dataDir, 'terrain/block_snippet_stereo.vtp'))
        # Stereo data is dense; downsample before segmentation.
        polyData = segmentation.applyVoxelGrid(polyData, leafSize=0.01)
    else:
        polyData = ioUtils.readPolyData(os.path.join(dataDir, 'terrain/block_snippet.vtp'))

    vis.updatePolyData(polyData, 'walking snapshot trimmed', parent='continuous')
    standingFootName = cwdemo.ikPlanner.leftFootLink

    standingFootFrame = robotStateModel.getLinkFrame(standingFootName)
    vis.updateFrame(standingFootFrame, standingFootName, parent='continuous', visible=False)

    # Step 2: find all the surfaces in front of the robot (about 0.75sec)
    clusters = segmentation.findHorizontalSurfaces(polyData)
    if (clusters is None):
        print("No cluster found, stop walking now!")
        return

    # Step 3: find the corners of the minimum bounding rectangles
    blocks, match_idx, groundPlane = cwdemo.extractBlocksFromSurfaces(clusters, standingFootFrame)

    footsteps = cwdemo.placeStepsOnBlocks(blocks, groundPlane, standingFootName, standingFootFrame)

    cwdemo.drawFittedSteps(footsteps)
def processSingleBlock(robotStateModel, whichFile=0):
    """Fit footsteps to one of the stored IHMC terrain datasets.

    :param robotStateModel: robot model used to locate the standing foot.
    :param whichFile: 0 loads the simple terrain; any other value loads the
        stairs terrain and switches the demo to stairs mode with midfoot/toe
        support.
    """
    if (whichFile == 0):
        polyData = ioUtils.readPolyData(os.path.join(dataDir, 'terrain/terrain_simple_ihmc.vtp'))
        vis.updatePolyData(polyData, 'terrain_simple_ihmc.vtp', parent='continuous')
    else:
        polyData = ioUtils.readPolyData(os.path.join(dataDir, 'terrain/terrain_stairs_ihmc.vtp'))
        cwdemo.chosenTerrain = 'stairs'
        cwdemo.supportContact = lcmdrc.footstep_params_t.SUPPORT_GROUPS_MIDFOOT_TOE
        vis.updatePolyData(polyData, 'terrain_stairs_ihmc.vtp', parent='continuous')

    # Foot link name differs between robot models.
    if drcargs.args().directorConfigFile.find('atlas') != -1:
        standingFootName = 'l_foot'
    else:
        standingFootName = 'leftFoot'

    standingFootFrame = robotStateModel.getLinkFrame(standingFootName)
    vis.updateFrame(standingFootFrame, standingFootName, parent='continuous', visible=False)

    # Step 1: filter the data down to a box in front of the robot:
    polyData = cwdemo.getRecedingTerrainRegion(polyData, footstepsDriver.getFeetMidPoint(robotStateModel))

    # Step 2: find all the surfaces in front of the robot (about 0.75sec)
    clusters = segmentation.findHorizontalSurfaces(polyData)
    if (clusters is None):
        # Fix: Python 3 print() call (original used a Python 2 print
        # statement, inconsistent with the rest of the file).
        print("No cluster found, stop walking now!")
        return

    # Step 3: find the corners of the minimum bounding rectangles
    blocks, match_idx, groundPlane = cwdemo.extractBlocksFromSurfaces(clusters, standingFootFrame)

    footsteps = cwdemo.placeStepsOnBlocks(blocks, groundPlane, standingFootName, standingFootFrame)

    cwdemo.drawFittedSteps(footsteps)
def updateReachFrame(self):
    """Recompute the pinch reach frame relative to the switch box and show it."""
    # Chain the stored pinch-to-box offset onto the box's current pose.
    boxTransform = om.findObjectByName('Switch Box').getChildFrame().transform
    reachTransform = transformUtils.copyFrame(self.pinchToBox)
    reachTransform.PostMultiply()
    reachTransform.Concatenate(boxTransform)
    vis.updateFrame(reachTransform, 'pinch reach frame', scale=0.2)
def visualize(self):
    """Visualize the initial grasp frame and the place-gripper frame.

    Fix: removed the dead commented-out template/observation drawing code and
    the unused ``vis_dict`` local that only the dead code referenced.
    """
    self._clear_visualization()

    vis.updateFrame(self.T_W_G, "Initial Grasp Frame", scale=0.15,
                    parent=self._vis_container)
    vis.updateFrame(self.T_W_Gn, 'Place Gripper Frame', scale=0.15,
                    parent=self._vis_container)
def onVisualizeButton(self):
    """Show the goal frame currently selected in the combo box."""
    print("viz", self.ui.comboBox.currentText)
    selectedFrame = self.getFrameFromCombo()
    vis.updateFrame(selectedFrame, "Current Goal", parent="navigation")
def pickupObject(self):
    """Lift the grasped object, stow it, release, and return to pre-grasp.

    Sequence: raise the end effector straight up by ``pick_up_distance``,
    move to ``above_table_pre_grasp``, move to the stow pose, open the
    gripper, then return to ``above_table_pre_grasp``.
    """
    endEffectorFrame = self.tfBuffer.lookup_transform(self.config['base_frame_id'], self.config['end_effector_frame_id'], rospy.Time(0))

    eeFrameVtk = spartanUtils.transformFromROSTransformMsg(endEffectorFrame.transform)
    eeFrameVtk.PostMultiply()
    eeFrameVtk.Translate(0, 0, self.config['pick_up_distance'])

    vis.updateFrame(eeFrameVtk, 'pickup frame')

    poseStamped = self.vtkFrameToPoseMsg(eeFrameVtk)
    # Fix: removed unused local `speed = 10`; all speeds come from
    # self.graspingParams['speed'].
    params = self.getParamsForCurrentLocation()
    above_table_pre_grasp = params['poses']['above_table_pre_grasp']
    ik_response = self.robotService.runIK(poseStamped, seedPose=above_table_pre_grasp, nominalPose=above_table_pre_grasp)

    if ik_response.success:
        self.robotService.moveToJointPosition(ik_response.joint_state.position, maxJointDegreesPerSecond=self.graspingParams['speed']['slow'])

    stow_pose = self.getStowPose()

    # move to above_table_pre_grasp
    self.robotService.moveToJointPosition(above_table_pre_grasp, maxJointDegreesPerSecond=self.graspingParams['speed']['stow'])

    # move to stow_pose
    self.robotService.moveToJointPosition(stow_pose, maxJointDegreesPerSecond=self.graspingParams['speed']['stow'])

    self.gripperDriver.sendOpenGripperCommand()
    rospy.sleep(0.5)

    # move to above_table_pre_grasp
    self.robotService.moveToJointPosition(above_table_pre_grasp, maxJointDegreesPerSecond=self.graspingParams['speed']['fast'])
def initRobotTeleopCameraFrame(robotSystem):
    """Create hidden debug frames for the teleop model's end effector and
    camera, keeping them synced as the teleop model moves.

    The camera frame is attached to the EE frame via a FrameSync, and a
    model-changed callback refreshes the EE frame whenever the teleop
    robot model is updated.
    """
    endEffectorToWorld = robotSystem.teleopRobotModel.getLinkFrame(
        'iiwa_link_ee')
    frameObj = vis.updateFrame(endEffectorToWorld, 'iiwa_link_ee_teleop',
                               parent='debug', scale=0.15, visible=False)
    cameraToEE = getCameraToKukaEndEffectorFrame()
    cameraToWorld = transformUtils.concatenateTransforms(
        [cameraToEE, endEffectorToWorld])
    obj = vis.updateFrame(cameraToWorld, 'camera frame teleop',
                          parent=frameObj, scale=0.15, visible=False)
    # Keep the camera frame rigidly attached to the EE frame.
    frameObj.getFrameSync().addFrame(obj, ignoreIncoming=True)

    def updateFrame(model):
        # Callback: refresh the EE debug frame from the moved teleop model.
        EEToWorld = model.getLinkFrame('iiwa_link_ee')
        frameObj = vis.updateFrame(EEToWorld, 'iiwa_link_ee_teleop',
                                   parent='debug', scale=0.15, visible=False)

    # setup the callback so it updates when we move the teleop model
    robotSystem.teleopRobotModel.connectModelChanged(updateFrame)
def onVisualizeButton(self):
    """Show the goal frame currently selected in the combo box."""
    # Fix: Python 3 print() call (original used a Python 2 print statement,
    # inconsistent with the other implementation of this handler in the file).
    print("viz", self.ui.comboBox.currentText)
    frame = self.getFrameFromCombo()
    # vis.showFrame(frame, self.getSelectedGoalName(), parent="navigation", scale=0.35, visible=True)
    # vis.updateFrame(frame, self.getSelectedGoalName(), parent="navigation")
    vis.updateFrame(frame, "Current Goal", parent="navigation")
def setupImage(self, imageNumber, saveColorLabeledImages=False, saveLabeledImages=False, savePoses=False):
    """
    Loads the given imageNumber as background. Also updates the poses of the
    objects to match the image.

    Returns False when the rgb image for imageNumber does not exist, True
    otherwise.
    """
    baseName = utils.getImageBasenameFromImageNumber(imageNumber, self.pathDict)
    imageFilename = baseName + "_rgb.png"
    if not os.path.exists(imageFilename):
        return False

    # Fix: close the utime file deterministically (original leaked the handle).
    with open(baseName + "_utime.txt", 'r') as utimeFile:
        utime = int(utimeFile.read())

    # update camera transform
    cameraToCameraStart = self.getCameraPoseAtUTime(utime)
    t = cameraToCameraStart
    vis.updateFrame(t, 'camera pose')
    setCameraTransform(self.view.camera(), t)
    cameraPose = om.findObjectByName('camera pose')
    cameraPose.setProperty('Visible', False)

    if saveColorLabeledImages:
        self.loadBackgroundImage(imageFilename)
        #self.view.forceRender()  # render it again
        self.captureColorImage(baseName + '_color_labels.png')

    if saveLabeledImages:
        self.captureLabelImage(baseName + '_labels.png')

    if savePoses:
        self.saveObjectPoses(imageFilename.replace("_rgb.png", "_labels.png"), cameraToCameraStart, baseName)

    return True
def setupImage(self, imageNumber, saveLabeledImages=False):
    """
    Loads the given imageNumber as background. Also updates the poses of the
    objects to match the image.

    Returns False when the rgb image for imageNumber does not exist, True
    otherwise.
    """
    baseName = cutils.getImageBasenameFromImageNumber(
        imageNumber, self.pathDict)
    imageFilename = baseName + "_rgb.png"
    if not os.path.exists(imageFilename):
        return False

    # Fix: close the utime file deterministically (original leaked the handle).
    with open(baseName + "_utime.txt", 'r') as utimeFile:
        utime = int(utimeFile.read())

    # update camera transform
    cameraToCameraStart = self.getCameraPoseAtUTime(utime)
    t = transformUtils.concatenateTransforms(
        [cameraToCameraStart, cutils.getDefaultCameraToWorld()])
    vis.updateFrame(t, 'camera pose')
    setCameraTransform(self.globalsDict['view'].camera(), t)
    cameraPose = self.globalsDict['om'].findObjectByName('camera pose')
    cameraPose.setProperty('Visible', False)

    self.loadBackgroundImage(imageFilename)
    self.globalsDict['view'].forceRender()  # render it again

    if saveLabeledImages:
        self.saveImages(baseName)

    return True
def showCameraFrame(self):
    """Visualize the depth camera pose under the 'Camera Transform' folder."""
    folder = om.getOrCreateContainer('Camera Transform')
    vis.updateFrame(self.getCameraToWorld(), 'depth camera frame',
                    scale=0.15, parent=folder)
def processSnippet():
    """Fit footsteps to the stored terrain block snippet (l_foot standing)."""
    obj = om.getOrCreateContainer("continuous")
    om.getOrCreateContainer("cont debug", obj)

    if continuouswalkingDemo.processContinuousStereo:
        polyData = ioUtils.readPolyData(os.path.join(dataDir, "terrain/block_snippet_stereo.vtp"))
        # Stereo data is dense; downsample before segmentation.
        polyData = segmentation.applyVoxelGrid(polyData, leafSize=0.01)
    else:
        polyData = ioUtils.readPolyData(os.path.join(dataDir, "terrain/block_snippet.vtp"))

    vis.updatePolyData(polyData, "walking snapshot trimmed", parent="continuous")
    standingFootName = "l_foot"

    standingFootFrame = robotStateModel.getLinkFrame(standingFootName)
    vis.updateFrame(standingFootFrame, standingFootName, parent="continuous", visible=False)

    # Step 2: find all the surfaces in front of the robot (about 0.75sec)
    clusters = segmentation.findHorizontalSurfaces(polyData)
    if clusters is None:
        # Fix: Python 3 print() call (original used a Python 2 print
        # statement, inconsistent with the rest of the file).
        print("No cluster found, stop walking now!")
        return

    # Step 3: find the corners of the minimum bounding rectangles
    blocks, match_idx, groundPlane = cwdemo.extractBlocksFromSurfaces(clusters, standingFootFrame)

    footsteps = cwdemo.placeStepsOnBlocks(blocks, groundPlane, standingFootName, standingFootFrame)

    cwdemo.drawFittedSteps(footsteps)
def computeBoardStanceFrame(self):
    """Compute the symmetric and asymmetric robot stance frames for the board.

    Both stances are derived from the left grasp frame; the resulting frames
    are added (hidden) under the board affordance and pushed to the DRC view.
    """
    objectTransform = transformUtils.copyFrame(
        self.graspLeftFrame.transform)
    self.relativeStanceTransform = transformUtils.copyFrame(
        transformUtils.frameFromPositionAndRPY(self.relativeStanceXYZ,
                                               self.relativeStanceRPY))
    robotStance = self.b.computeRobotStanceFrame(
        objectTransform, self.relativeStanceTransform)
    self.stanceFrame = vis.updateFrame(robotStance, 'board stance',
                                       parent=self.affordance,
                                       visible=False, scale=0.2)
    self.stanceFrame.addToView(app.getDRCView())

    # Asymmetric stance: same RPY, different XYZ offset.
    objectTransform = transformUtils.copyFrame(
        self.graspLeftFrame.transform)
    self.relativeAsymmetricStanceTransform = transformUtils.copyFrame(
        transformUtils.frameFromPositionAndRPY(
            self.relativeAsymmetricStanceXYZ, self.relativeStanceRPY))
    robotAsymmetricStance = self.b.computeRobotStanceFrame(
        objectTransform, self.relativeAsymmetricStanceTransform)
    self.asymmetricStanceFrame = vis.updateFrame(robotAsymmetricStance,
                                                 'board Asymmetric stance',
                                                 parent=self.affordance,
                                                 visible=False, scale=0.2)
    self.asymmetricStanceFrame.addToView(app.getDRCView())
def makeTargetCameraTransform(self, rotateX=-40, rotateY=0, translateZ=-0.8, visualize=True):
    """Build a camera transform relative to the table frame.

    The table frame is rotated by *rotateX*/*rotateY* degrees and offset
    along z by *translateZ* (meters, negative = back from the table). When
    *visualize* is True the frame is shown; a camera frustum visualizer is
    attached only the first time the frame is created.

    Returns the resulting transform.
    """
    t = transformUtils.copyFrame(self.tableFrame.transform)
    t.PreMultiply()
    t.RotateX(rotateX)
    t.RotateY(rotateY)
    t.Translate((0, 0, translateZ))

    if visualize:
        name = 'target camera frame'
        if om.findObjectByName(name) is None:
            # First time: create the frame and attach a frustum visualizer.
            frame = vis.updateFrame(t, name, scale=0.15)
            cameraFrustum = CameraFrustumVisualizer(self.imageManager,
                                                    self.cameraName,
                                                    frame, verbose=False,
                                                    visFolder=frame)
            self.frustumVis['target camera'] = cameraFrustum
        else:
            frame = vis.updateFrame(t, name, scale=0.15)
        self.targetCameraFrame = frame

    return t
def computeBoardReachFrames(self):
    ''' Reach ~10cm short of the grasp frame '''
    # Left reach: back off along x by reachDepth from the left grasp pose,
    # then express the result in the affordance frame.
    reachLeftXYZ = copy.deepcopy(self.graspLeftHandFrameXYZ)
    reachLeftXYZ[0] = reachLeftXYZ[0] - self.reachDepth
    reachLeftRPY = copy.deepcopy(self.graspLeftHandFrameRPY)

    tl = transformUtils.frameFromPositionAndRPY(reachLeftXYZ, reachLeftRPY)
    tl.Concatenate(self.frame.transform)
    self.reachLeftFrame = vis.updateFrame(tl, 'reach left frame',
                                          parent=self.affordance,
                                          visible=False, scale=0.2)
    self.reachLeftFrame.addToView(app.getDRCView())

    # Right reach: same backoff applied to the right grasp pose.
    reachRightXYZ = copy.deepcopy(self.graspRightHandFrameXYZ)
    reachRightXYZ[0] = reachRightXYZ[0] - self.reachDepth
    reachRightRPY = copy.deepcopy(self.graspRightHandFrameRPY)

    tr = transformUtils.frameFromPositionAndRPY(reachRightXYZ, reachRightRPY)
    tr.Concatenate(self.frame.transform)
    self.reachRightFrame = vis.updateFrame(tr, 'reach right frame',
                                           parent=self.affordance,
                                           visible=False, scale=0.2)
    self.reachRightFrame.addToView(app.getDRCView())
def updateCameraPoseFromRobotKinematics(model):
    """Refresh the hidden end-effector debug frame from the robot kinematics."""
    eeToWorld = model.getLinkFrame('iiwa_link_ee')
    vis.updateFrame(eeToWorld, 'iiwa_link_ee', parent='debug',
                    scale=0.15, visible=False)
def processSingleBlock(robotStateModel, whichFile=0):
    """Fit footsteps to one of the stored IHMC terrain datasets.

    :param robotStateModel: robot model used to locate the standing foot.
    :param whichFile: 0 loads the simple terrain; any other value loads the
        flagstones terrain and switches the demo to stairs mode.
    """
    if whichFile == 0:
        polyData = ioUtils.readPolyData(os.path.join(dataDir, "terrain/terrain_simple_ihmc.vtp"))
        vis.updatePolyData(polyData, "terrain_simple_ihmc.vtp", parent="continuous")
    else:
        # NOTE(review): the file read is the flagstones terrain, but the vis
        # label and chosenTerrain say "stairs" — confirm this is intended.
        polyData = ioUtils.readPolyData(os.path.join(dataDir, "terrain/terrain_flagstones_ihmc.vtp"))
        cwdemo.chosenTerrain = "stairs"
        cwdemo.supportContact = lcmdrc.footstep_params_t.SUPPORT_GROUPS_MIDFOOT_TOE
        vis.updatePolyData(polyData, "terrain_stairs_ihmc.vtp", parent="continuous")

    # Foot link name differs between robot models.
    if drcargs.args().directorConfigFile.find("atlas") != -1:
        standingFootName = "l_foot"
    else:
        standingFootName = "leftFoot"

    standingFootFrame = robotStateModel.getLinkFrame(standingFootName)
    vis.updateFrame(standingFootFrame, standingFootName, parent="continuous", visible=False)

    # Step 1: filter the data down to a box in front of the robot:
    polyData = cwdemo.getRecedingTerrainRegion(polyData, footstepsDriver.getFeetMidPoint(robotStateModel))

    # Step 2: find all the surfaces in front of the robot (about 0.75sec)
    clusters = segmentation.findHorizontalSurfaces(polyData)
    if clusters is None:
        # Fix: Python 3 print() call (original used a Python 2 print
        # statement, inconsistent with the rest of the file).
        print("No cluster found, stop walking now!")
        return

    # Step 3: find the corners of the minimum bounding rectangles
    blocks, match_idx, groundPlane = cwdemo.extractBlocksFromSurfaces(clusters, standingFootFrame)

    footsteps = cwdemo.placeStepsOnBlocks(blocks, groundPlane, standingFootName, standingFootFrame)

    cwdemo.drawFittedSteps(footsteps)
def removePlaneAndBeyond(polyData, expectedNormal=[1,0,0], filterRange=[-np.inf, -0.03], whichAxis=1, whichAxisLetter='y', percentile = 95):
    """Fit a plane (e.g. a back wall) near one extreme of the cloud, then keep
    only points whose signed distance to that plane lies in *filterRange*.

    :param polyData: input point cloud; a 'dist_to_plane' scalar is added to it.
    :param expectedNormal: expected plane normal for the fit.
    :param filterRange: [min, max] signed distance band to keep.
    :param whichAxis: point-coordinate column used to pick the search region.
    :param whichAxisLetter: name of the scalar field matching *whichAxis*.
    :param percentile: which extreme to search near (>50 = far side).

    NOTE(review): the list default arguments are shared across calls — safe
    only while never mutated inside the function; confirm before refactoring.
    """
    yvalues = vnp.getNumpyFromVtk(polyData, 'Points')[:, whichAxis]
    backY = np.percentile(yvalues, percentile)

    # Search for the plane only near the chosen percentile extreme.
    if ( percentile > 50):
        searchRegion = segmentation.thresholdPoints(polyData, whichAxisLetter, [backY - 0.1, np.inf])
    else:
        searchRegion = segmentation.thresholdPoints(polyData, whichAxisLetter, [-np.inf, backY + 0.1])

    vis.updatePolyData(searchRegion, 'search region', parent="segmentation", colorByName=whichAxisLetter, visible=False)

    # find the plane of the back wall, remove it and the points behind it:
    _, origin, normal = segmentation.applyPlaneFit(searchRegion, distanceThreshold=0.02, expectedNormal=expectedNormal, perpendicularAxis=expectedNormal, returnOrigin=True)

    # Signed distance of every point to the fitted plane.
    points = vnp.getNumpyFromVtk(polyData, 'Points')
    dist = np.dot(points - origin, normal)
    vnp.addNumpyToVtk(polyData, dist, 'dist_to_plane')

    backFrame = transformUtils.getTransformFromOriginAndNormal(origin, normal, normalAxis=2)
    vis.updateFrame(backFrame, 'back frame', parent='segmentation', scale=0.15, visible=False)
    vis.updatePolyData(polyData, 'dist to back', parent='segmentation', visible=False)

    # Keep only points inside the requested distance band.
    polyData = segmentation.thresholdPoints(polyData, 'dist_to_plane', filterRange)
    vis.updatePolyData(polyData, 'back off and all', parent='segmentation', visible=False)

    return polyData
def visFootstepFrames(self):
    """Visualize each planned footstep frame transformed into world frame.

    Fix: use range() instead of the Python-2-only xrange() — the sibling
    implementation of this method in this file already uses range().
    """
    for n in range(1, self.numFootsteps + 1):
        fToPlan = self.footstepsToPlan[n - 1]
        # NOTE: Concatenate mutates the stored transform in place.
        fToWorld = fToPlan
        fToWorld.PostMultiply()
        fToWorld.Concatenate(self.planToWorld)
        frameName = 'step_' + str(n) + 'ToWorld'
        vis.updateFrame(fToWorld, frameName)
def visFootstepFrames(self):
    """Draw every planned footstep, transformed from plan frame to world."""
    for stepNum in range(1, self.numFootsteps + 1):
        stepToWorld = self.footstepsToPlan[stepNum - 1]
        # NOTE: Concatenate mutates the stored transform in place.
        stepToWorld.PostMultiply()
        stepToWorld.Concatenate(self.planToWorld)
        vis.updateFrame(stepToWorld, 'step_{0}ToWorld'.format(stepNum))
def moveToGraspFrame(self, graspFrame):
    """Move through a pre-grasp pose, then on to the grasp pose.

    NOTE(review): the pre-grasp frame is computed from self.graspFrame, not
    from the *graspFrame* argument — confirm whether the argument should be
    used instead (they may be kept equal by the caller).
    """
    preGraspFrame = transformUtils.concatenateTransforms(
        [self.preGraspToGraspTransform, self.graspFrame])

    vis.updateFrame(preGraspFrame, 'pre grasp frame', scale=0.15)
    vis.updateFrame(graspFrame, 'grasp frame', scale=0.15)

    self.moveToFrame(preGraspFrame)
    # rospy.sleep(1.0)
    self.moveToFrame(graspFrame)
def planFootOut(self, startPose=None, finalFootHeight=0.05):
    """Plan a left-foot egress trajectory out of the vehicle.

    Solves an end-pose IK with the left foot over the platform, then a
    keyframe trajectory through lift / mid / outside waypoints while
    supporting on the right foot.

    :param startPose: starting joint pose; defaults to the current
        planning start pose.
    :param finalFootHeight: target height of the left foot over the platform.
    :return: the published plan (with right-foot support).
    """
    if startPose is None:
        startPose = self.getPlanningStartPose()
    startPoseName = 'q_egress_start'
    self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
    endPoseName = 'q_egress_end'

    utorsoFrame = self.robotSystem.ikPlanner.getLinkFrameAtPose('utorso', startPose)
    finalLeftFootFrame = self.computeLeftFootOverPlatformFrame(startPose, finalFootHeight)

    constraints = []
    constraints.extend(self.createUtorsoGazeConstraints([0.0, 1.0]))
    # Support on the right foot only while the left foot swings out.
    constraints.append(ik.QuasiStaticConstraint(leftFootEnabled=False, rightFootEnabled=True,
                                                pelvisEnabled=False, shrinkFactor=0.01))
    constraints.append(self.robotSystem.ikPlanner.createMovingBaseSafeLimitsConstraint())
    constraints.append(self.robotSystem.ikPlanner.createLockedLeftArmPostureConstraint(startPoseName))
    constraints.append(self.robotSystem.ikPlanner.createLockedRightArmPostureConstraint(startPoseName))
    #constraints.append(self.robotSystem.ikPlanner.createLockedBackPostureConstraint(startPoseName))
    constraints.append(self.robotSystem.ikPlanner.createKneePostureConstraint([0.7, 2.5]))
    constraints.append(self.robotSystem.ikPlanner.createFixedLinkConstraints(startPoseName, 'r_foot'))
    constraints.extend(self.createLeftFootPoseConstraint(finalLeftFootFrame, tspan=[1, 1]))

    constraintSet = ConstraintSet(self.robotSystem.ikPlanner, constraints, endPoseName, startPoseName)
    constraintSet.ikParameters = IkParameters(usePointwise=True, maxBaseRPYDegreesPerSecond=10,
                                              rescaleBodyNames=['l_foot'],
                                              rescaleBodyPts=[0.0, 0.0, 0.0],
                                              maxBodyTranslationSpeed=self.maxFootTranslationSpeed)
    #constraintSet.seedPoseName = 'q_start'
    #constraintSet.nominalPoseName = 'q_start'

    constraintSet.runIk()

    # Lift frame: raise the foot to the height of the outside egress frame.
    footFrame = self.robotSystem.ikPlanner.getLinkFrameAtPose('l_foot', startPose)
    t = transformUtils.frameFromPositionAndRPY([0, 0, self.polaris.leftFootEgressOutsideFrame.transform.GetPosition()[2] - footFrame.GetPosition()[2]], [0, 0, 0])
    liftFrame = transformUtils.concatenateTransforms([footFrame, t])
    vis.updateFrame(liftFrame, 'lift frame')

    # Keep the foot orientation fixed during the initial lift segment.
    c = ik.WorldFixedOrientConstraint()
    c.linkName = 'l_foot'
    c.tspan = [0.0, 0.1, 0.2]
    constraints.append(c)
    # Trajectory waypoints: lift, mid-egress, outside the vehicle.
    constraints.extend(self.createLeftFootPoseConstraint(liftFrame, tspan=[0.2, 0.2]))
    constraints.extend(self.createLeftFootPoseConstraint(self.polaris.leftFootEgressMidFrame, tspan=[0.5, 0.5]))
    constraints.extend(self.createLeftFootPoseConstraint(self.polaris.leftFootEgressOutsideFrame, tspan=[0.8, 0.8]))

    #plan = constraintSet.planEndPoseGoal(feetOnGround=False)
    keyFramePlan = constraintSet.runIkTraj()
    poseTimes, poses = planplayback.PlanPlayback.getPlanPoses(keyFramePlan)
    ts = [poseTimes[0]]
    supportsList = [['r_foot']]
    plan = self.publishPlanWithSupports(keyFramePlan, supportsList, ts, False)
    self.addPlan(plan)
    return plan
def run(self):
    """Visualize the feet midpoint frame for the configured pose."""
    poseName = self.properties.getProperty('Pose name')
    # EST_ROBOT_STATE uses the live estimated pose; anything else is a
    # named pose from the IK planner's joint controller.
    pose = (robotSystem.robotStateJointController.q.copy()
            if poseName == 'EST_ROBOT_STATE'
            else robotSystem.ikPlanner.jointController.getPose(poseName))
    modelAtPose = robotSystem.ikPlanner.getRobotModelAtPose(pose)
    feetMidFrame = robotSystem.footstepsDriver.getFeetMidPoint(modelAtPose)
    vis.updateFrame(feetMidFrame,
                    self.properties.getProperty('Frame output name'),
                    scale=0.2)
def showRGBCameraFrame(self):
    """Draw the RGB camera pose derived from the reference link's frame."""
    folder = om.getOrCreateContainer('Camera Transform')
    # linkFrame is a vtkTransform; camera-to-world is the camera offset
    # composed with the link's world pose.
    linkFrame = self.robotStateModel.getLinkFrame(self.referenceLinkName)
    cameraToWorld = transformUtils.concatenateTransforms(
        [self.rgbCameraToLinkTransform, linkFrame])
    vis.updateFrame(cameraToWorld, 'rgb camera frame', scale=0.15, parent=folder)
def drawModel(self, model):
    """Draw one mocap model: its marker objects, optional edges, and a frame.

    Marker objects are recreated (with a fresh random color) whenever the
    marker count changes; otherwise the existing objects are repositioned.
    """
    modelFolder = om.getOrCreateContainer(model.name, parentObj=self.getRootFolder())
    markerFolder = om.getOrCreateContainer('markers', parentObj=modelFolder)
    modelName = model.name

    markerObjects = markerFolder.children()

    if len(markerObjects) != model.nummarkers:
        # Marker count changed: rebuild all marker objects from scratch.
        for obj in markerObjects:
            om.removeFromObjectModel(obj)

        modelColor = vis.getRandomColor()
        markerObjects = self.createMarkerObjects(model.nummarkers, markerFolder, modelName, modelColor)
        self.models[modelName] = markerObjects

    if len(markerObjects):
        modelColor = markerObjects[0].getProperty('Color')

    # Position each marker (coordinates scaled by unitConversion).
    for i, marker in enumerate(model.markers):
        xyz = np.array(marker.xyz) * self.unitConversion
        markerFrame = vtk.vtkTransform()
        markerFrame.Translate(xyz)
        markerObjects[i].getChildFrame().copyFrame(markerFrame)

    if self.drawEdges:
        # Draw line segments between every pair of markers.
        d = DebugData()
        for m1 in model.markers:
            xyz = np.array(m1.xyz) * self.unitConversion
            for m2 in model.markers:
                xyz2 = np.array(m2.xyz) * self.unitConversion
                d.addLine(xyz, xyz2)
        edges = shallowCopy(d.getPolyData())
        vis.updatePolyData(edges, modelName + ' edges', color=modelColor, parent=modelFolder)
    else:
        edgesObj = om.findObjectByName(modelName + ' edges')
        om.removeFromObjectModel(edgesObj)

    computeModelFrame = True
    if computeModelFrame:
        # Model frame from the first segment: translation T and angles A
        # (converted with np.degrees, so A is presumably radians — confirm).
        pos = np.array(model.segments[0].T) * self.unitConversion
        rpy = np.array(model.segments[0].A)
        modelFrame = transformUtils.frameFromPositionAndRPY(
            pos, np.degrees(rpy))
        vis.updateFrame(modelFrame, modelName + ' frame', parent=modelFolder, scale=0.1)
def test(self):
    """Show both the optical-frame and body-frame camera poses for comparison."""
    self.showCameraFrame()
    bodyFrame = CameraTransform.transformOpticalFrameToBodyFrame(
        self.getCameraToWorld())
    folder = om.getOrCreateContainer('Camera Transform')
    vis.updateFrame(bodyFrame, 'depth camera body frame', scale=0.15, parent=folder)
def testGetCameraTransforms(self):
    """Print pose/quat of every view's camera and draw a frame for each.

    Fix: Python 3 — dict.items() and print() calls (original used the
    Python-2-only iteritems() and print statements).
    """
    vis.updateFrame(vtk.vtkTransform(), "origin frame", view=self.views['background'])

    for viewType, view in self.views.items():
        print("\n\nviewType: ", viewType)
        cameraTransform = director_utils.getCameraTransform(view.camera())
        pos, quat = transformUtils.poseFromTransform(cameraTransform)
        print("pos: ", pos)
        print("quat: ", quat)
        vis.updateFrame(cameraTransform, viewType + "_frame")
def __init__(self):
    """Load the rehearsal course point cloud and create named walk-goal
    frames, all kept rigid relative to the cloud via a FrameSync."""
    pose = transformUtils.poseFromTransform(vtk.vtkTransform())
    self.pointcloud = ioUtils.readPolyData(director.getDRCBaseDir() + '/software/models/rehearsal_pointcloud.vtp')
    self.pointcloudPD = vis.showPolyData(self.pointcloud, 'coursemodel', parent=None)
    segmentation.makeMovable(self.pointcloudPD, transformUtils.transformFromPose(array([0, 0, 0]), array([1.0, 0., 0., 0.0])))
    self.originFrame = self.pointcloudPD.getChildFrame()

    # Hard-coded goal poses (position, quaternion) expressed relative to the
    # course point cloud.
    t = transformUtils.transformFromPose(array([-4.39364111, -0.51507392, -0.73125563]), array([0.93821625, 0., 0., -0.34604951]))
    self.valveWalkFrame = vis.updateFrame(t, 'ValveWalk', scale=0.2, visible=True, parent=self.pointcloudPD)

    t = transformUtils.transformFromPose(array([-3.31840048, 0.36408685, -0.67413123]), array([0.93449475, 0., 0., -0.35597691]))
    self.drillPreWalkFrame = vis.updateFrame(t, 'DrillPreWalk', scale=0.2, visible=True, parent=self.pointcloudPD)

    t = transformUtils.transformFromPose(array([-2.24553758, -0.52990939, -0.73255338]), array([0.93697004, 0., 0., -0.34940972]))
    self.drillWalkFrame = vis.updateFrame(t, 'DrillWalk', scale=0.2, visible=True, parent=self.pointcloudPD)

    t = transformUtils.transformFromPose(array([-2.51306835, -0.92994004, -0.74173541]), array([-0.40456572, 0., 0., 0.91450893]))
    self.drillWallWalkFarthestSafeFrame = vis.updateFrame(t, 'DrillWallWalkFarthestSafe', scale=0.2, visible=True, parent=self.pointcloudPD)

    t = transformUtils.transformFromPose(array([-2.5314524, -0.27401861, -0.71302976]), array([0.98691519, 0., 0., -0.16124022]))
    self.drillWallWalkBackFrame = vis.updateFrame(t, 'DrillWallWalkBack', scale=0.2, visible=True, parent=self.pointcloudPD)

    t = transformUtils.transformFromPose(array([-1.16122318, 0.04723203, -0.67493468]), array([0.93163145, 0., 0., -0.36340451]))
    self.surprisePreWalkFrame = vis.updateFrame(t, 'SurprisePreWalk', scale=0.2, visible=True, parent=self.pointcloudPD)

    t = transformUtils.transformFromPose(array([-0.5176186, -1.00151554, -0.70650799]), array([0.84226497, 0., 0., -0.53906374]))
    self.surpriseWalkFrame = vis.updateFrame(t, 'SurpriseWalk', scale=0.2, visible=True, parent=self.pointcloudPD)

    t = transformUtils.transformFromPose(array([-0.69100097, -0.43713269, -0.68495922]), array([0.98625075, 0., 0., -0.16525575]))
    self.surpriseWalkBackFrame = vis.updateFrame(t, 'SurpriseWalkBack', scale=0.2, visible=True, parent=self.pointcloudPD)

    t = transformUtils.transformFromPose(array([0.65827322, -0.08028796, -0.77370834]), array([0.94399977, 0., 0., -0.3299461]))
    self.terrainPreWalkFrame = vis.updateFrame(t, 'TerrainPreWalk', scale=0.2, visible=True, parent=self.pointcloudPD)

    t = transformUtils.transformFromPose(array([5.47126425, -0.09790393, -0.70504679]), array([1., 0., 0., 0.]))
    self.stairsPreWalkFrame = vis.updateFrame(t, 'StairsPreWalk', scale=0.2, visible=True, parent=self.pointcloudPD)

    # Sync every goal frame to the cloud's origin frame so moving the cloud
    # carries all goals with it (ignoreIncoming: goals don't move the cloud).
    self.frameSync = vis.FrameSync()
    self.frameSync.addFrame(self.originFrame)
    self.frameSync.addFrame(self.pointcloudPD.getChildFrame(), ignoreIncoming=True)
    self.frameSync.addFrame(self.valveWalkFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.drillPreWalkFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.drillWalkFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.drillWallWalkFarthestSafeFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.drillWallWalkBackFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.surprisePreWalkFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.surpriseWalkFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.surpriseWalkBackFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.terrainPreWalkFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.stairsPreWalkFrame, ignoreIncoming=True)
def computeBoardStanceFrame(self):
    """Compute the symmetric and asymmetric robot stance frames for the board.

    Both stances are derived from the left grasp frame; the resulting frames
    are added (hidden) under the board affordance and pushed to the DRC view.
    """
    objectTransform = transformUtils.copyFrame( self.graspLeftFrame.transform )
    self.relativeStanceTransform = transformUtils.copyFrame(
        transformUtils.frameFromPositionAndRPY( self.relativeStanceXYZ , self.relativeStanceRPY ) )
    robotStance = self.b.computeRobotStanceFrame( objectTransform, self.relativeStanceTransform )
    self.stanceFrame = vis.updateFrame(robotStance, 'board stance', parent=self.affordance, visible=False, scale=0.2)
    self.stanceFrame.addToView(app.getDRCView())

    # Asymmetric stance: same RPY, different XYZ offset.
    objectTransform = transformUtils.copyFrame( self.graspLeftFrame.transform )
    self.relativeAsymmetricStanceTransform = transformUtils.copyFrame(
        transformUtils.frameFromPositionAndRPY( self.relativeAsymmetricStanceXYZ , self.relativeStanceRPY ) )
    robotAsymmetricStance = self.b.computeRobotStanceFrame( objectTransform, self.relativeAsymmetricStanceTransform )
    self.asymmetricStanceFrame = vis.updateFrame(robotAsymmetricStance, 'board Asymmetric stance', parent=self.affordance, visible=False, scale=0.2)
    self.asymmetricStanceFrame.addToView(app.getDRCView())
def moveToRobot(self, robotModel):
    """Pose the hand model at the robot's current hand-link location."""
    handLinkToWorld = robotModel.getLinkFrame(self.handLinkName)

    # Compose model -> palm -> hand link -> world.
    handToWorld = vtk.vtkTransform()
    handToWorld.PostMultiply()
    handToWorld.Concatenate(self.modelToPalm)
    handToWorld.Concatenate(self.palmToHandLink)
    handToWorld.Concatenate(handLinkToWorld)

    self.moveHandModelToFrame(self.handModel, handToWorld)
    vis.updateFrame(self.getPalmToWorldTransform(), '%s palm' % self.side)
def initRobotKinematicsCameraFrame(robotSystem):
    """Create debug frames for the robot's end effector and camera, and push
    edits of the camera frame into the active view camera."""
    endEffectorToWorld = robotSystem.robotStateModel.getLinkFrame('iiwa_link_ee')
    frameObj = vis.updateFrame(endEffectorToWorld, 'iiwa_link_ee', parent='debug', scale=0.15, visible=False)
    cameraToEE = getCameraToKukaEndEffectorFrame()
    cameraToWorld = transformUtils.concatenateTransforms([cameraToEE, endEffectorToWorld])
    obj = vis.updateFrame(cameraToWorld, 'camera frame', parent=frameObj, scale=0.15)
    # Keep the camera frame rigidly attached to the EE frame.
    frameObj.getFrameSync().addFrame(obj, ignoreIncoming=True)

    def onCameraFrameModified(f):
        # Drive the view camera from the (possibly user-edited) camera frame.
        setCameraToWorld(f.transform)

    obj.connectFrameModified(onCameraFrameModified)
def computeBoardGraspFrames(self):
    """Compute the left and right grasp frames relative to the board affordance."""
    # Left grasp: hand offset expressed in the affordance frame.
    leftGrasp = transformUtils.frameFromPositionAndRPY(
        self.graspLeftHandFrameXYZ, self.graspLeftHandFrameRPY)
    leftGraspInWorld = transformUtils.copyFrame(leftGrasp)
    leftGraspInWorld.Concatenate(self.frame.transform)
    self.graspLeftFrame = vis.updateFrame(
        leftGraspInWorld, 'grasp left frame',
        parent=self.affordance, visible=False, scale=0.2)
    self.graspLeftFrame.addToView(app.getDRCView())

    # Right grasp: same construction with the right-hand offsets.
    rightGrasp = transformUtils.frameFromPositionAndRPY(
        self.graspRightHandFrameXYZ, self.graspRightHandFrameRPY)
    rightGraspInWorld = transformUtils.copyFrame(rightGrasp)
    rightGraspInWorld.Concatenate(self.frame.transform)
    self.graspRightFrame = vis.updateFrame(
        rightGraspInWorld, 'grasp right frame',
        parent=self.affordance, visible=False, scale=0.2)
    self.graspRightFrame.addToView(app.getDRCView())
def onStartMappingButton(self):
    """Publish a start-mapping command and record the initial camera pose."""
    msg = map_command_t()
    msg.timestamp = getUtime()
    msg.command = 0
    lcmUtils.publish('KINECT_MAP_COMMAND', msg)

    utime = self.queue.getCurrentImageTime('KINECT_RGB')
    self.cameraToLocalInit = vtk.vtkTransform()
    self.queue.getTransform('KINECT_RGB', 'local', utime, self.cameraToLocalInit)
    vis.updateFrame(self.cameraToLocalInit, 'initial cam')

    # Fix: Python 3 print() calls (original used Python 2 print statements).
    print("starting mapping", utime)
    print(self.cameraToLocalInit.GetPosition())
    print(self.cameraToLocalInit.GetOrientation())
def selectLink(self, displayPoint, view):
    """Toggle selection of the robot link under *displayPoint*.

    Selecting a link highlights it (all others faded), shows a caption and a
    frame; clicking the already-selected link clears the selection and
    restores full opacity. Returns True when the event was handled.
    """
    if not self.enabled():
        return False

    robotModel, _ = vis.findPickedObject(displayPoint, view)

    # Only robot models expose getLinkNameForMesh; ignore anything else.
    try:
        robotModel.model.getLinkNameForMesh
    except AttributeError:
        return False

    model = robotModel.model

    pickedPoint, _, polyData = vis.pickProp(displayPoint, view)

    linkName = model.getLinkNameForMesh(polyData)
    if not linkName:
        return False

    # Fade all links on a new selection; restore full alpha on deselect.
    fadeValue = 1.0 if linkName == self.selectedLink else 0.05

    for name in model.getLinkNames():
        linkColor = model.getLinkColor(name)
        linkColor.setAlphaF(fadeValue)
        model.setLinkColor(name, linkColor)

    if linkName == self.selectedLink:
        # Clicked the already-selected link: clear selection and widgets.
        self.selectedLink = None
        vis.hideCaptionWidget()
        om.removeFromObjectModel(
            om.findObjectByName("selected link frame"))
    else:
        self.selectedLink = linkName
        linkColor = model.getLinkColor(self.selectedLink)
        linkColor.setAlphaF(1.0)
        model.setLinkColor(self.selectedLink, linkColor)
        vis.showCaptionWidget(
            robotModel.getLinkFrame(self.selectedLink).GetPosition(),
            self.selectedLink,
            view=view,
        )
        vis.updateFrame(
            robotModel.getLinkFrame(self.selectedLink),
            "selected link frame",
            scale=0.2,
            parent=robotModel,
        )

    return True
def handle_message(self, msg):
    """Sync the visualized link frames with an incoming link-pose message."""
    # If the set of link names changed, rebuild the folder from scratch.
    if set(self._link_name_published) != set(msg.link_name):
        self.remove_folder()
        self._link_name_published = msg.link_name

    folder = self._get_folder()
    for i in range(msg.num_links):
        pose = transformUtils.transformFromPose(
            msg.position[i], msg.quaternion[i])
        # updateFrame creates or updates the frame by name within its parent.
        vis.updateFrame(pose, msg.link_name[i], parent=folder, scale=0.1)
def run(self):
    """Project the selected affordance's frame straight down to ground height."""
    aff = self.getSelectedAffordance()
    affTransform = aff.getChildFrame().transform
    groundZ = self.getGroundFrame().transform.GetPosition()[2]

    # Keep the affordance's x/y, replace z with the ground height.
    targetXYZ = np.hstack([affTransform.GetPosition()[0:2], groundZ])
    projected = transformUtils.copyFrame(affTransform)
    projected.Translate(targetXYZ - np.array(projected.GetPosition()))

    frameName = self.properties.getProperty('Frame output name')
    frameName = frameName or '%s ground frame' % aff.getProperty('Name')
    vis.updateFrame(projected, frameName, scale=0.2)
def doFTDraw(self, unusedrobotstate):
    """Draw wrist force/torque readings for the enabled hands.

    Forces are drawn as arrows (a short line when small), torques as circles
    about the torque direction; a hidden debug frame is drawn per wrist.
    The *unusedrobotstate* argument is ignored (callback signature).
    """
    frames = []
    fts = []
    vis_names = []
    if (hasattr(self.robotStateJointController, 'lastRobotStateMessage') and
            self.robotStateJointController.lastRobotStateMessage):
        if self.draw_right:
            # Concatenated [force, torque] vector, sign-corrected and debiased.
            rft = np.array(self.robotStateJointController.lastRobotStateMessage.force_torque.r_hand_force +
                           self.robotStateJointController.lastRobotStateMessage.force_torque.r_hand_torque)
            rft[0] = -1.*rft[0]  # right FT looks to be rotated 180deg around the z axis
            rft[1] = -1.*rft[1]
            rft[3] = -1.*rft[3]
            rft[4] = -1.*rft[4]
            rft -= self.ft_right_bias
            fts.append(rft)
            frames.append(self.robotStateModel.getLinkFrame('r_hand_force_torque'))
            vis_names.append('ft_right')
        if self.draw_left:
            lft = np.array(self.robotStateJointController.lastRobotStateMessage.force_torque.l_hand_force +
                           self.robotStateJointController.lastRobotStateMessage.force_torque.l_hand_torque)
            lft -= self.ft_left_bias
            fts.append(lft)
            frames.append(self.robotStateModel.getLinkFrame('l_hand_force_torque'))
            vis_names.append('ft_left')

    for i in range(0, len(frames)):
        frame = frames[i]
        ft = fts[i]
        offset = [0., 0., 0.]
        p1 = frame.TransformPoint(offset)
        scale = 1.0/25.0  # todo add slider for this?
        scalet = 1.0 / 2.5
        p2f = frame.TransformPoint(offset + ft[0:3]*scale)
        p2t = frame.TransformPoint(offset + ft[3:6])
        # Unit direction of the torque (used as the circle normal).
        normalt = (np.array(p2t) - np.array(p1))
        normalt = normalt / np.linalg.norm(normalt)
        d = DebugData()
        # force
        if np.linalg.norm(np.array(p2f) - np.array(p1)) < 0.1:
            # Too small for an arrow head; draw a plain line instead.
            d.addLine(p1, p2f, color=[1.0, 0.0, 0.0])
        else:
            d.addArrow(p1, p2f, color=[1., 0., 0.])
        # torque
        if self.draw_torque:
            d.addCircle(p1, normalt, scalet*np.linalg.norm(ft[3:6]))
        # frame (largely for debug)
        vis.updateFrame(frame, vis_names[i]+'frame', view=self.view, parent='wristft', visible=False, scale=0.2)
        vis.updatePolyData(d.getPolyData(), vis_names[i], view=self.view, parent='wristft')
def planTargetReach(self):
    """Plan a single one-shot gaze-constrained reach: place the hand xyz at
    the goal and align the hand's y-axis with the goal's x-axis."""
    affordanceFolder = om.getOrCreateContainer('affordances')
    goalFrameObj = vis.updateFrame(self.targetFrame, 'gaze goal',
                                   visible=False, scale=0.2, parent=affordanceFolder)
    self.initConstraintSet()
    self.addConstraintForTargetFrame(goalFrameObj, 1)
    self.planTrajectory()
def spawnFootstepFrame(self):
    """Publish a stance frame for the switch box, derived from the stored
    foot-to-box offset and shifted to the robot's current stance height."""
    # should snap this to
    boxTransform = om.findObjectByName('Switch Box').getChildFrame().transform
    stance = transformUtils.copyFrame(self.footToBox)
    stance.PostMultiply()
    stance.Concatenate(boxTransform)
    # Translate the goal vertically so it matches the current robot height.
    heightError = self.getStanceFrame().GetPosition()[2] - stance.GetPosition()[2]
    stance.PreMultiply()
    stance.Translate(0.0, 0.0, heightError)
    vis.updateFrame(stance, 'switch box stance frame', scale=0.2)
def updateLinkFrame(self, robotModel, linkName, create=True):
    """Update the visualization frame for a robot link.

    When create is False and the frame object does not exist yet, nothing is
    drawn and None is returned; otherwise the frame object is returned.
    """
    frameName = '%s frame' % linkName
    if not create:
        # Only refresh frames that already exist.
        if not om.findObjectByName(frameName):
            return None
    linkTransform = robotModel.getLinkFrame(linkName)
    return vis.updateFrame(linkTransform, frameName, scale=0.2, visible=False,
                           parent=self.robotModel)
def _handle_message(self, msg):
    """Handle a link-frames message: (re)create one frame per link under the
    folder named self._folder_name.

    If the set of link names changed since the last message, the whole folder
    is torn down first so stale frames disappear.
    """
    if set(self._link_name_published) != set(msg.link_name):
        # Removes the folder completely.
        # TODO(eric.cousineau): Consider only removing frames that are in
        # the set difference.
        om.removeFromObjectModel(om.findObjectByName(self._folder_name))
        self._link_name_published = msg.link_name
    # Recreates folder.
    folder = om.getOrCreateContainer(self._folder_name)
    for i in range(msg.num_links):
        name = msg.link_name[i]
        transform = transformUtils.transformFromPose(
            msg.position[i], msg.quaternion[i])
        # `vis.updateFrame` will either create or update the frame
        # according to its name within its parent folder.
        vis.updateFrame(transform, name, parent=folder, scale=0.1)
def drawModel(self, model):
    """Draw/refresh the visualization for one mocap model: marker spheres,
    optional inter-marker edge lines, and the model's base frame.

    Removes and recreates the marker objects whenever the reported marker
    count changes.  Marker positions are converted with self.unitConversion.
    """
    modelFolder = om.getOrCreateContainer(model.name, parentObj=self.getRootFolder())
    markerFolder = om.getOrCreateContainer("markers", parentObj=modelFolder)
    modelName = model.name

    # Recreate the marker objects if the marker count changed.
    markerObjects = markerFolder.children()
    if len(markerObjects) != model.nummarkers:
        for obj in markerObjects:
            om.removeFromObjectModel(obj)
        modelColor = vis.getRandomColor()
        markerObjects = self.createMarkerObjects(model.nummarkers, markerFolder, modelName, modelColor)
        self.models[modelName] = markerObjects

    # NOTE(review): if model.nummarkers is 0 and no markers existed,
    # modelColor stays unbound and the edges branch below would raise —
    # presumably models always have markers; confirm.
    if len(markerObjects):
        modelColor = markerObjects[0].getProperty("Color")

    # Move each marker sphere to its reported position.
    for i, marker in enumerate(model.markers):
        xyz = np.array(marker.xyz) * self.unitConversion
        markerFrame = vtk.vtkTransform()
        markerFrame.Translate(xyz)
        markerObjects[i].getChildFrame().copyFrame(markerFrame)

    if self.drawEdges:
        # Line between every ordered marker pair (includes zero-length
        # self-edges; harmless for display).
        d = DebugData()
        for m1 in model.markers:
            xyz = np.array(m1.xyz) * self.unitConversion
            for m2 in model.markers:
                xyz2 = np.array(m2.xyz) * self.unitConversion
                d.addLine(xyz, xyz2)
        edges = shallowCopy(d.getPolyData())
        vis.updatePolyData(edges, modelName + " edges", color=modelColor, parent=modelFolder)
    else:
        edgesObj = om.findObjectByName(modelName + " edges")
        om.removeFromObjectModel(edgesObj)

    # Base frame from the model's first segment (position T, orientation A
    # in radians).  The original guarded this with an always-true
    # `computeModelFrame` flag, removed here as dead code.
    pos = np.array(model.segments[0].T) * self.unitConversion
    rpy = np.array(model.segments[0].A)
    modelFrame = transformUtils.frameFromPositionAndRPY(pos, np.degrees(rpy))
    vis.updateFrame(modelFrame, modelName + " frame", parent=modelFolder, scale=0.1)
def computeBoardReachFrames(self):
    ''' Reach ~10cm short of the grasp frame '''
    def makeReachFrame(graspXYZ, graspRPY, frameName):
        # Pull the reach point back along x by the configured reach depth.
        xyz = copy.deepcopy(graspXYZ)
        xyz[0] = xyz[0] - self.reachDepth
        rpy = copy.deepcopy(graspRPY)
        t = transformUtils.frameFromPositionAndRPY(xyz, rpy)
        t.Concatenate(self.frame.transform)
        frameObj = vis.updateFrame(t, frameName, parent=self.affordance,
                                   visible=False, scale=0.2)
        frameObj.addToView(app.getDRCView())
        return frameObj

    self.reachLeftFrame = makeReachFrame(
        self.graspLeftHandFrameXYZ, self.graspLeftHandFrameRPY, 'reach left frame')
    self.reachRightFrame = makeReachFrame(
        self.graspRightHandFrameXYZ, self.graspRightHandFrameRPY, 'reach right frame')
def planGraspLift(self):
    """Plan lifting the grasped board 4 cm along the grasp frame's z-axis."""
    liftOffset = transformUtils.frameFromPositionAndRPY([0.00, 0.0, 0.04], [0, 0, 0])
    liftTransform = transformUtils.copyFrame(self.board.graspFrame.transform)
    liftTransform.Concatenate(liftOffset)
    self.board.liftFrame = vis.updateFrame(liftTransform, 'lift frame',
                                           parent="affordances", visible=False, scale=0.2)

    constraintSet = self.ikPlanner.planEndEffectorGoal(
        self.getPlanningStartPose(), self.graspingHand, self.board.liftFrame,
        lockBase=self.lockBase, lockBack=self.lockBack)
    endPose, info = constraintSet.runIk()
    self.addPlan(constraintSet.runIkTraj())
def selectLink(self, displayPoint, view):
    """Toggle selection of the robot link picked at displayPoint.

    Selecting a link fades all other links, shows a caption and a frame for
    it; clicking the selected link again clears the selection.  Returns True
    when a link was picked, False otherwise.
    """
    if not self.enabled():
        return False

    robotModel, _ = vis.findPickedObject(displayPoint, view)
    try:
        # Probe: only robot-model objects expose getLinkNameForMesh.
        robotModel.model.getLinkNameForMesh
    except AttributeError:
        return False
    model = robotModel.model

    pickedPoint, _, polyData = vis.pickProp(displayPoint, view)
    linkName = model.getLinkNameForMesh(polyData)
    if not linkName:
        return False

    toggledOff = (linkName == self.selectedLink)

    # Fade every link; on a fresh selection the chosen link is restored below.
    fadeValue = 1.0 if toggledOff else 0.05
    for name in model.getLinkNames():
        color = model.getLinkColor(name)
        color.setAlphaF(fadeValue)
        model.setLinkColor(name, color)

    if toggledOff:
        self.selectedLink = None
        vis.hideCaptionWidget()
        om.removeFromObjectModel(om.findObjectByName('selected link frame'))
    else:
        self.selectedLink = linkName
        color = model.getLinkColor(self.selectedLink)
        color.setAlphaF(1.0)
        model.setLinkColor(self.selectedLink, color)
        vis.showCaptionWidget(robotModel.getLinkFrame(self.selectedLink).GetPosition(),
                              self.selectedLink, view=view)
        vis.updateFrame(robotModel.getLinkFrame(self.selectedLink),
                        'selected link frame', scale=0.2, parent=robotModel)

    return True
def run(self):
    """Create/update a frame offset from the input frame by the configured
    translation/rotation, and keep it synchronized via a FrameSync."""
    sourceFrame = self.getInputFrame()
    offsetTransform = transformUtils.frameFromPositionAndRPY(
        self.properties.getProperty('Translation'),
        self.properties.getProperty('Rotation'))
    offsetTransform.PostMultiply()
    offsetTransform.Concatenate(transformUtils.copyFrame(sourceFrame.transform))
    resultFrame = vis.updateFrame(offsetTransform,
                                  self.properties.getProperty('Frame output name'),
                                  scale=0.2, parent=sourceFrame.parent())
    # Lazily attach a FrameSync to the input frame so the output follows it.
    if not hasattr(sourceFrame, 'frameSync'):
        sourceFrame.frameSync = vis.FrameSync()
        sourceFrame.frameSync.addFrame(sourceFrame)
    sourceFrame.frameSync.addFrame(resultFrame, ignoreIncoming=True)
def onNewWalkingGoal(self, walkingGoal=None):
    """Show an editable 'walking goal' frame, snapped onto terrain if a
    height map is available, and wire up placement/modification callbacks.

    walkingGoal: optional vtkTransform; defaults to a new goal frame in
    front of the robot.
    """
    walkingGoal = walkingGoal or self.newWalkingGoalFrame(self.robotModel)
    frameObj = vis.updateFrame(walkingGoal, 'walking goal', parent='planning', scale=0.25)
    frameObj.setProperty('Edit', True)

    # Restrict the interactive widget: no z translation, yaw-only rotation.
    rep = frameObj.widget.GetRepresentation()
    rep.SetTranslateAxisEnabled(2, False)
    rep.SetRotateAxisEnabled(0, False)
    rep.SetRotateAxisEnabled(1, False)
    frameObj.widget.HandleRotationEnabledOff()

    if self.placer:
        self.placer.stop()

    terrain = om.findObjectByName('HEIGHT_MAP_SCENE')
    if terrain:
        # Snap the goal's z to the highest terrain point near the goal's
        # vertical line (within 0.1 m of it).
        pos = np.array(frameObj.transform.GetPosition())
        polyData = filterUtils.removeNonFinitePoints(terrain.polyData)
        if polyData.GetNumberOfPoints():
            polyData = segmentation.labelDistanceToLine(polyData, pos, pos+[0,0,1])
            polyData = segmentation.thresholdPoints(polyData, 'distance_to_line', [0.0, 0.1])
            if polyData.GetNumberOfPoints():
                pos[2] = np.nanmax(vnp.getNumpyFromVtk(polyData, 'Points')[:,2])
                frameObj.transform.Translate(pos - np.array(frameObj.transform.GetPosition()))

        # Yellow sphere handle for dragging the goal over the terrain.
        d = DebugData()
        d.addSphere((0,0,0), radius=0.03)
        handle = vis.showPolyData(d.getPolyData(), 'walking goal terrain handle',
                                  parent=frameObj, visible=True, color=[1,1,0])
        handle.actor.SetUserTransform(frameObj.transform)
        self.placer = PlacerWidget(app.getCurrentRenderView(), handle, terrain)

        def onFramePropertyModified(propertySet, propertyName):
            # Start/stop terrain placement when the 'Edit' property toggles.
            if propertyName == 'Edit':
                if propertySet.getProperty(propertyName):
                    self.placer.start()
                else:
                    self.placer.stop()

        frameObj.properties.connectPropertyChanged(onFramePropertyModified)
        # Sync placer state with the current Edit value (set True above).
        onFramePropertyModified(frameObj, 'Edit')

    frameObj.connectFrameModified(self.onWalkingGoalModified)
    self.onWalkingGoalModified(frameObj)
def makeStepFrames(stepFrames, relativeFrame=None, showFrames=False):
    """Convert step positions into transforms.

    Each entry of stepFrames becomes a transform (identity rotation),
    optionally composed with relativeFrame and optionally visualized under
    the 'step frames' folder.  Returns the list of transforms.
    """
    result = []
    for index, position in enumerate(stepFrames):
        transform = transformUtils.frameFromPositionAndRPY(position, [0, 0, 0])
        transform.PostMultiply()
        if relativeFrame:
            transform.Concatenate(relativeFrame)
        if showFrames:
            frameObj = vis.updateFrame(transform, 'step frame %d' % index,
                                       parent='step frames', scale=0.2)
            # Use the visualized object's transform so edits propagate.
            transform = frameObj.transform
        result.append(transform)
    return result
def __init__(self):
    """Build the polaris vehicle egress affordance set.

    Creates the cropped vehicle point-cloud mesh, steering-wheel and pedal
    affordances, and a family of hard-coded foot/hand target frames, all
    kept rigidly attached to the vehicle via a FrameSync.  Also subscribes
    to april-tag detections used to localize the vehicle.  All poses are
    hand-tuned constants expressed relative to the point-cloud affordance.
    """
    # NOTE(review): attribute name has a typo ('Subsciber'); left as-is since
    # other code may reference it.
    self.aprilTagSubsciber = lcmUtils.addSubscriber('APRIL_TAG_TO_CAMERA_LEFT', lcmbotcore.rigid_transform_t, self.onAprilTag)

    # Vehicle mesh affordance at the identity pose; its child frame is the
    # origin that everything else is synced to.
    pose = transformUtils.poseFromTransform(vtk.vtkTransform())
    desc = dict(classname='MeshAffordanceItem', Name='polaris',
                Filename='software/models/polaris/polaris_cropped.vtp', pose=pose)
    self.pointcloudAffordance = segmentation.affordanceManager.newAffordanceFromDescription(desc)
    self.originFrame = self.pointcloudAffordance.getChildFrame()
    self.originToAprilTransform = transformUtils.transformFromPose(
        np.array([-0.038508 , -0.00282131, -0.01000079]),
        np.array([ 9.99997498e-01, -2.10472556e-03, -1.33815696e-04, 7.46246794e-04]))  # offset for . . . who knows why

    # Egress foot target frames (start / mid / outside).
    # t = transformUtils.transformFromPose(np.array([ 0.14376024, 0.95920689, 0.36655712]), np.array([ 0.28745842, 0.90741428, -0.28822068, 0.10438304]))
    t = transformUtils.transformFromPose(np.array([ 0.10873244, 0.93162364, 0.40509084]),
        np.array([ 0.32997378, 0.88498408, -0.31780588, 0.08318602]))
    self.leftFootEgressStartFrame = vis.updateFrame(t, 'left foot start', scale=0.2,visible=True, parent=self.pointcloudAffordance)
    t = transformUtils.transformFromPose(np.array([ 0.265, 0.874, 0.274]),
        np.array([ 0.35290731, 0.93443693, -0.04181263, 0.02314636]))
    self.leftFootEgressMidFrame = vis.updateFrame(t, 'left foot mid', scale=0.2,visible=True, parent=self.pointcloudAffordance)
    t = transformUtils.transformFromPose(np.array([ 0.54339115, 0.89436275, 0.26681047]),
        np.array([ 0.34635985, 0.93680077, -0.04152008, 0.02674412]))
    self.leftFootEgressOutsideFrame = vis.updateFrame(t, 'left foot outside', scale=0.2,visible=True, parent=self.pointcloudAffordance)

    # Steering wheel affordance (pose history kept for reference).
    # pose = [np.array([-0.78962299, 0.44284877, -0.29539116]), np.array([ 0.54812954, 0.44571517, -0.46063251, 0.53731713])] #old location
    # pose = [np.array([-0.78594663, 0.42026626, -0.23248139]), np.array([ 0.54812954, 0.44571517, -0.46063251, 0.53731713])] # updated location
    pose = [np.array([-0.78594663, 0.42026626, -0.23248139]),
            np.array([ 0.53047159, 0.46554963, -0.48086192, 0.52022615])] # update orientation
    desc = dict(classname='CapsuleRingAffordanceItem', Name='Steering Wheel', uuid=newUUID(), pose=pose,
                Color=[1, 0, 0], Radius=float(0.18), Segments=20)
    self.steeringWheelAffordance = segmentation.affordanceManager.newAffordanceFromDescription(desc)

    # Pedal affordance.
    pose = [np.array([-0.05907324, 0.80460545, 0.45439687]),
            np.array([ 0.14288327, 0.685944 , -0.703969 , 0.11615873])]
    desc = dict(classname='BoxAffordanceItem', Name='pedal', Dimensions=[0.12, 0.33, 0.04], pose=pose, Color=[0,1,0])
    self.pedalAffordance = segmentation.affordanceManager.newAffordanceFromDescription(desc)

    # Driving-related foot frames.
    # t = transformUtils.transformFromPose(np.array([ 0.04045136, 0.96565326, 0.25810111]),
    #                                      np.array([ 0.26484648, 0.88360091, -0.37065556, -0.10825996]))
    # t = transformUtils.transformFromPose(np.array([ -4.34908919e-04, 9.24901627e-01, 2.65614116e-01]),
    #                                      np.array([ 0.25022251, 0.913271 , -0.32136359, -0.00708626]))
    t = transformUtils.transformFromPose(np.array([ 0.0384547 , 0.89273742, 0.24140762]),
        np.array([ 0.26331831, 0.915796 , -0.28055337, 0.11519963]))
    self.leftFootPedalSwingFrame = vis.updateFrame(t,'left foot pedal swing', scale=0.2, visible=True, parent=self.pointcloudAffordance)
    t = transformUtils.transformFromPose(np.array([-0.9012598 , -0.05709763, 0.34897024]),
        np.array([ 0.03879584, 0.98950919, 0.03820214, 0.13381721]))
    self.leftFootDrivingFrame = vis.updateFrame(t,'left foot driving', scale=0.2, visible=True, parent=self.pointcloudAffordance)
    # t = transformUtils.transformFromPose(np.array([-0.12702725, 0.92068409, 0.27209386]),
    #                                      np.array([ 0.2062255 , 0.92155886, -0.30781119, 0.11598529]))
    # t = transformUtils.transformFromPose(np.array([-0.14940375, 0.90347275, 0.23812658]),
    #                                      np.array([ 0.27150909, 0.91398724, -0.28877386, 0.0867167 ]))
    # t = transformUtils.transformFromPose(np.array([-0.17331227, 0.87879312, 0.25645152]),
    #                                      np.array([ 0.26344489, 0.91567196, -0.28089824, 0.11505581]))
    #
    # NOTE(review): this first assignment reuses the stale `t` from the
    # 'left foot driving' frame above (the intervening definitions are
    # commented out) and is immediately overwritten two lines below —
    # effectively dead code; left as-is to preserve behavior.
    self.leftFootDrivingKneeInFrame = vis.updateFrame(t,'left foot driving knee in', scale=0.2, visible=True, parent=self.pointcloudAffordance)
    t = transformUtils.transformFromPose(np.array([-0.12702725, 0.92068409, 0.27209386]),
        np.array([ 0.2062255 , 0.92155886, -0.30781119, 0.11598529]))
    self.leftFootDrivingKneeInFrame = vis.updateFrame(t,'left foot driving knee in', scale=0.2, visible=True, parent=self.pointcloudAffordance)
    t = transformUtils.transformFromPose(np.array([-0.13194951, 0.89871423, 0.24956246]),
        np.array([ 0.21589082, 0.91727326, -0.30088849, 0.14651633]))
    self.leftFootOnPedal = vis.updateFrame(t,'left foot on pedal', scale=0.2, visible=True, parent=self.pointcloudAffordance)
    t = transformUtils.transformFromPose(np.array([ 0.17712239, 0.87619935, 0.27001509]),
        np.array([ 0.33484372, 0.88280787, -0.31946488, 0.08044963]))
    self.leftFootUpFrame = vis.updateFrame(t,'left foot up frame', scale=0.2, visible=True, parent=self.pointcloudAffordance)
    t = transformUtils.transformFromPose(np.array([ 0.47214939, -0.04856998, 0.01375837]),
        np.array([ 6.10521653e-03, 4.18621358e-04, 4.65520611e-01, 8.85015882e-01]))
    self.rightHandGrabFrame = vis.updateFrame(t,'right hand grab bar', scale=0.2, visible=True, parent=self.pointcloudAffordance)

    # Rigidly attach every affordance/frame to the vehicle origin so moving
    # the origin moves the whole set; ignoreIncoming prevents feedback loops.
    self.frameSync = vis.FrameSync()
    self.frameSync.addFrame(self.originFrame)
    self.frameSync.addFrame(self.pointcloudAffordance.getChildFrame(), ignoreIncoming=True)
    self.frameSync.addFrame(self.leftFootEgressStartFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.leftFootEgressMidFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.leftFootEgressOutsideFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.steeringWheelAffordance.getChildFrame(), ignoreIncoming=True)
    self.frameSync.addFrame(self.pedalAffordance.getChildFrame(), ignoreIncoming=True)
    self.frameSync.addFrame(self.leftFootPedalSwingFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.leftFootDrivingFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.leftFootDrivingKneeInFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.leftFootOnPedal, ignoreIncoming=True)
    self.frameSync.addFrame(self.leftFootUpFrame, ignoreIncoming=True)
    self.frameSync.addFrame(self.rightHandGrabFrame, ignoreIncoming=True)
def showLinkFrame(linkName, model=None):
    """Visualize the named link's frame under the 'link frames' folder.

    Raises Exception when the link cannot be resolved on the given (or
    default) model.  Returns the created/updated frame object.
    """
    linkTransform = getLinkFrame(linkName, model)
    if not linkTransform:
        raise Exception('Link not found: ' + linkName)
    return vis.updateFrame(linkTransform, linkName, parent='link frames')