def projectDrillDemoInCamera():
    """Rebuild the CAMERA_LEFT overlay with drill-demo frames and objects."""
    # Clear any previous overlay objects.
    q = om.findObjectByName('camera overlay')
    om.removeFromObjectModel(q)
    imageView = cameraview.views['CAMERA_LEFT']
    # Dim the camera image so overlay geometry is visible.
    imageView.imageActor.SetOpacity(.2)
    drawFrameInCamera(drillDemo.drill.frame.transform, 'drill frame',visible=False)
    # Drill button frame = drill frame composed with the drill-to-button offset.
    tf = transformUtils.copyFrame( drillDemo.drill.frame.transform )
    tf.PreMultiply()
    tf.Concatenate( drillDemo.drill.drillToButtonTransform )
    drawFrameInCamera(tf, 'drill button')
    # Same frame rotated 180 deg about X (flipped about roll).
    tf2 = transformUtils.copyFrame( tf )
    tf2.PreMultiply()
    tf2.Concatenate( transformUtils.frameFromPositionAndRPY( [0,0,0] , [180,0,0] ) )
    drawFrameInCamera(tf2, 'drill button flip')
    drawObjectInCamera('drill',visible=False)
    drawObjectInCamera('sensed pointer tip')
    obj = om.findObjectByName('sensed pointer tip frame')
    if (obj is not None):
        drawFrameInCamera(obj.transform, 'sensed pointer tip frame',visible=False)
    #drawObjectInCamera('left robotiq',visible=False)
    #drawObjectInCamera('right pointer',visible=False)
    v = imageView.view
    v.render()
def spawnHandAtCurrentLocation(side='left'):
    """Spawn a hand model at the robot's current hand/pointer link frame.

    :param side: 'left' spawns at the l_hand_face link; anything else spawns a
        right hand at the right_pointer_tip link.
    """
    # Bug fix: the original used `side is 'left'` -- an identity comparison on
    # a string, which only works by CPython string interning. Use equality.
    if side == 'left':
        tf = transformUtils.copyFrame( getLinkFrame( 'l_hand_face') )
        handFactory.placeHandModelWithTransform( tf , app.getCurrentView(), 'left')
    else:
        tf = transformUtils.copyFrame( getLinkFrame( 'right_pointer_tip') )
        handFactory.placeHandModelWithTransform( tf , app.getCurrentView(), 'right')
def computeBoardStanceFrame(self):
    """Publish 'board stance' and 'board Asymmetric stance' frames derived
    from the left grasp frame and the configured relative standoffs."""
    graspToWorld = transformUtils.copyFrame(self.graspLeftFrame.transform)
    self.relativeStanceTransform = transformUtils.copyFrame(
        transformUtils.frameFromPositionAndRPY(self.relativeStanceXYZ, self.relativeStanceRPY))
    stanceToWorld = self.b.computeRobotStanceFrame(graspToWorld, self.relativeStanceTransform)
    self.stanceFrame = vis.updateFrame(stanceToWorld, 'board stance', parent=self.affordance, visible=False, scale=0.2)
    self.stanceFrame.addToView(app.getDRCView())

    # Same construction with the asymmetric XYZ offset (RPY offset is shared).
    graspToWorld = transformUtils.copyFrame(self.graspLeftFrame.transform)
    self.relativeAsymmetricStanceTransform = transformUtils.copyFrame(
        transformUtils.frameFromPositionAndRPY(self.relativeAsymmetricStanceXYZ, self.relativeStanceRPY))
    asymmetricToWorld = self.b.computeRobotStanceFrame(graspToWorld, self.relativeAsymmetricStanceTransform)
    self.asymmetricStanceFrame = vis.updateFrame(asymmetricToWorld, 'board Asymmetric stance', parent=self.affordance, visible=False, scale=0.2)
    self.asymmetricStanceFrame.addToView(app.getDRCView())
def computeLeftFootOverPlatformFrame(self, startPose, height):
    """Return the left-foot target frame over the platform.

    The frame is a copy of the right foot's frame at startPose shifted by a
    fixed forward/lateral offset and the requested height.

    :param startPose: robot pose used to locate the r_foot link
    :param height: vertical offset (meters) applied to the frame
    """
    # Fix: removed a dead assignment from polaris.leftFootEgressOutsideFrame
    # that was immediately overwritten by the copy below and had no effect.
    rFoot2World = self.robotSystem.ikPlanner.getLinkFrameAtPose('r_foot', startPose)
    lFoot2World = transformUtils.copyFrame(rFoot2World)
    lFoot2World.PreMultiply()
    lFoot2World.Translate([0.05, 0.26, height])
    return lFoot2World
def computeBoardStanceFrame(self):
    """Publish 'board stance' and 'board Asymmetric stance' frames derived
    from the left grasp frame and the configured relative standoffs."""
    objectTransform = transformUtils.copyFrame( self.graspLeftFrame.transform )
    self.relativeStanceTransform = transformUtils.copyFrame( transformUtils.frameFromPositionAndRPY( self.relativeStanceXYZ , self.relativeStanceRPY ) )
    robotStance = self.b.computeRobotStanceFrame( objectTransform, self.relativeStanceTransform )
    self.stanceFrame = vis.updateFrame(robotStance, 'board stance', parent=self.affordance, visible=False, scale=0.2)
    self.stanceFrame.addToView(app.getDRCView())
    # Repeat with the asymmetric XYZ offset (RPY offset is shared).
    objectTransform = transformUtils.copyFrame( self.graspLeftFrame.transform )
    self.relativeAsymmetricStanceTransform = transformUtils.copyFrame( transformUtils.frameFromPositionAndRPY( self.relativeAsymmetricStanceXYZ , self.relativeStanceRPY ) )
    robotAsymmetricStance = self.b.computeRobotStanceFrame( objectTransform, self.relativeAsymmetricStanceTransform )
    self.asymmetricStanceFrame = vis.updateFrame(robotAsymmetricStance, 'board Asymmetric stance', parent=self.affordance, visible=False, scale=0.2)
    self.asymmetricStanceFrame.addToView(app.getDRCView())
def computeBoardGraspFrames(self):
    """Create 'grasp left frame' and 'grasp right frame' as hand offsets
    composed onto the board's frame, and add them to the DRC view."""
    leftOffset = transformUtils.frameFromPositionAndRPY(self.graspLeftHandFrameXYZ, self.graspLeftHandFrameRPY)
    leftGraspToWorld = transformUtils.copyFrame(leftOffset)
    leftGraspToWorld.Concatenate(self.frame.transform)
    self.graspLeftFrame = vis.updateFrame(leftGraspToWorld, 'grasp left frame', parent=self.affordance, visible=False, scale=0.2)
    self.graspLeftFrame.addToView(app.getDRCView())

    rightOffset = transformUtils.frameFromPositionAndRPY(self.graspRightHandFrameXYZ, self.graspRightHandFrameRPY)
    rightGraspToWorld = transformUtils.copyFrame(rightOffset)
    rightGraspToWorld.Concatenate(self.frame.transform)
    self.graspRightFrame = vis.updateFrame(rightGraspToWorld, 'grasp right frame', parent=self.affordance, visible=False, scale=0.2)
    self.graspRightFrame.addToView(app.getDRCView())
def computeRobotStanceFrame(self, objectTransform, relativeStanceTransform):
    '''
    Compute a 6-DOF stance frame for the robot: XY position and yaw come from
    the object transform, height comes from the estimated ground plane, and
    the relative standoff transform is then applied.
    '''
    ground = self.computeGroundFrame(self.robotModel)
    groundZ = ground.GetPosition()[2]
    objectXYZ = np.array(objectTransform.GetPosition())

    # Express the object's Y (and Z) axes in world coordinates.
    yAxisWorld = [0.0, 1.0, 0.0]
    zAxisWorld = [0.0, 0.0, 1.0]
    objectTransform.TransformVector(yAxisWorld, yAxisWorld)
    objectTransform.TransformVector(zAxisWorld, zAxisWorld)

    forward = yAxisWorld
    #forward = zAxisWorld
    up = [0, 0, 1]
    # Re-orthogonalize keeping world-up fixed; projects 'forward' into the
    # ground plane.
    left = np.cross(up, forward)
    left /= np.linalg.norm(left)
    forward = np.cross(left, up)

    groundAligned = transformUtils.getTransformFromAxes(forward, left, up)
    groundAligned.PostMultiply()
    groundAligned.Translate(objectXYZ[0], objectXYZ[1], groundZ)

    stance = transformUtils.copyFrame(relativeStanceTransform)
    stance.Concatenate(groundAligned)
    return stance
def getFrameToOriginTransform(self, t):
    """Return a copy of t expressed relative to the polaris origin frame."""
    frameToOrigin = transformUtils.copyFrame(t)
    frameToOrigin.PostMultiply()
    worldToOrigin = self.polaris.originFrame.transform.GetLinearInverse()
    frameToOrigin.Concatenate(worldToOrigin)
    # Debug output of the resulting pose.
    print(transformUtils.poseFromTransform(frameToOrigin))
    return frameToOrigin
def computeRobotStanceFrame(self, objectTransform, relativeStanceTransform ):
    '''
    Given a robot model, determine the height of the ground
    using an XY and Yaw standoff, combined to determine the relative 6DOF standoff
    For a grasp or approach stance
    '''
    groundFrame = self.computeGroundFrame(self.robotModel)
    groundHeight = groundFrame.GetPosition()[2]
    graspPosition = np.array(objectTransform.GetPosition())
    # Express the object's Y (and Z) axes in world coordinates.
    graspYAxis = [0.0, 1.0, 0.0]
    graspZAxis = [0.0, 0.0, 1.0]
    objectTransform.TransformVector(graspYAxis, graspYAxis)
    objectTransform.TransformVector(graspZAxis, graspZAxis)
    xaxis = graspYAxis
    #xaxis = graspZAxis
    zaxis = [0, 0, 1]
    # Re-orthogonalize keeping world-up fixed.
    yaxis = np.cross(zaxis, xaxis)
    yaxis /= np.linalg.norm(yaxis)
    xaxis = np.cross(yaxis, zaxis)
    graspGroundTransform = transformUtils.getTransformFromAxes(xaxis, yaxis, zaxis)
    graspGroundTransform.PostMultiply()
    graspGroundTransform.Translate(graspPosition[0], graspPosition[1], groundHeight)
    # Apply the relative standoff to get the stance in world coordinates.
    robotStance = transformUtils.copyFrame( relativeStanceTransform )
    robotStance.Concatenate(graspGroundTransform)
    return robotStance
def updateReachFrame(self):
    """Recompute the 'pinch reach frame' from the pinch-to-box offset and the
    current Switch Box frame."""
    boxToWorld = om.findObjectByName('Switch Box').getChildFrame().transform
    pinchToWorld = transformUtils.copyFrame(self.pinchToBox)
    pinchToWorld.PostMultiply()
    pinchToWorld.Concatenate(boxToWorld)
    vis.updateFrame(pinchToWorld, 'pinch reach frame', scale=0.2)
def getPinchToPalmFrame(self):
    """Return the pinch frame expressed relative to the right palm link."""
    handToPalm = self.ikPlanner.getPalmToHandLink(side='right').GetLinearInverse()
    result = transformUtils.copyFrame(self.getPinchToHandFrame())
    result.PostMultiply()
    result.Concatenate(handToPalm)
    return result
def flipHandThumb():
    """Flip the picked hand's thumb by rotating its frame 180 deg about its
    own Y axis, then re-render."""
    frameObj = pickedObj.children()[0]
    flipped = transformUtils.copyFrame(frameObj.transform)
    flipped.PreMultiply()
    flipped.RotateY(180)
    frameObj.copyFrame(flipped)
    pickedObj._renderAllViews()
def makeTargetCameraTransform(self, rotateX=-40, rotateY=0, translateZ=-0.8, visualize=True):
    """Build a target camera transform by rotating/offsetting the table frame.

    :param rotateX, rotateY: rotations (degrees) applied in the table frame
    :param translateZ: offset (meters) along the rotated Z axis
    :param visualize: when True, show/update the 'target camera frame' object
    :return: the resulting vtkTransform
    """
    cameraToWorld = transformUtils.copyFrame(self.tableFrame.transform)
    cameraToWorld.PreMultiply()
    cameraToWorld.RotateX(rotateX)
    cameraToWorld.RotateY(rotateY)
    cameraToWorld.Translate((0, 0, translateZ))
    if visualize:
        name = 'target camera frame'
        isNewFrame = om.findObjectByName(name) is None
        frame = vis.updateFrame(cameraToWorld, name, scale=0.15)
        if isNewFrame:
            # First time: attach a frustum visualizer to the new frame.
            cameraFrustum = CameraFrustumVisualizer(self.imageManager, self.cameraName, frame, verbose=False, visFolder=frame)
            self.frustumVis['target camera'] = cameraFrustum
        self.targetCameraFrame = frame
    return cameraToWorld
def promotePolyDataItem(cls, obj):
    """Replace a polydata item with a MeshAffordanceItem holding the same
    mesh, preserving name, parent, children, properties and frame."""
    parent = obj.parent()
    view = obj.views[0]
    name = obj.getProperty('Name')
    polyData = obj.polyData
    props = obj.properties._properties
    childFrame = obj.getChildFrame()
    if childFrame:
        t = transformUtils.copyFrame(childFrame.transform)
    else:
        # No child frame: anchor the new frame at the mesh centroid.
        t = vtk.vtkTransform()
        t.PostMultiply()
        t.Translate(filterUtils.computeCentroid(polyData))
    # Store the mesh in frame-local coordinates.
    polyData = filterUtils.transformPolyData(polyData, t.GetLinearInverse())
    children = [c for c in obj.children() if c is not childFrame]
    meshId = cls.getMeshManager().add(polyData)
    # Swap the old item for a MeshAffordanceItem referencing the stored mesh.
    om.removeFromObjectModel(obj)
    obj = MeshAffordanceItem(name, view)
    obj.setProperty('Filename', meshId)
    om.addToObjectModel(obj, parentObj=parent)
    frame = vis.addChildFrame(obj)
    frame.copyFrame(t)
    # Reparent the original children (minus the old frame) onto the new item.
    for child in children:
        om.addToObjectModel(child, parentObj=obj)
    obj.syncProperties(props)
    return obj
def getFootstepRelativeTransform(self):
    """Collect each 'step N frame' transform and store it expressed in the
    planning frame (self.footstepsToPlan)."""
    self.footstepsToPlan = []
    for stepNum in xrange(1, self.numFootsteps + 1):
        frameName = 'step ' + str(stepNum) + ' frame'
        stepToWorld = transformUtils.copyFrame(om.findObjectByName(frameName).transform)
        self.footstepsToPlan.append(self.getTransformToPlanningFrame(stepToWorld))
def run(self):
    """Plan a gaze trajectory that visits each annotated target point while
    holding the gaze direction constraint."""
    self.ikPlanner = robotSystem.ikPlanner
    side = self.properties.getPropertyEnumValue('Side').lower()
    self.graspingHand = side
    targetPoints = self.getAnnotationInputPoints()
    gazeTargetFrame = self.getGazeTargetFrame()
    self.initGazeConstraintSet(gazeTargetFrame)
    numberOfSamples = len(targetPoints)
    for i in xrange(numberOfSamples):
        targetPos = targetPoints[i]
        # Translate a copy of the gaze frame to the sample point, keeping the
        # original orientation.
        targetFrame = transformUtils.copyFrame(gazeTargetFrame.transform)
        targetFrame.Translate(targetPos - np.array(targetFrame.GetPosition()))
        self.appendPositionConstraintForTargetFrame(targetFrame, i+1)
    gazeConstraint = self.constraintSet.constraints[0]
    assert isinstance(gazeConstraint, ikplanner.ikconstraints.WorldGazeDirConstraint)
    # Enforce the gaze direction over the whole trajectory span.
    gazeConstraint.tspan = [1.0, numberOfSamples]
    plan = self.constraintSet.runIkTraj()
    _addPlanItem(plan, '%s gaze plan' % gazeTargetFrame.getProperty('Name'), ManipulationPlanItem)
def flipHandSide():
    """Toggle the handedness of the picked grasp-seed object(s): swap
    geometry, color, mirrored frame and names."""
    for obj in [pickedObj] + pickedObj.children():
        if not isGraspSeed(obj):
            continue
        side = 'right' if obj.side == 'left' else 'left'
        obj.side = side
        color = [1.0, 1.0, 0.0]
        if side == 'right':
            color = [0.33, 1.0, 0.0]
        obj.setProperty('Color', color)
        polyData = handFactory.getNewHandPolyData(side)
        obj.setPolyData(polyData)
        # Mirror the hand by flipping its frame 180 deg about its own Y axis.
        handFrame = obj.children()[0]
        t = transformUtils.copyFrame(handFrame.transform)
        t.PreMultiply()
        t.RotateY(180)
        handFrame.copyFrame(t)
        # Rename object and frame to reflect the new side.
        objName = obj.getProperty('Name')
        frameName = handFrame.getProperty('Name')
        if side == 'left':
            obj.setProperty('Name', objName.replace("right", "left"))
            handFrame.setProperty('Name', frameName.replace("right", "left"))
        else:
            obj.setProperty('Name', objName.replace("left", "right"))
            handFrame.setProperty('Name', frameName.replace("left", "right"))
        obj._renderAllViews()
def createCellLocators(self, dataDict=None):
    """Build per-link vtkCellLocator structures from thresholded link meshes.

    :param dataDict: mapping linkName -> per-cell code array; cells whose code
        is 1 are kept. Links whose thresholded mesh is empty are skipped.
        Results are stored in self.locatorData.
    """
    self.locatorData = {}
    # Bug fix: the original called dataDict.iteritems() even when the default
    # None was passed, raising AttributeError. Treat None as an empty dict.
    if dataDict is None:
        return
    for linkName, cellCodeArray in dataDict.iteritems():
        polyData = vtk.vtkPolyData()
        self.robotStateModel.model.getLinkModelMesh(linkName, polyData)
        cellCodeArrayVTK = numpy_support.numpy_to_vtk(cellCodeArray)
        arrayName = "cellCodeArray"
        cellCodeArrayVTK.SetName(arrayName)
        polyData.GetCellData().AddArray(cellCodeArrayVTK)
        # Keep only cells whose code equals 1.
        thresholdRange = [1,1]
        polyData = filterUtils.thresholdCells(polyData, arrayName, thresholdRange)
        cellData = polyData.GetCellData()
        normals = cellData.GetNormals()
        if polyData.GetNumberOfCells() == 0:
            continue
        locator = vtk.vtkCellLocator()
        locator.SetDataSet(polyData)
        locator.BuildLocator()
        # Cache both directions of the mesh/world transform for fast lookups.
        meshToWorld = transformUtils.copyFrame(self.linkFrameContainer.getLinkFrame(linkName))
        worldToMesh = meshToWorld.GetLinearInverse()
        self.locatorData[linkName] = {'locator':locator, 'polyData': polyData, 'meshToWorld': meshToWorld,
                                      'worldToMesh': worldToMesh, 'cellData': cellData, 'normals': normals}
def updateObjectInCamera(self, obj, cameraObj):
    """Project obj's polydata into the camera image and update cameraObj's
    geometry in the image overlay."""
    imageView = self.imageView
    # Object-to-local transform; identity when the actor has no user transform.
    objToLocalT = transformUtils.copyFrame(obj.actor.GetUserTransform() or vtk.vtkTransform())
    localToCameraT = vtk.vtkTransform()
    self.imageQueue.getTransform("local", imageView.imageName, localToCameraT)
    # Compose object -> local -> camera.
    t = vtk.vtkTransform()
    t.PostMultiply()
    t.Concatenate(objToLocalT)
    t.Concatenate(localToCameraT)
    pd = filterUtils.transformPolyData(obj.polyData, t)
    """
    normals = pd.GetPointData().GetNormals()
    cameraToImageT = vtk.vtkTransform()
    imageQueue.getCameraProjectionTransform(imageView.imageName, cameraToImageT)
    pd = filterUtils.transformPolyData(pd, cameraToImageT)
    pts = vnp.getNumpyFromVtk(pd, 'Points')
    pts[:,0] /= pts[:,2]
    pts[:,1] /= pts[:,2]
    pd.GetPointData().SetNormals(normals)
    """
    self.imageQueue.projectPoints(imageView.imageName, pd)
    cameraObj.setPolyData(pd)
    self.addActorToImageOverlay(cameraObj, imageView)
def updateObjectInCamera(self, obj, cameraObj):
    """Project obj's polydata into the camera image and update cameraObj's
    geometry in the image overlay."""
    imageView = self.imageView
    # Object-to-local transform; identity when the actor has no user transform.
    objToLocalT = transformUtils.copyFrame(obj.actor.GetUserTransform() or vtk.vtkTransform())
    localToCameraT = vtk.vtkTransform()
    self.imageQueue.getTransform('local', imageView.imageName, localToCameraT)
    # Compose object -> local -> camera.
    t = vtk.vtkTransform()
    t.PostMultiply()
    t.Concatenate(objToLocalT)
    t.Concatenate(localToCameraT)
    pd = filterUtils.transformPolyData(obj.polyData, t)
    '''
    normals = pd.GetPointData().GetNormals()
    cameraToImageT = vtk.vtkTransform()
    imageQueue.getCameraProjectionTransform(imageView.imageName, cameraToImageT)
    pd = filterUtils.transformPolyData(pd, cameraToImageT)
    pts = vnp.getNumpyFromVtk(pd, 'Points')
    pts[:,0] /= pts[:,2]
    pts[:,1] /= pts[:,2]
    pd.GetPointData().SetNormals(normals)
    '''
    self.imageQueue.projectPoints(imageView.imageName, pd)
    cameraObj.setPolyData(pd)
    self.addActorToImageOverlay(cameraObj, imageView)
def fitSwitchBox(self, polyData, points):
    """Fit and spawn a switch-box affordance.

    :param polyData: scene point cloud
    :param points: [boxPosition, wallPoint]; a plane is fit around the box
        position with a search radius derived from the two points.
    """
    boxPosition = points[0]
    wallPoint = points[1]
    # find a frame that is aligned with wall
    searchRadius = 0.2
    planePoints, normal = segmentation.applyLocalPlaneFit(polyData, points[0], searchRadius=np.linalg.norm(points[1] - points[0]), searchRadiusEnd=1.0)
    obj = vis.updatePolyData(planePoints, 'wall plane points', color=[0,1,0], visible=False)
    obj.setProperty('Point Size', 7)
    # Make the wall normal face the viewer.
    viewDirection = segmentation.SegmentationContext.getGlobalInstance().getViewDirection()
    if np.dot(normal, viewDirection) < 0:
        normal = -normal
    origin = segmentation.computeCentroid(planePoints)
    # Build an orthonormal frame with X along the wall normal.
    zaxis = [0,0,1]
    xaxis = normal
    yaxis = np.cross(zaxis, xaxis)
    yaxis /= np.linalg.norm(yaxis)
    zaxis = np.cross(xaxis, yaxis)
    zaxis /= np.linalg.norm(zaxis)
    t = transformUtils.getTransformFromAxes(xaxis, yaxis, zaxis)
    # translate that frame to the box position
    t.PostMultiply()
    t.Translate(boxPosition)
    boxFrame = transformUtils.copyFrame(t)
    self.switchPlanner.spawnBoxAffordanceAtFrame(boxFrame)
def toggleFootstepWidget(displayPoint, view, useHorizontalWidget=False):
    """Toggle an editable footstep widget for the picked 'step N' object.

    :param displayPoint: pick location in view coordinates
    :param useHorizontalWidget: when True, keep only yaw so the widget stays
        level with the ground
    :return: True when the click was handled (widget created or removed)
    """
    obj, _ = vis.findPickedObject(displayPoint, view)
    if not obj:
        return False
    name = obj.getProperty('Name')
    if name in ('footstep widget', 'footstep widget frame'):
        # Clicking the widget itself dismisses it.
        om.removeFromObjectModel(om.findObjectByName('footstep widget'))
        return True
    match = re.match('^step (\d+)$', name)
    if not match:
        return False
    stepIndex = int(match.group(1))
    existingWidget = om.findObjectByName('footstep widget')
    if existingWidget:
        previousStep = existingWidget.stepIndex
        print 'have existing widget for step:', stepIndex
        om.removeFromObjectModel(existingWidget)
        if previousStep == stepIndex:
            print 'returning because widget was for selected step'
            return True
    footMesh = shallowCopy(obj.polyData)
    footFrame = transformUtils.copyFrame(obj.getChildFrame().transform)
    if useHorizontalWidget:
        rpy = [0.0, 0.0, transformUtils.rollPitchYawFromTransform(footFrame)[2]]
        footFrame = transformUtils.frameFromPositionAndRPY(footFrame.GetPosition(), np.degrees(rpy))
    footObj = vis.showPolyData(footMesh, 'footstep widget', parent='planning', alpha=0.2)
    footObj.stepIndex = stepIndex
    frameObj = vis.showFrame(footFrame, 'footstep widget frame', parent=footObj, scale=0.2)
    footObj.actor.SetUserTransform(frameObj.transform)
    footObj.setProperty('Color', obj.getProperty('Color'))
    frameObj.setProperty('Edit', True)
    # Constrain widget motion: translate in XY, rotate about Z only.
    rep = frameObj.widget.GetRepresentation()
    rep.SetTranslateAxisEnabled(2, False)
    rep.SetRotateAxisEnabled(0, False)
    rep.SetRotateAxisEnabled(1, False)
    frameObj.widget.HandleRotationEnabledOff()
    walkGoal = om.findObjectByName('walking goal')
    if walkGoal:
        walkGoal.setProperty('Edit', False)
    def onFootWidgetChanged(frame):
        # Push widget edits back into the footstep plan (0-based index).
        footstepsDriver.onStepModified(stepIndex - 1, frame)
    frameObj.connectFrameModified(onFootWidgetChanged)
    return True
def getCameraToTargetTransform(self, targetFrame):
    """Return (cameraToTarget transform, focal distance) for the view camera
    relative to the given target frame."""
    cameraToWorld = self.getCameraTransform()
    worldToTarget = transformUtils.copyFrame(targetFrame).GetLinearInverse()
    cameraToTarget = transformUtils.concatenateTransforms([cameraToWorld, worldToTarget])
    focalPoint = np.array(self.camera.GetFocalPoint())
    cameraPosition = np.array(self.camera.GetPosition())
    focalDistance = np.linalg.norm(focalPoint - cameraPosition)
    return cameraToTarget, focalDistance
def toggleFootstepWidget(displayPoint, view, useHorizontalWidget=False):
    """Toggle an editable footstep widget for the picked 'step N' object.

    :param displayPoint: pick location in view coordinates
    :param useHorizontalWidget: when True, keep only yaw so the widget stays
        level with the ground
    :return: True when the click was handled (widget created or removed)
    """
    obj, _ = vis.findPickedObject(displayPoint, view)
    if not obj:
        return False
    name = obj.getProperty("Name")
    if name in ("footstep widget", "footstep widget frame"):
        # Clicking the widget itself dismisses it.
        om.removeFromObjectModel(om.findObjectByName("footstep widget"))
        return True
    match = re.match("^step (\d+)$", name)
    if not match:
        return False
    stepIndex = int(match.group(1))
    existingWidget = om.findObjectByName("footstep widget")
    if existingWidget:
        previousStep = existingWidget.stepIndex
        om.removeFromObjectModel(existingWidget)
        if previousStep == stepIndex:
            # Toggling the same step off.
            return True
    footMesh = shallowCopy(obj.polyData)
    footFrame = transformUtils.copyFrame(obj.getChildFrame().transform)
    if useHorizontalWidget:
        rpy = [0.0, 0.0, transformUtils.rollPitchYawFromTransform(footFrame)[2]]
        footFrame = transformUtils.frameFromPositionAndRPY(
            footFrame.GetPosition(), np.degrees(rpy)
        )
    footObj = vis.showPolyData(
        footMesh, "footstep widget", parent="planning", alpha=0.2
    )
    footObj.stepIndex = stepIndex
    frameObj = vis.showFrame(
        footFrame, "footstep widget frame", parent=footObj, scale=0.2
    )
    footObj.actor.SetUserTransform(frameObj.transform)
    footObj.setProperty("Color", obj.getProperty("Color"))
    frameObj.setProperty("Edit", True)
    # Constrain widget motion: translate in XY, rotate about Z only.
    rep = frameObj.widget.GetRepresentation()
    rep.SetTranslateAxisEnabled(2, False)
    rep.SetRotateAxisEnabled(0, False)
    rep.SetRotateAxisEnabled(1, False)
    frameObj.widget.HandleRotationEnabledOff()
    walkGoal = om.findObjectByName("walking goal")
    if walkGoal:
        walkGoal.setProperty("Edit", False)
    return True
def setTfRootForCamera():
    """Re-root the TF tree so the camera_depth_optical_frame coincides with
    the current camera-to-world transform."""
    tfVis.setTfToWorld(vtk.vtkTransform())
    opticalFrameObj = om.findObjectByName('camera_depth_optical_frame')
    rootToOptical = transformUtils.copyFrame(opticalFrameObj.transform).GetLinearInverse()
    cameraToWorld = getCameraToWorld()
    tfToWorld = transformUtils.concatenateTransforms([rootToOptical, cameraToWorld])
    tfVis.setTfToWorld(tfToWorld)
def reset(self):
    """Re-anchor the boom: store the target pose and capture the current
    camera-to-target transform and focal distance."""
    self.storeTargetPose()
    worldToTarget = transformUtils.copyFrame(self.targetFrame.transform).GetLinearInverse()
    cameraToWorld = self.getCameraTransform()
    self.boomTransform = transformUtils.concatenateTransforms([cameraToWorld, worldToTarget])
    focalPoint = np.array(self.camera.GetFocalPoint())
    cameraPosition = np.array(self.camera.GetPosition())
    self.focalDistance = np.linalg.norm(focalPoint - cameraPosition)
def get_model_to_object_transform(self, object_name):
    """
    Returns the transform from model to object for the given object

    :param object_name: str
    :return: vtkTransform
    """
    template = self._object_vis_containers[object_name]['template']
    return transformUtils.copyFrame(template.actor.GetUserTransform())
def planGraspLift(self):
    """Plan a small vertical lift (4 cm) of the grasped board and queue the
    resulting manipulation plan."""
    liftOffset = transformUtils.frameFromPositionAndRPY([0.00, 0.0, 0.04], [0, 0, 0])
    liftToWorld = transformUtils.copyFrame(self.board.graspFrame.transform)
    liftToWorld.Concatenate(liftOffset)
    self.board.liftFrame = vis.updateFrame(liftToWorld, 'lift frame', parent="affordances", visible=False, scale=0.2)

    startPose = self.getPlanningStartPose()
    constraintSet = self.ikPlanner.planEndEffectorGoal(startPose, self.graspingHand, self.board.liftFrame,
                                                      lockBase=self.lockBase, lockBack=self.lockBack)
    endPose, info = constraintSet.runIk()
    self.addPlan(constraintSet.runIkTraj())
def cycleEndEffector(self):
    """Move 'Edit' focus to the next end-effector frame (wrapping) and reset
    keyframe state around the newly active frame."""
    # Bug fix: original tested `len(...) is not 0` -- an identity comparison
    # on an int that only works by CPython small-int caching. Use truthiness.
    if self.endEffectorFrames:
        self.clearKeyframePoses()
        self.endEffectorFrames[self.endEffectorIndex].setProperty("Edit", False)
        self.endEffectorIndex = (self.endEffectorIndex + 1) % len(self.endEffectorFrames)
        self.endEffectorFrames[self.endEffectorIndex].setProperty("Edit", True)
        om.setActiveObject(self.endEffectorFrames[self.endEffectorIndex])
        self.baseFrame = self.endEffectorFrames[self.endEffectorIndex]
        self.baseTransform = transformUtils.copyFrame(self.baseFrame.transform)
        self.resetDeltas()
        self.addKeyframePose()
def computeNextRoomFrame(self):
    """Build the desired face frame at self.nextPosition relative to the
    target frame and show it under the target affordance."""
    assert self.targetAffordance
    desired = transformUtils.frameFromPositionAndRPY(self.nextPosition, [0, 0, 0])
    # Keep a pristine local copy before composing with the target frame.
    self.faceTransformLocal = transformUtils.copyFrame(desired)
    desired.Concatenate(self.targetFrame)
    self.faceFrameDesired = vis.showFrame(desired, 'target frame desired', parent=self.targetAffordance, visible=False, scale=0.2)
def _computeBaseTransform(self, frame):
    """Compute the base transform for a frame in the sync group, undoing the
    delta already applied by another (still-alive) member frame."""
    currentDelta = None
    for frameId, frameData in self.frames.items():
        if frameData.ref() is None:
            # Weak reference expired: drop the stale entry.
            self._removeFrameId(frameId)
        elif frameData.ref() is frame:
            continue
        else:
            # delta = inverse(otherBase) * otherCurrent, taken from the first
            # other live frame found.
            currentDelta = transformUtils.copyFrame(frameData.baseTransform.GetLinearInverse())
            currentDelta.Concatenate(transformUtils.copyFrame(frameData.ref().transform))
            break
    t = transformUtils.copyFrame(frame.transform)
    t.PostMultiply()
    if currentDelta:
        t.Concatenate(currentDelta.GetLinearInverse())
    return t
def addNewFrame():
    """Add a new synced child frame to the affordance, positioned at the
    picked point but sharing the affordance frame's orientation."""
    mainFrame = affordanceObj.getChildFrame()
    t = transformUtils.copyFrame(mainFrame.transform)
    t.PostMultiply()
    offset = np.array(pickedPoint) - np.array(t.GetPosition())
    t.Translate(offset)
    frameName = '%s frame %d' % (affordanceObj.getProperty('Name'), len(affordanceObj.children()))
    newFrame = vis.showFrame(t, frameName, scale=0.2, parent=affordanceObj)
    mainFrame.getFrameSync().addFrame(newFrame, ignoreIncoming=True)
def computeBoardGraspFrames(self):
    """Create 'grasp left frame' and 'grasp right frame' as hand offsets
    composed onto the board's frame, and add them to the DRC view."""
    t = transformUtils.frameFromPositionAndRPY(self.graspLeftHandFrameXYZ, self.graspLeftHandFrameRPY)
    t_copy = transformUtils.copyFrame(t)
    t_copy.Concatenate(self.frame.transform)
    self.graspLeftFrame = vis.updateFrame(t_copy, 'grasp left frame', parent=self.affordance, visible=False, scale=0.2)
    self.graspLeftFrame.addToView(app.getDRCView())
    # Same construction for the right hand.
    t = transformUtils.frameFromPositionAndRPY(self.graspRightHandFrameXYZ, self.graspRightHandFrameRPY)
    t_copy = transformUtils.copyFrame(t)
    t_copy.Concatenate(self.frame.transform)
    self.graspRightFrame = vis.updateFrame(t_copy, 'grasp right frame', parent=self.affordance, visible=False, scale=0.2)
    self.graspRightFrame.addToView(app.getDRCView())
def run(self):
    """Project the selected affordance's frame onto the ground plane (XY kept,
    Z taken from the ground frame) and publish it as a new frame."""
    aff = self.getSelectedAffordance()
    affToWorld = aff.getChildFrame().transform
    groundZ = self.getGroundFrame().transform.GetPosition()[2]
    groundedXYZ = np.hstack([affToWorld.GetPosition()[0:2], groundZ])
    result = transformUtils.copyFrame(affToWorld)
    result.Translate(groundedXYZ - np.array(result.GetPosition()))
    outputName = self.properties.getProperty('Frame output name') or '%s ground frame' % aff.getProperty('Name')
    vis.updateFrame(result, outputName, scale=0.2)
def compute_transforms(self):
    """
    Make sure that grasp_data is set before you call this function
    :return:
    """
    # Lazily cache gripper-palm-to-world; this also happens to be the
    # gripper-palm-to-object transform.
    if self.T_W_G is None:
        self.T_W_G = transformUtils.copyFrame(self.grasp_data.grasp_frame)
    self._T_W_Gn = transformUtils.concatenateTransforms([self.T_W_G, self._T_goal_object])
def cycleEndEffector(self):
    """Move 'Edit' focus to the next end-effector frame (wrapping) and reset
    keyframe state around the newly active frame."""
    # Bug fix: original tested `len(...) is not 0` -- an identity comparison
    # on an int that only works by CPython small-int caching. Use truthiness.
    if self.endEffectorFrames:
        self.clearKeyframePoses()
        self.endEffectorFrames[self.endEffectorIndex].setProperty('Edit', False)
        self.endEffectorIndex = (self.endEffectorIndex + 1) % len(self.endEffectorFrames)
        self.endEffectorFrames[self.endEffectorIndex].setProperty('Edit', True)
        om.setActiveObject(self.endEffectorFrames[self.endEffectorIndex])
        self.baseFrame = self.endEffectorFrames[self.endEffectorIndex]
        self.baseTransform = transformUtils.copyFrame(self.baseFrame.transform)
        self.resetDeltas()
        self.addKeyframePose()
def drawObjectInCamera(objectName,visible=True):
    """Project the named object's geometry into the CAMERA_LEFT image and show
    it under the 'camera overlay' folder."""
    v = imageView.view
    q = cameraview.imageManager.queue
    localToCameraT = vtk.vtkTransform()
    q.getTransform('local', 'CAMERA_LEFT', localToCameraT)
    obj = om.findObjectByName(objectName)
    if obj is None:
        return
    # Object-to-local transform; identity when the actor has no user transform.
    objToLocalT = transformUtils.copyFrame(obj.actor.GetUserTransform() or vtk.vtkTransform())
    objPolyDataOriginal = obj.polyData
    pd = objPolyDataOriginal
    pd = filterUtils.transformPolyData(pd, objToLocalT)
    pd = filterUtils.transformPolyData(pd, localToCameraT)
    q.projectPoints('CAMERA_LEFT', pd)
    vis.showPolyData(pd, ('overlay ' + objectName), view=v, color=[0,1,0],parent='camera overlay',visible=visible)
def run(self):
    """Create an output frame offset from the input frame by the configured
    translation/rotation, and keep the two frames synchronized."""
    inputFrame = self.getInputFrame()
    xyz = self.properties.getProperty('Translation')
    rpy = self.properties.getProperty('Rotation')
    result = transformUtils.frameFromPositionAndRPY(xyz, rpy)
    result.PostMultiply()
    result.Concatenate(transformUtils.copyFrame(inputFrame.transform))
    outputFrame = vis.updateFrame(result, self.properties.getProperty('Frame output name'), scale=0.2, parent=inputFrame.parent())
    # Lazily create the frame sync on the input frame, then link the output.
    if not hasattr(inputFrame, 'frameSync'):
        inputFrame.frameSync = vis.FrameSync()
        inputFrame.frameSync.addFrame(inputFrame)
    inputFrame.frameSync.addFrame(outputFrame, ignoreIncoming=True)
def spawnRunningBoardAffordance(self):
    """Spawn (or return the existing) 'running board' box affordance centered
    just below the robot's current feet midpoint."""
    dims = [0.4, 1.0, 0.05]
    stanceFrame = self.robotSystem.footstepsDriver.getFeetMidPoint(self.robotSystem.robotStateModel, useWorldZ=False)
    boardFrame = transformUtils.copyFrame(stanceFrame)
    boardFrame.PreMultiply()
    # Drop by half the thickness so the board's top face is at sole height.
    boardFrame.Translate(0.0, 0.0, -dims[2] / 2.0)
    existing = om.findObjectByName('running board')
    if existing:
        return existing
    desc = dict(classname='BoxAffordanceItem', Name='running board',
                Dimensions=dims, pose=transformUtils.poseFromTransform(boardFrame))
    return self.robotSystem.affordanceManager.newAffordanceFromDescription(desc)
def spawnFootstepFrame(self):
    """Show the 'switch box stance frame': the foot-to-box offset composed on
    the Switch Box frame, raised to the robot's current stance height."""
    # should snap this to terrain
    boxToWorld = om.findObjectByName('Switch Box').getChildFrame().transform
    goalFrame = transformUtils.copyFrame(self.footToBox)
    goalFrame.PostMultiply()
    goalFrame.Concatenate(boxToWorld)
    # Shift vertically so the goal matches the robot's current stance height.
    heightError = self.getStanceFrame().GetPosition()[2] - goalFrame.GetPosition()[2]
    goalFrame.PreMultiply()
    goalFrame.Translate(0.0, 0.0, heightError)
    vis.updateFrame(goalFrame, 'switch box stance frame', scale=0.2)
def from_data_folder(data_folder, config=None):
    """Construct a FusionReconstruction from a standard data folder layout.

    Expects images/pose_data.yaml, images/camera_info.yaml, images.posegraph
    and images.vtp under data_folder.

    :param data_folder: root directory of the reconstruction data
    :param config: optional config dict; defaults to load_default_config()
    :return: an initialized FusionReconstruction
    """
    fr = FusionReconstruction()
    fr.data_dir = data_folder
    if config is None:
        config = FusionReconstruction.load_default_config()
    pose_data_filename = os.path.join(data_folder, 'images', 'pose_data.yaml')
    camera_info_filename = os.path.join(data_folder, 'images', 'camera_info.yaml')
    fr.config = config
    fr.kinematics_pose_data = utils.getDictFromYamlFilename(pose_data_filename)
    fr.camera_info = utils.getDictFromYamlFilename(camera_info_filename)
    fr.fusion_posegraph_filename = os.path.join(data_folder, 'images.posegraph')
    fr.fusion_pose_data.first_frame_to_world = transformUtils.copyFrame(fr._reconstruction_to_world)
    fr.reconstruction_filename = os.path.join(fr.data_dir, 'images.vtp')
    fr.setup()
    return fr
def makeStepMessages(self, stepFrames, leadingFoot, snapToTerrain=False):
    """Convert step frames into a list of lcmdrc.footstep_t messages.

    :param stepFrames: iterable of sole-level step transforms
    :param leadingFoot: 'left' or 'right'; determines foot alternation
    :param snapToTerrain: when True, leave z/roll/pitch free for the planner
    :return: list of footstep_t messages
    """
    assert leadingFoot in ('left', 'right')
    isRightFootOffset = 0 if leadingFoot == 'left' else 1
    leftPoints, rightPoints = FootstepsDriver.getContactPts()
    # note, assumes symmetrical feet. the loop below should be
    # updated to alternate between left/right contact point sets
    footOriginToSole = -np.mean(leftPoints, axis=0)
    stepMessages = []
    for i, stepFrame in enumerate(stepFrames):
        # Shift from sole level to the foot origin expected by the planner.
        t = transformUtils.copyFrame(stepFrame)
        t.PreMultiply()
        t.Translate(footOriginToSole)
        step = lcmdrc.footstep_t()
        step.pos = positionMessageFromFrame(t)
        step.is_right_foot = (i + isRightFootOffset) % 2
        step.params = self.footstepsDriver.getDefaultStepParams()
        step.fixed_x = True
        step.fixed_y = True
        step.fixed_z = True
        step.fixed_roll = True
        step.fixed_pitch = True
        step.fixed_yaw = True
        if snapToTerrain:
            # Let the planner adjust height and foot tilt to the terrain.
            step.fixed_z = False
            step.fixed_roll = False
            step.fixed_pitch = False
        stepMessages.append(step)
    return stepMessages
def addNewFrame():
    """Add a new synced child frame to the affordance, positioned at the
    picked point but sharing the affordance frame's orientation."""
    t = transformUtils.copyFrame(affordanceObj.getChildFrame().transform)
    t.PostMultiply()
    # Move the copied frame so its origin sits at the picked point.
    t.Translate(np.array(pickedPoint) - np.array(t.GetPosition()))
    newFrame = vis.showFrame(t, '%s frame %d' % (affordanceObj.getProperty('Name'), len(affordanceObj.children())), scale=0.2, parent=affordanceObj)
    # Keep the new frame synchronized with the affordance's main frame.
    affordanceObj.getChildFrame().getFrameSync().addFrame(newFrame, ignoreIncoming=True)
def drawFootstepPlan(self, msg, folder, left_color=None, right_color=None, alpha=1.0):
    """Render a footstep plan message: foot meshes with gradual shading,
    per-step frames, terrain slices and optional contact volumes.

    :param msg: footstep plan message with .footsteps and .num_steps
    :param folder: object-model folder that receives the step objects
    :param left_color, right_color: optional RGB overrides per foot
    :param alpha: opacity for the foot meshes
    """
    # Clear previous plan visualization.
    for step in folder.children():
        om.removeFromObjectModel(step)
    allTransforms = []
    volFolder = getWalkingVolumesFolder()
    map(om.removeFromObjectModel, volFolder.children())
    slicesFolder = getTerrainSlicesFolder()
    map(om.removeFromObjectModel, slicesFolder.children())
    for i, footstep in enumerate(msg.footsteps):
        trans = footstep.pos.translation
        trans = [trans.x, trans.y, trans.z]
        quat = footstep.pos.rotation
        quat = [quat.w, quat.x, quat.y, quat.z]
        footstepTransform = transformUtils.transformFromPose(trans, quat)
        allTransforms.append(footstepTransform)
        # The first two entries are the current feet poses, not planned steps.
        if i < 2:
            continue
        if footstep.is_right_foot:
            mesh = getRightFootMesh()
            if (right_color is None):
                color = getRightFootColor()
            else:
                color = right_color
        else:
            mesh = getLeftFootMesh()
            if (left_color is None):
                color = getLeftFootColor()
            else:
                color = left_color
        # add gradual shading to steps to indicate destination
        frac = float(i)/ float(msg.num_steps-1)
        this_color = [0,0,0]
        this_color[0] = 0.25*color[0] + 0.75*frac*color[0]
        this_color[1] = 0.25*color[1] + 0.75*frac*color[1]
        this_color[2] = 0.25*color[2] + 0.75*frac*color[2]
        if self.show_contact_slices:
            self.drawContactVolumes(footstepTransform, color)
        contact_pts_left, contact_pts_right = FootstepsDriver.getContactPts()
        if footstep.is_right_foot:
            sole_offset = np.mean(contact_pts_right, axis=0)
        else:
            sole_offset = np.mean(contact_pts_left, axis=0)
        # Terrain slice frame: anchored at the previous same-side sole, yawed
        # toward the current sole.
        t_sole_prev = frameFromPositionMessage(msg.footsteps[i-2].pos)
        t_sole_prev.PreMultiply()
        t_sole_prev.Translate(sole_offset)
        t_sole = transformUtils.copyFrame(footstepTransform)
        t_sole.Translate(sole_offset)
        yaw = np.arctan2(t_sole.GetPosition()[1] - t_sole_prev.GetPosition()[1], t_sole.GetPosition()[0] - t_sole_prev.GetPosition()[0])
        T_terrain_to_world = transformUtils.frameFromPositionAndRPY([t_sole_prev.GetPosition()[0], t_sole_prev.GetPosition()[1], 0], [0, 0, math.degrees(yaw)])
        path_dist = np.array(footstep.terrain_path_dist)
        height = np.array(footstep.terrain_height)
        # if np.any(height >= trans[2]):
        terrain_pts_in_local = np.vstack((path_dist, np.zeros(len(footstep.terrain_path_dist)), height))
        d = DebugData()
        for j in range(terrain_pts_in_local.shape[1]-1):
            d.addLine(terrain_pts_in_local[:,j], terrain_pts_in_local[:,j+1], radius=0.01)
        obj = vis.showPolyData(d.getPolyData(), 'terrain slice', parent=slicesFolder, visible=slicesFolder.getProperty('Visible'), color=[.8,.8,.3])
        obj.actor.SetUserTransform(T_terrain_to_world)
        # Optional debug rendering of step infeasibility arrows (disabled).
        renderInfeasibility = False
        if renderInfeasibility and footstep.infeasibility > 1e-6:
            d = DebugData()
            start = allTransforms[i-1].GetPosition()
            end = footstepTransform.GetPosition()
            d.addArrow(start, end, 0.02, 0.005, startHead=True, endHead=True)
            vis.showPolyData(d.getPolyData(), 'infeasibility %d -> %d' % (i-2, i-1), parent=folder, color=[1, 0.2, 0.2])
        stepName = 'step %d' % (i-1)
        obj = vis.showPolyData(mesh, stepName, color=this_color, alpha=alpha, parent=folder)
        obj.setIcon(om.Icons.Feet)
        frameObj = vis.showFrame(footstepTransform, stepName + ' frame', parent=obj, scale=0.3, visible=False)
        obj.actor.SetUserTransform(footstepTransform)
        obj.addProperty('Support Contact Groups', footstep.params.support_contact_groups, attributes=om.PropertyAttributes(enumNames=['Whole Foot', 'Front 2/3', 'Back 2/3']))
        obj.properties.setPropertyIndex('Support Contact Groups', 0)
        obj.footstep_index = i
        obj.footstep_property_callback = obj.properties.connectPropertyChanged(functools.partial(self.onFootstepPropertyChanged, obj))
        self.drawContactPts(obj, footstep, color=this_color)
def getFrameToOriginTransform(self, t):
    """Return a copy of t expressed relative to the polaris origin frame."""
    tCopy = transformUtils.copyFrame(t)
    tCopy.PostMultiply()
    tCopy.Concatenate(self.polaris.originFrame.transform.GetLinearInverse())
    # Debug output of the resulting (position, quaternion) pose.
    print transformUtils.poseFromTransform(tCopy)
    return tCopy