def processSnippet(): obj = om.getOrCreateContainer("continuous") om.getOrCreateContainer("cont debug", obj) if continuouswalkingDemo.processContinuousStereo: polyData = ioUtils.readPolyData(os.path.join(dataDir, "terrain/block_snippet_stereo.vtp")) polyData = segmentation.applyVoxelGrid(polyData, leafSize=0.01) else: polyData = ioUtils.readPolyData(os.path.join(dataDir, "terrain/block_snippet.vtp")) vis.updatePolyData(polyData, "walking snapshot trimmed", parent="continuous") standingFootName = "l_foot" standingFootFrame = robotStateModel.getLinkFrame(standingFootName) vis.updateFrame(standingFootFrame, standingFootName, parent="continuous", visible=False) # Step 2: find all the surfaces in front of the robot (about 0.75sec) clusters = segmentation.findHorizontalSurfaces(polyData) if clusters is None: print "No cluster found, stop walking now!" return # Step 3: find the corners of the minimum bounding rectangles blocks, match_idx, groundPlane = cwdemo.extractBlocksFromSurfaces(clusters, standingFootFrame) footsteps = cwdemo.placeStepsOnBlocks(blocks, groundPlane, standingFootName, standingFootFrame) cwdemo.drawFittedSteps(footsteps)
def onOpenVrml(self, filename):
    """Load a VRML file and show each of its meshes under a folder named after the file.

    Fix: the original loop unpacked ``mesh, color = pair``, rebinding ``color`` and
    shadowing the list of colors returned by readVrml. That only worked because
    Python 2's zip is eager; a distinct name removes the latent bug.
    """
    meshes, colors = ioUtils.readVrml(filename)
    folder = om.getOrCreateContainer(os.path.basename(filename), parentObj=om.getOrCreateContainer("files"))
    for i, (mesh, meshColor) in enumerate(zip(meshes, colors)):
        obj = vis.showPolyData(mesh, "mesh %d" % i, color=meshColor, parent=folder)
        vis.addChildFrame(obj)
def getFootstepsFolder():
    """Return the 'footstep plan' container, creating it collapsed with a feet icon on first use."""
    folder = om.findObjectByName('footstep plan')
    if folder is not None:
        return folder
    folder = om.getOrCreateContainer('footstep plan', parentObj=om.getOrCreateContainer('planning'))
    folder.setIcon(om.Icons.Feet)
    om.collapse(folder)
    return folder
def onHideBDIButton(self):
    """Hide the BDI plan visualization and delete its object-model folders.

    Fix: the original called om.getOrCreateContainer on each folder before removing
    it, which creates an empty folder as a side effect whenever it did not already
    exist. Looking the folder up (the pattern used elsewhere in this codebase,
    e.g. showFusedMaps) avoids that.
    """
    self.driver.showBDIPlan = False
    self.driver.bdiRobotModel.setProperty('Visible', False)
    for folderName in ("BDI footstep plan", "BDI adj footstep plan"):
        om.removeFromObjectModel(om.findObjectByName(folderName))
def showFusedMaps(self):
    """Re-draw the stored point clouds in the current local frame under a fresh 'stereo' folder."""
    om.removeFromObjectModel(om.findObjectByName("stereo"))
    om.getOrCreateContainer("stereo")

    q = imageManager.queue
    cameraToLocalNow = vtk.vtkTransform()
    utime = q.getCurrentImageTime("CAMERA_TSDF")
    q.getTransform("CAMERA_LEFT", "local", utime, cameraToLocalNow)
    cameraToLocalFusedNow = vtk.vtkTransform()
    q.getTransform("CAMERA_LEFT_ALT", "local", utime, cameraToLocalFusedNow)

    clouds = zip(self.pointClouds, self.cameraToLocalFusedTransforms)
    for i, (cloud, cameraToLocalFused) in enumerate(clouds):
        # Map from the fused frame at capture time into the local frame now.
        fusedNowToLocalNow = vtk.vtkTransform()
        fusedNowToLocalNow.PreMultiply()
        fusedNowToLocalNow.Concatenate(cameraToLocalNow)
        fusedNowToLocalNow.Concatenate(cameraToLocalFusedNow.GetLinearInverse())

        fusedTransform = vtk.vtkTransform()
        fusedTransform.PreMultiply()
        fusedTransform.Concatenate(fusedNowToLocalNow)
        fusedTransform.Concatenate(cameraToLocalFused)

        pd = filterUtils.transformPolyData(cloud, fusedTransform)
        vis.showFrame(fusedTransform, ("cloud frame " + str(i)), visible=True, scale=0.2, parent="stereo")
        vis.showPolyData(pd, ("stereo " + str(i)), parent="stereo", colorByName="rgb_colors")
def showHandSamples(self, numberOfSamples=15):
    """Show semi-transparent hand models sampled evenly along the interpolation spline (folder hidden by default)."""
    om.removeFromObjectModel(om.findObjectByName('sampled hands'))
    handFolder = om.getOrCreateContainer('sampled hands', parentObj=om.getOrCreateContainer('debug'))
    denom = float(numberOfSamples - 1)
    for i in xrange(numberOfSamples):
        sampleFrame = self.splineInterp(i / denom)
        handObj, _ = self.handFactory.placeHandModelWithTransform(
            sampleFrame, self.view, side=self.side, name='sample %d' % i, parent=handFolder)
        handObj.setProperty('Alpha', 0.3)
    handFolder.setProperty('Visible', False)
def drawBDIFootstepPlanAdjusted(self):
    """Redraw the adjusted BDI footstep plan in a fresh, collapsed folder."""
    if self.bdi_plan_adjusted is None:
        return
    # Remove the stale folder, then recreate it empty.
    om.removeFromObjectModel(om.getOrCreateContainer('BDI adj footstep plan'))
    folder = om.getOrCreateContainer('BDI adj footstep plan')
    folder.setIcon(om.Icons.Feet)
    om.collapse(folder)
    self.drawFootstepPlan(self.bdi_plan_adjusted, folder, [1.0, 1.0, 0.0], [0.0, 1.0, 1.0])
def init(view):
    """Start the multisense driver (and map server when available) and register sensor items."""
    global _multisenseItem
    global multisenseDriver

    m = MultiSenseSource(view)
    m.start()
    multisenseDriver = m

    sensorsFolder = om.getOrCreateContainer('sensors')
    _multisenseItem = MultisenseItem(m)
    om.addToObjectModel(_multisenseItem, sensorsFolder)

    # The map server only exists in drc builds that provide vtkMapServerSource.
    if hasattr(drc, 'vtkMapServerSource'):
        mapServerSource = MapServerSource(view, callbackFunc=view.render)
        mapsServerContainer = om.ObjectModelItem('Map Server', icon=om.Icons.Robot)
        mapsServerContainer.source = mapServerSource
        om.addToObjectModel(mapsServerContainer, parentObj=sensorsFolder)
        mapServerSource.start()
    else:
        mapServerSource = None

    spindleDebug = SpindleAxisDebug(multisenseDriver)
    spindleDebug.addToView(view)
    om.addToObjectModel(spindleDebug, sensorsFolder)

    return multisenseDriver, mapServerSource
def getBDIAdjustedFootstepsFolder():
    """Return the 'BDI adj footstep plan' container, creating it collapsed with a feet icon on first use."""
    folder = om.findObjectByName('BDI adj footstep plan')
    if folder is not None:
        return folder
    folder = om.getOrCreateContainer('BDI adj footstep plan')
    folder.setIcon(om.Icons.Feet)
    om.collapse(folder)
    return folder
def getTerrainSlicesFolder():
    """Return the 'terrain slices' container under the footsteps folder, hidden and collapsed on first use."""
    folder = om.findObjectByName('terrain slices')
    if folder is not None:
        return folder
    folder = om.getOrCreateContainer('terrain slices', parentObj=getFootstepsFolder())
    folder.setProperty('Visible', False)
    om.collapse(folder)
    return folder
def _addPlanItem(plan, name, itemClass):
    """Wrap *plan* in an *itemClass* item and (re)place it under the 'segmentation' folder."""
    assert plan is not None
    # Replace any previous item of the same name.
    om.removeFromObjectModel(om.findObjectByName(name))
    item = itemClass(name)
    item.plan = plan
    om.addToObjectModel(item, om.getOrCreateContainer('segmentation'))
    return item
def _onPropertyChanged(self, propertySet, propertyName):
    """Forward property changes to the underlying actor, then re-render all views."""
    om.ObjectModelItem._onPropertyChanged(self, propertySet, propertyName)

    if propertyName == 'Visible':
        makeVisible = self.getProperty(propertyName)
        self.actor.SetVisibility(makeVisible)
        # NOTE(review): the original bound this container but never used it;
        # the call is kept for its (possible) folder-creation side effect.
        om.getOrCreateContainer('COLLECTIONS')
        for coll in self.children():
            coll.setProperty('Visible', makeVisible)
    else:
        # All remaining properties map one-to-one onto actor setters.
        setters = {
            'Start': self.actor.setRangeStart,
            'End': self.actor.setRangeEnd,
            'Points Alpha': self.actor.setAlphaPoints,
            'Fill Scans': self.actor.setFillScans,
            'Point Width': self.actor.setPointWidth,
            'Pose Width': self.actor.setPoseWidth,
            'Color Poses': self.actor.setColorPoses,
            'Color by Time': self.actor.setColorByTime,
            'Elevation by Time': self.actor.setElevationByTime,
            'Elevation by Collection': self.actor.setElevationByCollection,
            'Max Elevation': self.actor.setMaxElevation,
        }
        setter = setters.get(propertyName)
        if setter is not None:
            setter(self.getProperty(propertyName))

    self.renderAllViews()
def showPolyData(polyData, name, color=None, colorByName=None, colorByRange=None, alpha=1.0, visible=True, view=None, parent='segmentation', cls=None): view = view or app.getCurrentRenderView() assert view cls = cls or PolyDataItem item = cls(name, polyData, view) if isinstance(parent, str): parentObj = om.getOrCreateContainer(parent) else: parentObj = parent om.addToObjectModel(item, parentObj) item.setProperty('Visible', visible) item.setProperty('Alpha', alpha) if colorByName and colorByName not in item.getArrayNames(): print 'showPolyData(colorByName=%s): array not found' % colorByName colorByName = None if colorByName: item.setProperty('Color By', colorByName) item.colorBy(colorByName, colorByRange) else: color = [1.0, 1.0, 1.0] if color is None else color item.setProperty('Color', [float(c) for c in color]) item.colorBy(None) return item
def loadRobotModel(name, view=None, parent='planning', urdfFile=None, color=None, visible=True, colorMode='URDF Colors'):
    """Load a robot model from a URDF, add it to the object model and return (item, jointController).

    Raises Exception when the URDF cannot be loaded.
    """
    if not urdfFile:
        urdfFile = drcargs.getDirectorConfig()['urdfConfig']['default']
    if isinstance(parent, str):
        parent = om.getOrCreateContainer(parent)

    model = loadRobotModelFromFile(urdfFile)
    if not model:
        raise Exception('Error loading robot model from file: %s' % urdfFile)

    obj = RobotModelItem(model)
    om.addToObjectModel(obj, parent)
    obj.setProperty('Visible', visible)
    obj.setProperty('Name', name)
    obj.setProperty('Color', color or getRobotGrayColor())

    # Map the friendly mode name onto the item's integer 'Color Mode';
    # unknown names leave the item's default untouched.
    colorModes = {'Solid Color': 0, 'Textures': 1, 'URDF Colors': 2}
    if colorMode in colorModes:
        obj.setProperty('Color Mode', colorModes[colorMode])

    if view is not None:
        obj.addToView(view)

    jointController = jointcontrol.JointController([obj])
    fixedPointFile = drcargs.getDirectorConfig()['fixedPointFile']
    jointController.setPose('q_nom', jointController.loadPoseFromFile(fixedPointFile))
    return obj, jointController
def makeSplineGraspConstraints(self, ikPlanner, positionTolerance=0.03, angleToleranceInDegrees=15):
    """Build IK grasp constraints at sample times along the final spline segment.

    Fix: removed dead code — ``segments = zip(params, params[1:])`` was computed
    but never used (only stale commented-out experiments referenced it).

    :param ikPlanner: planner providing grasp-frame helpers and constraint factories.
    :param positionTolerance: position slack (meters) for intermediate samples.
    :param angleToleranceInDegrees: orientation slack for intermediate samples.
    :return: list of position/orientation constraints with per-sample tspans.
    """
    params = self.computeHandleParameterization()

    # Sample only the last parameter segment; np.unique also sorts the times.
    times = np.linspace(params[-2], params[-1], 6)
    times = np.hstack(times)
    times = np.unique(times)

    frames = [self.splineInterp(t) for t in times]

    # Debug visualization of the sampled frames.
    folder = om.getOrCreateContainer('constraint spline samples', parentObj=om.getOrCreateContainer('debug'))
    for f in frames:
        vis.showFrame(f, 'frame', scale=0.1)

    side = self.side
    graspToPalm = vtk.vtkTransform()
    graspToHand = ikPlanner.newGraspToHandFrame(side, graspToPalm)

    constraints = []
    for graspToWorld, t in zip(frames[:-1], times[:-1]):
        p, q = ikPlanner.createPositionOrientationGraspConstraints(side, graspToWorld, graspToHand)
        p.lowerBound = np.tile(-positionTolerance, 3)
        p.upperBound = np.tile(positionTolerance, 3)
        q.angleToleranceInDegrees = angleToleranceInDegrees
        if t >= params[-2]:
            # Samples on the final segment are pinned exactly (zero tolerance)
            # and additionally get the orientation constraint.
            q.angleToleranceInDegrees = 0
            p.lowerBound = np.tile(-0.0, 3)
            p.upperBound = np.tile(0.0, 3)
            constraints.append(q)
        p.tspan = [t, t]
        q.tspan = [t, t]
        constraints.append(p)
    return constraints
def planSequenceRoomMap(self):
    """Plan a fixed sequence of room-mapping sweeps with the left hand."""
    self.graspingHand = 'left'
    self.targetSweepType = 'orientation'
    self.graspToHandLinkFrame = self.ikPlanner.newGraspToHandFrame(self.graspingHand)
    self.planFromCurrentRobotState = False
    self.plans = []
    self.currentYawDegrees = 60
    self.ikPlanner.ikServer.maxDegreesPerSecond = 10
    self.nextPosition = [0, 0, 0]
    self.targetPath = []
    self.resetTargetPath()
    self.fromTop = True
    self.mapFolder = om.getOrCreateContainer('room mapping')
    om.collapse(self.mapFolder)

    # taskQueue doesn't support a conditional while loop, so a fixed number
    # of sweep cycles is planned (the original unrolled these by hand):
    # each cycle moves to the next start point, sweeps down/up, then advances.
    for _ in range(7):
        self.getRoomSweepFrames()
        self.planRoomReach()      # move to next start point
        self.planRoomSweep()      # reach down/up
        self.moveRoomSweepOnwards()
def findEndEffectors(self):
    """Cache the teleop constraint frames in self.endEffectorFrames, if they exist."""
    planning = om.getOrCreateContainer("planning")
    if planning is None:
        return
    teleopFolder = planning.findChild("teleop plan")
    if teleopFolder is None:
        return
    framesFolder = teleopFolder.findChild("constraint frames")
    if framesFolder is None:
        return
    self.endEffectorFrames = framesFolder.children()
def _loadAffordanceFromDescription(self, desc):
    """Instantiate the affordance class named in *desc*, add it to the model and register it."""
    affordanceClass = getattr(affordanceitems, desc['classname'])
    aff = affordanceClass(desc['Name'], self.view)
    om.addToObjectModel(aff, parentObj=om.getOrCreateContainer('affordances'))
    vis.addChildFrame(aff)
    aff.loadDescription(desc, copyMode=aff.COPY_MODE_ALL)
    self.registerAffordance(aff, notify=False)
def _loadAffordanceFromDescription(self, desc):
    """Instantiate the affordance class named in *desc*, add it (with a non-deletable child frame) and register it."""
    affordanceClass = getattr(affordanceitems, desc["classname"])
    aff = affordanceClass(desc["Name"], self.view)
    om.addToObjectModel(aff, parentObj=om.getOrCreateContainer("affordances"))
    frame = vis.addChildFrame(aff)
    frame.setProperty("Deletable", False)
    aff.loadDescription(desc, copyMode=aff.COPY_MODE_ALL)
    self.registerAffordance(aff, notify=False)
def drawModel(self, model):
    # Draw one motion-capture model: its markers, optional inter-marker edges,
    # and a frame derived from the model's first segment.
    modelFolder = om.getOrCreateContainer(model.name, parentObj=self.getRootFolder())
    markerFolder = om.getOrCreateContainer("markers", parentObj=modelFolder)
    modelName = model.name

    # Recreate the marker objects whenever the marker count changed,
    # assigning the whole set a fresh random color.
    markerObjects = markerFolder.children()
    if len(markerObjects) != model.nummarkers:
        for obj in markerObjects:
            om.removeFromObjectModel(obj)
        modelColor = vis.getRandomColor()
        markerObjects = self.createMarkerObjects(model.nummarkers, markerFolder, modelName, modelColor)
        self.models[modelName] = markerObjects
    if len(markerObjects):
        # Reuse the color of the existing markers for the edge drawing below.
        modelColor = markerObjects[0].getProperty("Color")

    # Move each marker's child frame to the reported position.
    # NOTE(review): assumes marker.xyz is in the units implied by
    # self.unitConversion — confirm against the mocap message definition.
    for i, marker in enumerate(model.markers):
        xyz = np.array(marker.xyz) * self.unitConversion
        markerFrame = vtk.vtkTransform()
        markerFrame.Translate(xyz)
        markerObjects[i].getChildFrame().copyFrame(markerFrame)

    if self.drawEdges:
        # Draw a line between every ordered pair of markers (including
        # self-pairs and both directions — harmless duplicates in the polydata).
        d = DebugData()
        for m1 in model.markers:
            xyz = np.array(m1.xyz) * self.unitConversion
            for m2 in model.markers:
                xyz2 = np.array(m2.xyz) * self.unitConversion
                d.addLine(xyz, xyz2)
        edges = shallowCopy(d.getPolyData())
        vis.updatePolyData(edges, modelName + " edges", color=modelColor, parent=modelFolder)
    else:
        # Edges disabled: remove any edge object drawn previously.
        edgesObj = om.findObjectByName(modelName + " edges")
        om.removeFromObjectModel(edgesObj)

    computeModelFrame = True
    if computeModelFrame:
        # Model frame from the first segment's translation (T) and RPY angles (A).
        # NOTE(review): A appears to be in radians (converted via np.degrees) — confirm.
        pos = np.array(model.segments[0].T) * self.unitConversion
        rpy = np.array(model.segments[0].A)
        modelFrame = transformUtils.frameFromPositionAndRPY(pos, np.degrees(rpy))
        vis.updateFrame(modelFrame, modelName + " frame", parent=modelFolder, scale=0.1)
def __init__(self, uid, view, seed_pose, irisDriver, existing_region=None):
    # Visual item for one IRIS safe-terrain region: a seed sphere, an editable
    # frame widget, and (when terrain is present) a placer that snaps the seed
    # onto the height map.
    d = DebugData()
    self.uid = uid
    vis.PolyDataItem.__init__(self, "IRIS region {:d}".format(uid), d.getPolyData(), view)
    self.transform = seed_pose
    d.addSphere((0,0,0), radius=0.02)
    self.seedObj = vis.showPolyData(d.getPolyData(), 'region seed', parent=om.getOrCreateContainer('IRIS region seeds'))
    self.seedObj.actor.SetUserTransform(self.transform)
    self.frameObj = vis.showFrame(self.transform, 'region seed frame', scale=0.2, visible=False, parent=self.seedObj)
    self.frameObj.setProperty('Edit', True)
    self.frameObj.widget.HandleRotationEnabledOff()

    terrain = om.findObjectByName('HEIGHT_MAP_SCENE')
    if terrain:
        # Constrain the widget: no z translation, no rotation about x/y —
        # the seed slides on the terrain surface and only yaws.
        rep = self.frameObj.widget.GetRepresentation()
        rep.SetTranslateAxisEnabled(2, False)
        rep.SetRotateAxisEnabled(0, False)
        rep.SetRotateAxisEnabled(1, False)

        # Snap the seed's z to the highest terrain point within 0.1 m of the
        # vertical line through the seed position.
        pos = np.array(self.frameObj.transform.GetPosition())
        polyData = filterUtils.removeNonFinitePoints(terrain.polyData)
        if polyData.GetNumberOfPoints():
            polyData = segmentation.labelDistanceToLine(polyData, pos, pos+[0,0,1])
            polyData = segmentation.thresholdPoints(polyData, 'distance_to_line', [0.0, 0.1])
            if polyData.GetNumberOfPoints():
                pos[2] = np.nanmax(vnp.getNumpyFromVtk(polyData, 'Points')[:,2])
                self.frameObj.transform.Translate(pos - np.array(self.frameObj.transform.GetPosition()))
        self.placer = PlacerWidget(view, self.seedObj, terrain)
        self.placer.start()
    else:
        # No terrain: leave the frame freely editable and visible.
        self.frameObj.setProperty('Edit', True)
        self.frameObj.setProperty('Visible', True)

    self.driver = irisDriver
    self.safe_region = None
    self.addProperty('Visible', True)
    self.addProperty('Enabled for Walking', True)
    self.addProperty('Alpha', 1.0)
    self.addProperty('Color', QtGui.QColor(200,200,20))

    self.frameObj.connectFrameModified(self.onFrameModified)
    if existing_region is None:
        # New region: trigger an initial IRIS computation from the seed frame.
        self.onFrameModified(self.frameObj)
    else:
        # Restored region: display it dimmed/translucent.
        self.setRegion(existing_region)
        self.setProperty('Alpha', 0.5)
        self.setProperty('Color', QtGui.QColor(220,220,220))
def run(self):
    """Create a drill affordance at the computed frame and register its parameters."""
    folder = om.getOrCreateContainer('affordances')
    drillFrame = self.computeAffordanceFrame()
    mesh = segmentation.getDrillMesh()
    params = segmentation.getDrillAffordanceParams(
        np.array(drillFrame.GetPosition()), [1, 0, 0], [0, 1, 0], [0, 0, 1])
    affordance = vis.showPolyData(
        mesh, 'drill', color=[0.0, 1.0, 0.0], cls=affordanceitems.FrameAffordanceItem, parent=folder)
    frameObj = vis.showFrame(drillFrame, 'drill frame', parent=affordance, visible=False, scale=0.2)
    affordance.actor.SetUserTransform(frameObj.transform)
    affordance.setAffordanceParams(params)
    affordance.updateParamsFromActorTransform()
def init(view):
    """Start the Kinect LCM source and register its item under 'sensors'."""
    global KinectQueue, _kinectItem, _kinectSource

    KinectQueue = PythonQt.dd.ddKinectLCM(lcmUtils.getGlobalLCMThread())
    KinectQueue.init(lcmUtils.getGlobalLCMThread(), drcargs.args().config_file)

    _kinectSource = KinectSource(view, KinectQueue)
    _kinectSource.start()

    _kinectItem = KinectItem(_kinectSource)
    om.addToObjectModel(_kinectItem, om.getOrCreateContainer('sensors'))
def init(view):
    """Start the point-cloud LCM source and register its item under 'sensors'."""
    global PointCloudQueue, _pointcloudItem, _pointcloudSource

    PointCloudQueue = PythonQt.dd.ddPointCloudLCM(lcmUtils.getGlobalLCMThread())
    PointCloudQueue.init(lcmUtils.getGlobalLCMThread(), drcargs.args().config_file)

    _pointcloudSource = PointCloudSource(view, PointCloudQueue)
    _pointcloudSource.start()

    _pointcloudItem = PointCloudItem(_pointcloudSource)
    om.addToObjectModel(_pointcloudItem, om.getOrCreateContainer('sensors'))
def getRootFolder(self):
    """Return (and cache) this item's root folder, created under 'scene'."""
    rootPath = tuple()
    if rootPath in self.pathToItemCache:
        return self.pathToItemCache[rootPath]

    folder = om.getOrCreateContainer(
        self.name.lower(), parentObj=om.findObjectByName('scene'))
    # Cache in both directions so removals can be resolved back to paths.
    self.pathToItemCache[rootPath] = folder
    self.itemToPathCache[folder] = rootPath
    folder.connectRemovedFromObjectModel(self.onItemRemoved)
    return folder
def newTerrainItem(self, tform, uid=None, region=None):
    """Create (or return the existing) TerrainRegionItem for *uid* and register it."""
    if uid is None:
        uid = self.getNewUID()
    elif uid in self.regions:
        # Already created for this uid — reuse it.
        return self.regions[uid]

    item = TerrainRegionItem(uid, app.getCurrentRenderView(), tform, self, region)
    om.addToObjectModel(item, om.getOrCreateContainer('Safe terrain regions'))
    self.regions[uid] = item
    return item
def loadRobotModel(name=None, view=None, parent='scene', urdfFile=None, color=None, visible=True, colorMode='URDF Colors', useConfigFile=True):
    """Load a robot model from a URDF, add it to the object model and return (item, jointController).

    When *useConfigFile* is true the nominal pose is loaded from the director
    config's fixed-point file; otherwise joint names come from the model itself.
    Raises Exception when the URDF cannot be loaded.
    """
    if not urdfFile:
        urdfFile = drcargs.getDirectorConfig()['urdfConfig']['default']
    if isinstance(parent, str):
        parent = om.getOrCreateContainer(parent)

    model = loadRobotModelFromFile(urdfFile)
    if not model:
        raise Exception('Error loading robot model from file: %s' % urdfFile)

    obj = RobotModelItem(model)
    om.addToObjectModel(obj, parent)
    if name:
        obj.setProperty('Name', name)
    obj.setProperty('Visible', visible)
    obj.setProperty('Color', color or getRobotGrayColor())

    # Unknown mode names leave the item's default 'Color Mode' untouched.
    colorModes = {'Solid Color': 0, 'Textures': 1, 'URDF Colors': 2}
    if colorMode in colorModes:
        obj.setProperty('Color Mode', colorModes[colorMode])

    if view is not None:
        obj.addToView(view)

    jointNames = None if useConfigFile else model.getJointNames()
    jointController = jointcontrol.JointController([obj], jointNames=jointNames)
    if useConfigFile:
        fixedPointFile = drcargs.getDirectorConfig()['fixedPointFile']
        jointController.setPose('q_nom', jointController.loadPoseFromFile(fixedPointFile))
    return obj, jointController
def loadRobotModel(
    name=None,
    view=None,
    parent="scene",
    urdfFile=None,
    color=None,
    visible=True,
    colorMode="URDF Colors",
    useConfigFile=True,
    robotName="",
):
    """Create a RobotModelItem (optionally backed by a URDF) plus its JointController.

    Without a URDF the item has no model, so joint names must come from the
    config file and nothing is pushed to the (absent) model.
    """
    if isinstance(parent, str):
        parent = om.getOrCreateContainer(parent)

    if urdfFile:
        model = loadRobotModelFromFile(urdfFile)
        haveModel = True
    else:
        model = None
        useConfigFile = True  # we can't extract joint names from the model
        haveModel = False

    colorModes = {"Solid Color": 0, "Textures": 1, "URDF Colors": 2}
    obj = RobotModelItem(
        model=model,
        visible=visible,
        color=color or getRobotGrayColor(),
        colorMode=colorModes[colorMode],
        robotName=robotName,
    )
    om.addToObjectModel(obj, parent)

    if name:
        obj.setProperty("Name", name)
    if view is not None:
        obj.addToView(view)

    jointNames = None if useConfigFile else model.getJointNames()
    jointController = jointcontrol.JointController(
        [obj], jointNames=jointNames, robotName=robotName, pushToModel=haveModel
    )
    return obj, jointController
def launchObjectAlignment(self, objectName, useAboveTablePointcloud=True):
    """
    Launches the object alignment tool for *objectName*.

    Stops and closes any previously running alignment tool, then opens a new
    one against the above-table point cloud. Alignment results are written
    into self.objectAlignmentResults[objectName] by the tool via the shared
    resultsDict, and the onFinishAlignment callback re-loads the object mesh
    at the aligned pose and refines it with ICP.

    :param objectName: key used to look up the object's model polydata.
    :param useAboveTablePointcloud: currently unused — the above-table cloud
        is always used (NOTE(review): parameter kept for interface
        compatibility; confirm whether callers rely on it).
    :return: None (the tool reports through the callback).
    """
    if self.objectAlignmentTool is not None:
        # Tear down the previous tool's pickers and window before replacing it.
        self.objectAlignmentTool.picker.stop()
        self.objectAlignmentTool.scenePicker.stop()
        self.objectAlignmentTool.widget.close()

    pointCloud = self.aboveTablePolyData
    # pointCloud = om.findObjectByName('reconstruction').polyData
    objectPolyData = utils.getObjectPolyData(self.objectData, objectName)

    # Shared dict: the alignment tool fills in 'modelToSceneTransform';
    # the callback below derives further transforms from it.
    resultsDict = dict()
    self.objectAlignmentResults[objectName] = resultsDict
    # NOTE(review): this container is created but not used below — presumably
    # for its side effect of ensuring the folder exists; confirm.
    parent = om.getOrCreateContainer('global registration')

    def onFinishAlignment():
        """
        Callback invoked by objectAlignmentTool when alignment finishes.
        Captures resultsDict/objectName from the enclosing scope.
        """
        # Express the model pose relative to the first camera frame.
        resultsDict[
            'modelToFirstFrameTransform'] = transformUtils.concatenateTransforms(
                [
                    resultsDict['modelToSceneTransform'],
                    self.firstFrameToWorldTransform.GetLinearInverse()
                ])
        modelToWorld = resultsDict['modelToSceneTransform']
        # Replace the existing visualization with the mesh at the aligned pose.
        visObj = om.findObjectByName(objectName)
        om.removeFromObjectModel(visObj)
        pose = transformUtils.poseFromTransform(modelToWorld)
        utils.loadObjectMesh(self.affordanceManager,
                             objectName,
                             visName=objectName,
                             pose=pose)

        print "cropping and running ICP"
        self.ICPWithCropBasedOnModel(objectName=objectName)

    self.objectAlignmentTool = ObjectAlignmentTool(
        self.cameraView,
        modelPolyData=objectPolyData,
        pointCloud=pointCloud,
        resultsDict=resultsDict,
        callback=onFinishAlignment)
def processSnippet(): obj = om.getOrCreateContainer('continuous') om.getOrCreateContainer('cont debug', obj) if (continuouswalkingDemo.processContinuousStereo): polyData = ioUtils.readPolyData( os.path.join(dataDir, 'terrain/block_snippet_stereo.vtp')) polyData = segmentation.applyVoxelGrid(polyData, leafSize=0.01) else: polyData = ioUtils.readPolyData( os.path.join(dataDir, 'terrain/block_snippet.vtp')) vis.updatePolyData(polyData, 'walking snapshot trimmed', parent='continuous') standingFootName = cwdemo.ikPlanner.leftFootLink standingFootFrame = robotStateModel.getLinkFrame(standingFootName) vis.updateFrame(standingFootFrame, standingFootName, parent='continuous', visible=False) # Step 2: find all the surfaces in front of the robot (about 0.75sec) clusters = segmentation.findHorizontalSurfaces(polyData) if (clusters is None): print "No cluster found, stop walking now!" return # Step 3: find the corners of the minimum bounding rectangles blocks, match_idx, groundPlane = cwdemo.extractBlocksFromSurfaces( clusters, standingFootFrame) footsteps = cwdemo.placeStepsOnBlocks(blocks, groundPlane, standingFootName, standingFootFrame) cwdemo.drawFittedSteps(footsteps)
def getPathFolder(self, path):
    """Return (and cache) the nested folder for *path*, creating intermediate folders as needed."""
    path = tuple(path)
    if path in self.pathToItemCache:
        return self.pathToItemCache[path]

    folder = self.getRootFolder()
    for element in path:
        folder = om.getOrCreateContainer(element, parentObj=folder)
        folder.connectRemovedFromObjectModel(self.onItemRemoved)
    # Cache in both directions so removals can be resolved back to paths.
    self.pathToItemCache[path] = folder
    self.itemToPathCache[folder] = path
    return folder
def handle_message(self, msg):
    """Redraw contact-force arrows grouped by the (unordered) pair of contacting bodies."""
    # Limits the rate of message handling, since redrawing is done in the
    # message handler.
    self._sub.setSpeedLimit(30)

    # Remove and recreate the folder to clear the previous drawing.
    om.removeFromObjectModel(om.findObjectByName(self._folder_name))
    folder = om.getOrCreateContainer(self._folder_name)

    # Map each unordered body-name pair to its list of (start, end) arrows.
    collision_pair_to_forces = {}
    for contact in msg.contact_info:
        point = np.array([
            contact.contact_point[0], contact.contact_point[1],
            contact.contact_point[2]
        ])
        force = np.array([
            contact.contact_force[0], contact.contact_force[1],
            contact.contact_force[2]
        ])
        mag = np.linalg.norm(force)
        if mag > 1e-4:
            # Normalize non-trivial forces to a nominal 0.3 m arrow length.
            mag = 0.3 / mag

        key1 = (str(contact.body1_name), str(contact.body2_name))
        key2 = (key1[1], key1[0])
        arrow = (point, point + mag * force)
        if key1 in collision_pair_to_forces:
            collision_pair_to_forces[key1].append(arrow)
        elif key2 in collision_pair_to_forces:
            collision_pair_to_forces[key2].append(arrow)
        else:
            collision_pair_to_forces[key1] = [arrow]

    for key, list_of_forces in collision_pair_to_forces.iteritems():
        d = DebugData()
        for start_pt, end_pt in list_of_forces:
            d.addArrow(start=start_pt, end=end_pt, tubeRadius=0.005, headRadius=0.01)
        vis.showPolyData(d.getPolyData(), str(key), parent=folder, color=[0, 1, 0])
def __init__(self, robotSystem, openniDepthPointCloud, measurementPanel, imageManager):
    """Store collaborators, create the vis folder, then load saved data and dev tools."""
    self.robotSystem = robotSystem
    self.openniDepthPointCloud = openniDepthPointCloud
    self.measurementPanel = measurementPanel
    self.imageManager = imageManager

    self.visFolder = om.getOrCreateContainer('data collection')
    self.cameraName = 'OPENNI_FRAME_LEFT'
    # Where the reconstruction-to-robot-frame transform is persisted.
    self.savedTransformFilename = os.path.join(
        CorlUtils.getCorlBaseDir(), 'sandbox', 'reconstruction_robot_frame.yaml')
    self.frustumVis = dict()

    self.loadSavedData()
    self.setupDevTools()
def init(view):
    """Start the point-cloud LCM source and register its item under 'sensors'."""
    global PointCloudQueue, _pointcloudItem, _pointcloudSource

    PointCloudQueue = PythonQt.dd.ddPointCloudLCM(lcmUtils.getGlobalLCMThread())
    PointCloudQueue.init(lcmUtils.getGlobalLCMThread(),
                         drcargs.args().config_file)

    _pointcloudSource = PointCloudSource(view, PointCloudQueue)
    _pointcloudSource.start()

    _pointcloudItem = PointCloudItem(_pointcloudSource)
    om.addToObjectModel(_pointcloudItem, om.getOrCreateContainer('sensors'))
def initRobotState(self, robotSystem):
    """Load the robot-state model under the robot's 'sensors' folder and return it in a FieldContainer."""
    from director import roboturdf
    from director import objectmodel as om

    parentFolder = om.getOrCreateContainer(
        "sensors", om.getOrCreateContainer(robotSystem.robotName))
    robotStateModel, robotStateJointController = roboturdf.loadRobotModel(
        "robot state model",
        robotSystem.view,
        color=roboturdf.getRobotGrayColor(),
        colorMode=robotSystem.directorConfig["colorMode"],
        parent=parentFolder,
        visible=True,
        robotName=robotSystem.robotName,
    )

    # robotStateJointController.setPose('EST_ROBOT_STATE', robotStateJointController.getPose('q_nom'))
    # roboturdf.startModelPublisherListener([(robotStateModel, robotStateJointController)])

    return FieldContainer(
        robotStateModel=robotStateModel,
        robotStateJointController=robotStateJointController,
    )
def _handle_message(self, msg):
    """Redraw one frame item per link reported in *msg*.

    The folder is removed and recreated wholesale — the clearest and easiest
    way to update; the alternative would be caching each FrameItem returned
    by vis.showFrame and updating its transform (plus handling enable/disable).
    """
    om.removeFromObjectModel(om.findObjectByName(self._folder_name))
    folder = om.getOrCreateContainer(self._folder_name)

    for i in range(msg.num_links):
        linkFrame = transformUtils.transformFromPose(
            msg.position[i], msg.quaternion[i])
        vis.showFrame(linkFrame, msg.link_name[i], parent=folder, scale=0.1)
def _handle_message(self, msg):
    """Redraw one frame item per link reported in *msg*.

    The folder is removed and recreated wholesale — the clearest and easiest
    way to update; the alternative would be caching each FrameItem returned
    by vis.showFrame and updating its transform (plus handling enable/disable).
    """
    om.removeFromObjectModel(om.findObjectByName(self._folder_name))
    folder = om.getOrCreateContainer(self._folder_name)

    for i in range(msg.num_links):
        linkFrame = transformUtils.transformFromPose(
            msg.position[i], msg.quaternion[i])
        vis.showFrame(linkFrame, msg.link_name[i], parent=folder, scale=0.1)
def showFrame(frame, name, view=None, parent='segmentation', scale=0.35, visible=True):
    """Create a FrameItem for *frame*, add it to the object model and return it.

    *parent* may be a container name or a container object.
    """
    view = view or app.getCurrentRenderView()
    assert view

    parentObj = om.getOrCreateContainer(parent) if isinstance(parent, str) else parent
    item = FrameItem(name, frame, view)
    om.addToObjectModel(item, parentObj)
    item.setProperty('Visible', visible)
    item.setProperty('Scale', scale)
    return item
def showFusedMaps(self):
    """Re-draw the stored point clouds in the current local frame under a fresh 'stereo' folder."""
    om.removeFromObjectModel(om.findObjectByName('stereo'))
    om.getOrCreateContainer('stereo')

    q = imageManager.queue
    cameraToLocalNow = vtk.vtkTransform()
    utime = q.getCurrentImageTime('CAMERA_TSDF')
    q.getTransform('MULTISENSE_CAMERA_LEFT', 'local', utime, cameraToLocalNow)
    cameraToLocalFusedNow = vtk.vtkTransform()
    q.getTransform('MULTISENSE_CAMERA_LEFT_ALT', 'local', utime, cameraToLocalFusedNow)

    clouds = zip(self.pointClouds, self.cameraToLocalFusedTransforms)
    for i, (cloud, cameraToLocalFused) in enumerate(clouds):
        # Map from the fused frame at capture time into the local frame now.
        fusedNowToLocalNow = vtk.vtkTransform()
        fusedNowToLocalNow.PreMultiply()
        fusedNowToLocalNow.Concatenate(cameraToLocalNow)
        fusedNowToLocalNow.Concatenate(cameraToLocalFusedNow.GetLinearInverse())

        fusedTransform = vtk.vtkTransform()
        fusedTransform.PreMultiply()
        fusedTransform.Concatenate(fusedNowToLocalNow)
        fusedTransform.Concatenate(cameraToLocalFused)

        pd = filterUtils.transformPolyData(cloud, fusedTransform)
        vis.showFrame(fusedTransform, ('cloud frame ' + str(i)), visible=True, scale=0.2, parent='stereo')
        vis.showPolyData(pd, ('stereo ' + str(i)), parent='stereo', colorByName='rgb_colors')
def spawnTargetFrame(self):
    """Create/refresh a 'target frame' slightly in front of the hand link and return it.

    Fix: the original called om.removeFromObjectModel('target frame') with a
    name string, but removeFromObjectModel takes an object — every other call
    site in this codebase passes the result of om.findObjectByName. Look the
    item up first so the stale frame is actually removed.
    """
    debugFolder = om.getOrCreateContainer('debug')
    om.removeFromObjectModel(om.findObjectByName('target frame'))

    handLink = str(self.robotSystem.ikPlanner.getHandLink())
    handFrame = transformUtils.copyFrame(
        self.robotSystem.robotStateModel.getLinkFrame(handLink))
    # Offset 2 cm along the hand's local x axis.
    handFrame.PreMultiply()
    handFrame.Translate(0.02, 0, 0)

    self.targetFrame = vis.updateFrame(handFrame, 'target frame',
                                       parent=debugFolder, scale=0.15)
    return self.targetFrame
def showText(text, name, fontSize=18, position=(10, 10), parent=None, view=None):
    """Create a TextItem showing *text*, add it to the object model and return it.

    *parent* may be a container name, a container object, or None.
    """
    view = view or app.getCurrentRenderView()
    assert view

    item = TextItem(name, text, view=view)
    item.setProperty('Font Size', fontSize)
    item.setProperty('Position', list(position))

    parentObj = om.getOrCreateContainer(parent) if isinstance(parent, str) else parent
    om.addToObjectModel(item, parentObj)
    return item
def newPolyData(self, name, view, parent=None):
    """Create a hidden hand polydata item (with an attached user transform frame) and return it."""
    polyData = self.getNewHandPolyData()
    if isinstance(parent, str):
        parent = om.getOrCreateContainer(parent)

    # Right hand is green; left hand is yellow.
    color = [0.33, 1.0, 0.0] if self.side == 'right' else [1.0, 1.0, 0.0]

    obj = vis.showPolyData(polyData, name, view=view, color=color, visible=False, parent=parent)
    obj.side = self.side

    frame = vtk.vtkTransform()
    frame.PostMultiply()
    obj.actor.SetUserTransform(frame)
    vis.showFrame(frame, '%s frame' % name, view=view, scale=0.2, visible=False, parent=obj)
    return obj
def run(self):
    """Create a valve affordance (a cylinder of the configured radius) and register its parameters."""
    radius = self.properties.getProperty('Radius')
    thickness = 0.03

    folder = om.getOrCreateContainer('affordances')
    valveFrame = self.computeValveFrame()

    # Cylinder mesh: a thick line along z.
    d = DebugData()
    d.addLine(np.array([0, 0, -thickness/2.0]), np.array([0, 0, thickness/2.0]), radius=radius)
    mesh = d.getPolyData()

    params = dict(radius=radius, length=thickness, xwidth=radius,
                  ywidth=radius, zwidth=thickness,
                  otdf_type='steering_cyl', friendly_name='valve')

    affordance = vis.showPolyData(mesh, 'valve', color=[0.0, 1.0, 0.0],
                                  cls=affordanceitems.FrameAffordanceItem,
                                  parent=folder, alpha=1.0)
    frameObj = vis.showFrame(valveFrame, 'valve frame', parent=affordance,
                             visible=False, scale=radius)
    affordance.actor.SetUserTransform(frameObj.transform)
    affordance.setAffordanceParams(params)
    affordance.updateParamsFromActorTransform()
def drawResult(self, result):
    """Visualize all vs. feasible camera locations from a hand-eye calibration result."""
    radius = 0.01
    parent = om.getOrCreateContainer('Hand Eye Calibration')

    # Red spheres: every candidate camera location.
    d = DebugData()
    for cameraLocation in result['cameraLocations']:
        d.addSphere(cameraLocation, radius=radius)
    vis.updatePolyData(d.getPolyData(), 'All Camera Locations', parent=parent, color=[1, 0, 0])

    # Green spheres: locations that yielded a feasible pose.
    d = DebugData()
    for data in result['feasiblePoses']:
        d.addSphere(data['cameraLocation'], radius=radius)
    vis.updatePolyData(d.getPolyData(), 'Feasible Camera Locations', parent=parent, color=[0, 1, 0])
def _handle_message(self, msg):
    """Update the per-link frame visualization from an incoming pose message.

    If the set of published link names changed since the last message, the
    whole folder is removed and rebuilt; otherwise frames are updated in
    place by name.
    """
    if set(self._link_name_published) != set(msg.link_name):
        # Removes the folder completely.
        # TODO(eric.cousineau): Consider only removing frames that are in
        # the set difference.
        om.removeFromObjectModel(om.findObjectByName(self._folder_name))
        self._link_name_published = msg.link_name

    # Recreates folder.
    folder = om.getOrCreateContainer(self._folder_name)

    # range(0, n) simplified to range(n); stray trailing semicolon removed.
    for i in range(msg.num_links):
        name = msg.link_name[i]
        transform = transformUtils.transformFromPose(
            msg.position[i], msg.quaternion[i])
        # `vis.updateFrame` will either create or update the frame
        # according to its name within its parent folder.
        vis.updateFrame(transform, name, parent=folder, scale=0.1)
def _handleMarkerSets(self, marker_sets):
    """Create/update a folder of markers for each incoming marker set, then
    remove folders for sets that no longer appear in the message."""
    # Names of the folders currently shown; any name still present after
    # processing the message belongs to a vanished marker set.
    stale_names = {x.getProperty('Name') for x in self.marker_sets.children()}

    for marker_set in marker_sets:
        set_name = 'Marker set ' + marker_set.name
        stale_names.discard(set_name)
        set_folder = om.getOrCreateContainer(set_name,
                                             parentObj=self.marker_sets)
        marker_ids = range(marker_set.num_markers)
        self._updateMarkerCollection(marker_set.name + '.', set_folder,
                                     marker_ids, marker_set.xyz)

    for stale_name in stale_names:
        om.removeFromObjectModel(
            om.findObjectByName(stale_name, self.marker_sets))
def rotateReconstructionToStandardOrientation(self, pointCloud=None, savePoseToFile=True, filename=None):
    """
    Rotates the reconstruction so the detected table normal points along +z
    (right side up) and optionally saves the transform to a yaml file.

    :param pointCloud: polydata to rotate; defaults to the polydata of the
        'reconstruction' object in the object model.
    :param savePoseToFile: if True, write the computed transform as a
        (position, quaternion) pose under the key 'firstFrameToWorld'.
    :param filename: yaml file to write the pose to; defaults to
        self.pathDict['transforms'].
    :return: vtkTransform mapping the point cloud frame to world.
    """
    if pointCloud is None:
        pointCloud = om.findObjectByName('reconstruction').polyData

    returnData = self.segmentTable(scenePolyData=pointCloud, visualize=False)
    normal = returnData['normal']
    polyData = returnData['polyData']

    # get transform that rotates normal --> [0,0,1]
    origin = returnData['pointOnTable'] - 0.5 * normal
    pointCloudToWorldTransform = transformUtils.getTransformFromOriginAndNormal(
        origin, normal).GetLinearInverse()

    rotatedPolyData = filterUtils.transformPolyData(
        polyData, pointCloudToWorldTransform)

    parent = om.getOrCreateContainer('segmentation')
    vis.updatePolyData(rotatedPolyData, 'reconstruction',
                       colorByName='RGB', parent=parent)

    if savePoseToFile:
        if filename is None:
            assert self.pathDict is not None
            filename = self.pathDict['transforms']

        (pos, quat
         ) = transformUtils.poseFromTransform(pointCloudToWorldTransform)
        d = dict()
        d['firstFrameToWorld'] = [pos.tolist(), quat.tolist()]
        utils.saveDictToYaml(d, filename)

    return pointCloudToWorldTransform
def spawnTargetAffordance(self):
    """Spawn (or respawn) the square wire-frame 'target' affordance and
    record its frame transform on self."""
    # Remove any previously spawned target first.
    for obj in om.getObjects():
        if obj.getProperty('Name') == 'target':
            om.removeFromObjectModel(obj)

    targetFrame = transformUtils.frameFromPositionAndRPY([0.6, 0.2, 0.6],
                                                         [180, 0, 90])
    folder = om.getOrCreateContainer('affordances')

    # Four bars forming a square loop of side self.boxLength.
    sideLength = self.boxLength
    z = DebugData()
    z.addLine(np.array([0, 0, 0]), np.array([-sideLength, 0, 0]), radius=0.02)  # main bar
    z.addLine(np.array([-sideLength, 0, 0]), np.array([-sideLength, 0, sideLength]), radius=0.02)  # main bar
    z.addLine(np.array([-sideLength, 0, sideLength]), np.array([0, 0, sideLength]), radius=0.02)  # main bar
    z.addLine(np.array([0, 0, sideLength]), np.array([0, 0, 0]), radius=0.02)  # main bar
    targetMesh = z.getPolyData()

    self.targetAffordance = vis.showPolyData(
        targetMesh, 'target', color=[0.0, 1.0, 0.0],
        cls=affordanceitems.FrameAffordanceItem, parent=folder, alpha=0.3)
    self.targetAffordance.actor.SetUserTransform(targetFrame)

    # The frame object is shown, but self.targetFrame ends up holding the
    # transform itself (the intermediate frame object is discarded).
    self.targetFrame = vis.showFrame(targetFrame, 'target frame',
                                     parent=self.targetAffordance,
                                     visible=False, scale=0.2)
    self.targetFrame = self.targetFrame.transform

    params = dict(length=self.boxLength, otdf_type='target',
                  friendly_name='target')
    self.targetAffordance.setAffordanceParams(params)
    self.targetAffordance.updateParamsFromActorTransform()
def onShowMapButton(self):
    """Load the octomap point cloud from disk and display it as 'prior map',
    clearing any previous segmentation folder first."""
    # reloads the map each time - in case its changed on disk:
    #if (self.octomap_cloud is None):
    filename = director.getDRCBaseDir() + "/software/build/data/octomap.pcd"
    self.octomap_cloud = io.readPolyData(filename)  # c++ object called vtkPolyData
    assert (self.octomap_cloud.GetNumberOfPoints() !=0 )

    # clip point cloud to height - doesnt work very well yet. need to know robot's height
    #self.octomap_cloud = segmentation.cropToLineSegment(self.octomap_cloud, np.array([0,0,-10]), np.array([0,0,3]) )
    # access to z values
    #points= vnp.getNumpyFromVtk(self.octomap_cloud, 'Points')
    #zvalues = points[:,2]

    # remove previous map:
    folder = om.getOrCreateContainer("segmentation")
    om.removeFromObjectModel(folder)

    vis.showPolyData(self.octomap_cloud, 'prior map', alpha=1.0,
                     color=[0,0,0.4])
def init(view):
    """Create the sensor sources (multisense head, per-config lidars, and the
    optional map server) and register them under the 'sensors' folder.

    Returns (multisenseDriver, mapServerSource).
    """
    global _multisenseItem
    global multisenseDriver

    sensorsFolder = om.getOrCreateContainer('sensors')

    # Multisense head source.
    m = MultiSenseSource(view)
    m.start()
    multisenseDriver = m
    _multisenseItem = MultisenseItem(m)
    om.addToObjectModel(_multisenseItem, sensorsFolder)

    # One LidarSource per lidar declared in the config file.
    queue = PythonQt.dd.ddPointCloudLCM(lcmUtils.getGlobalLCMThread())
    queue.init(lcmUtils.getGlobalLCMThread(), drcargs.args().config_file)
    for lidar in queue.getLidarNames():
        if not queue.displayLidar(lidar):
            continue
        l = LidarSource(view, queue.getLidarChannelName(lidar),
                        queue.getLidarCoordinateFrame(lidar),
                        queue.getLidarFriendlyName(lidar),
                        queue.getLidarIntensity(lidar))
        l.start()
        lidarDriver = l
        _lidarItem = LidarItem(l)
        om.addToObjectModel(_lidarItem, sensorsFolder)

    # The map server only exists in builds that provide vtkMapServerSource.
    if hasattr(drc, 'vtkMapServerSource'):
        mapServerSource = MapServerSource(view, callbackFunc=view.render)
        mapsServerContainer = om.ObjectModelItem('Map Server',
                                                 icon=om.Icons.Robot)
        mapsServerContainer.source = mapServerSource
        om.addToObjectModel(mapsServerContainer, parentObj=sensorsFolder)
        mapServerSource.start()
    else:
        mapServerSource = None

    spindleDebug = SpindleAxisDebug(multisenseDriver)
    spindleDebug.addToView(view)
    om.addToObjectModel(spindleDebug, sensorsFolder)

    return multisenseDriver, mapServerSource
def showPolyData( polyData, name, color=None, colorByName=None, colorByRange=None, alpha=1.0, visible=True, view=None, parent="segmentation", cls=None, ): view = view or app.getCurrentRenderView() assert view cls = cls or PolyDataItem item = cls(name, polyData, view) if isinstance(parent, str): parentObj = om.getOrCreateContainer(parent) else: parentObj = parent om.addToObjectModel(item, parentObj) item.setProperty("Visible", visible) item.setProperty("Alpha", alpha) if colorByName and colorByName not in item.getArrayNames(): print "showPolyData(colorByName=%s): array not found" % colorByName colorByName = None if colorByName: item.setProperty("Color By", colorByName) item.colorBy(colorByName, colorByRange) else: color = [1.0, 1.0, 1.0] if color is None else color item.setProperty("Color", [float(c) for c in color]) item.colorBy(None) return item
def cropPointCloudToModel(pointCloud, objectPointCloud, distanceThreshold=0.02, visualize=True, applyEuclideanClustering=True): """ Crops pointCloud to just the points withing distanceThreshold of objectPointCloud :param pointCloud: :param objectPointCloud: :param distanceThreshold: :return: cropped pointcloud """ registration = GlobalRegistrationUtils pointCloud = GlobalRegistration.cropPointCloudToModelBoundingBox( pointCloud, objectPointCloud, scaleFactor=1.5) arrayName = 'distance_to_mesh' print "computing point to point distance" dists = registration.computePointToPointDistance( pointCloud, objectPointCloud) vnp.addNumpyToVtk(pointCloud, dists, arrayName) polyData = filterUtils.thresholdPoints(pointCloud, arrayName, [0.0, distanceThreshold]) # this stuff may be unecessary if applyEuclideanClustering: # polyData = segmentation.applyVoxelGrid(polyData, leafSize=0.01) polyData = segmentation.applyEuclideanClustering( polyData, clusterTolerance=0.04) polyData = segmentation.thresholdPoints(polyData, 'cluster_labels', [1, 1]) if visualize: parent = om.getOrCreateContainer('global registration') vis.updatePolyData(polyData, 'cropped pointcloud', color=[0, 1, 0], parent=parent) return polyData
def run(self):
    """Create the drill affordance at the computed frame and register it
    under the 'affordances' folder."""
    affordanceFolder = om.getOrCreateContainer('affordances')
    drillFrame = self.computeAffordanceFrame()
    drillMesh = segmentation.getDrillMesh()
    params = segmentation.getDrillAffordanceParams(
        np.array(drillFrame.GetPosition()), [1, 0, 0], [0, 1, 0], [0, 0, 1])

    affordance = vis.showPolyData(drillMesh, 'drill', color=[0.0, 1.0, 0.0],
                                  cls=affordanceitems.FrameAffordanceItem,
                                  parent=affordanceFolder)
    frameObj = vis.showFrame(drillFrame, 'drill frame', parent=affordance,
                             visible=False, scale=0.2)
    affordance.actor.SetUserTransform(frameObj.transform)
    affordance.setAffordanceParams(params)
    affordance.updateParamsFromActorTransform()
def loadObjectMeshes(self):
    """Load each registered object's mesh, color it by index, and place it in
    the world using the pose stored in the registration result file."""
    # Use a context manager so the file handle is closed (the previous
    # file() call leaked the handle).
    # NOTE(review): yaml.load without an explicit Loader can execute
    # arbitrary tags; fine for trusted local files, but consider
    # yaml.safe_load.
    with open(self.pathDict['registrationResult']) as stream:
        registrationResult = yaml.load(stream)

    folder = om.getOrCreateContainer('data files')

    for objName, data in registrationResult.iteritems():
        filename = data['filename']
        if len(filename) == 0:
            # Empty filename means: look up the canonical mesh by name.
            filename = utils.getObjectMeshFilename(objName)
        else:
            filename = os.path.join(utils.getLabelFusionDataDir(), filename)

        polyData = ioUtils.readPolyData(filename)
        color = self.getColorFromIndex(objName)
        obj = vis.showPolyData(polyData, name=objName, parent=folder,
                               color=color)
        self.storedColors[objName] = color

        objToWorld = transformUtils.transformFromPose(*data['pose'])
        self.objectToWorld[objName] = objToWorld
        obj.actor.SetUserTransform(objToWorld)
def init(view):
    """Start the multisense, lidar, and (optional) map-server sources and
    register them under the 'sensors' folder.

    Returns (multisenseDriver, mapServerSource).
    """
    global _multisenseItem
    global multisenseDriver
    global _lidarItem
    global lidarDriver

    sensorsFolder = om.getOrCreateContainer('sensors')

    # Multisense head source.
    multisense = MultiSenseSource(view)
    multisense.start()
    multisenseDriver = multisense
    _multisenseItem = MultisenseItem(multisense)
    om.addToObjectModel(_multisenseItem, sensorsFolder)

    useLidarSource = True
    if useLidarSource:
        lidar = LidarSource(view)
        lidar.start()
        lidarDriver = lidar
        _lidarItem = LidarItem(lidar)
        om.addToObjectModel(_lidarItem, sensorsFolder)

    # The map server only exists in builds that provide vtkMapServerSource.
    if hasattr(drc, 'vtkMapServerSource'):
        mapServerSource = MapServerSource(view, callbackFunc=view.render)
        mapsServerContainer = om.ObjectModelItem('Map Server',
                                                 icon=om.Icons.Robot)
        mapsServerContainer.source = mapServerSource
        om.addToObjectModel(mapsServerContainer, parentObj=sensorsFolder)
        mapServerSource.start()
    else:
        mapServerSource = None

    spindleDebug = SpindleAxisDebug(multisenseDriver)
    spindleDebug.addToView(view)
    om.addToObjectModel(spindleDebug, sensorsFolder)

    return multisenseDriver, mapServerSource
def loadObjectMeshes(self):
    """Load each registered object's mesh plus a hidden wireframe bounding
    box, positioning both with the pose from the registration result file."""
    # Use a context manager so the file handle is closed (the previous
    # file() call leaked the handle).
    # NOTE(review): yaml.load without an explicit Loader can execute
    # arbitrary tags; fine for trusted local files, but consider
    # yaml.safe_load.
    with open(self.pathDict['registrationResult']) as stream:
        registrationResult = yaml.load(stream)

    folder = om.getOrCreateContainer('data files')

    for objName, data in registrationResult.iteritems():
        filename = data['filename']
        if len(filename) == 0:
            # Empty filename means: look up the mesh via the object data.
            filename = utils.getObjectMeshFilename(self.objectData, objName)
        else:
            filename = os.path.join(utils.getLabelFusionDataDir(), filename)

        polyData = ioUtils.readPolyData(filename)
        color = self.getColorFromIndex(objName)
        obj = vis.showPolyData(polyData, name=objName, parent=folder,
                               color=color)
        self.storedColors[objName] = color

        objToWorld = transformUtils.transformFromPose(*data['pose'])
        self.objectToWorld[objName] = objToWorld
        obj.actor.SetUserTransform(objToWorld)

        # Hidden white bounding-box outline, parented to the object.
        outlineFilter = vtk.vtkOutlineFilter()
        outlineFilter.SetInput(polyData)
        outlineFilter.Update()
        bbox = vis.showPolyData(outlineFilter.GetOutput(), objName + ' bbox',
                                parent=obj)
        bbox.actor.GetProperty().SetLineWidth(1)
        bbox.actor.SetUserTransform(objToWorld)
        bbox.setProperty('Color', [1, 1, 1])
        bbox.setProperty('Alpha', 0.8)
        bbox.setProperty('Visible', False)
def autonomousExecuteRoomMap(self):
    """Configure the planner for an autonomous room-mapping sweep and build
    the task queue (seven sweep passes, then a done indicator).

    :return: the populated AsyncTaskQueue (the caller starts it).
    """
    self.graspingHand = 'left'
    self.targetSweepType = 'orientation'
    self.graspToHandLinkFrame = self.ikPlanner.newGraspToHandFrame(
        self.graspingHand)
    self.planFromCurrentRobotState = True
    self.visOnly = False
    self.ikPlanner.ikServer.maxDegreesPerSecond = 3  # 5
    self.currentYawDegrees = 60
    self.fromTop = True
    self.mapFolder = om.getOrCreateContainer('room mapping')

    taskQueue = AsyncTaskQueue()
    # Seven identical sweep passes (replaces seven copy-pasted calls).
    for _ in range(7):
        self.addTasksToQueueSweep(taskQueue)

    taskQueue.addTask(self.printAsync('done!'))
    taskQueue.addTask(self.doneIndicator)
    return taskQueue