def onMergeIntoPointCloud():
    """Merge the currently selected point cloud into another cloud chosen by the user.

    Copies both clouds into the same (world) frame, appends them, downsamples
    the result with a 1cm voxel grid, and recenters the target cloud's frame
    at the merged centroid.  Relies on the module-level ``pointCloudObj``.
    """
    allPointClouds = om.findObjectByName('point clouds')
    if allPointClouds:
        allPointClouds = [i.getProperty('Name') for i in allPointClouds.children()]
    sel = QtGui.QInputDialog.getItem(None, "Point Cloud Merging", "Pick point cloud to merge into:", allPointClouds, current=0, editable=False)
    sel = om.findObjectByName(sel)

    # Make a copy of each in same frame
    polyDataInto = vtk.vtkPolyData()
    polyDataInto.ShallowCopy(sel.polyData)
    if sel.getChildFrame():
        polyDataInto = segmentation.transformPolyData(polyDataInto, sel.getChildFrame().transform)

    polyDataFrom = vtk.vtkPolyData()
    polyDataFrom.DeepCopy(pointCloudObj.polyData)
    if pointCloudObj.getChildFrame():
        polyDataFrom = segmentation.transformPolyData(polyDataFrom, pointCloudObj.getChildFrame().transform)

    # Actual merge.  (The original also transformed polyDataInto back by the
    # inverse frame here, but the result was never used — dead code, removed.)
    append = filterUtils.appendPolyData([polyDataFrom, polyDataInto])

    # resample to a uniform 1cm voxel grid
    append = segmentationroutines.applyVoxelGrid(append, 0.01)
    append = segmentation.addCoordArraysToPolyData(append)

    # Recenter the frame at the merged cloud's centroid
    sel.setPolyData(append)
    t = vtk.vtkTransform()
    t.PostMultiply()
    t.Translate(filterUtils.computeCentroid(append))
    segmentation.makeMovable(sel, t)

    # Hide the old one
    if pointCloudObj.getProperty('Name') in allPointClouds:
        pointCloudObj.setProperty('Visible', False)
def extractClusters(polyData, clusterInXY=False, **kwargs):
    '''
    Segment a single point cloud into smaller clusters using Euclidean
    Clustering.  When clusterInXY is true, clustering runs on a copy whose z
    coordinates are zeroed, so points separated only in X&Y form clusters.
    Extra keyword arguments are forwarded to applyEuclideanClustering.
    Returns a list of vtkPolyData clusters (empty for an empty input).
    '''
    if not polyData.GetNumberOfPoints():
        return []

    if clusterInXY:
        # If points are separated in X&Y, cluster a flattened (z=0) copy and
        # copy the labels back so clusters keep their true 3D coordinates.
        polyDataXY = vtk.vtkPolyData()
        polyDataXY.DeepCopy(polyData)
        points = vtkNumpy.getNumpyFromVtk(polyDataXY, 'Points')  # shared memory
        points[:, 2] = 0.0
        polyDataXY = applyEuclideanClustering(polyDataXY, **kwargs)
        clusterLabels = vtkNumpy.getNumpyFromVtk(polyDataXY, 'cluster_labels')
        vtkNumpy.addNumpyToVtk(polyData, clusterLabels, 'cluster_labels')
    else:
        polyData = applyEuclideanClustering(polyData, **kwargs)
        clusterLabels = vtkNumpy.getNumpyFromVtk(polyData, 'cluster_labels')

    # Labels are iterated from 1 (label 0 is skipped here — presumably
    # unclustered points; confirm against applyEuclideanClustering).
    # range() replaces Py2-only xrange(); identical iteration semantics.
    clusters = []
    for i in range(1, clusterLabels.max() + 1):
        cluster = thresholdPoints(polyData, 'cluster_labels', [i, i])
        clusters.append(cluster)
    return clusters
def __init__(self, name, view):
    # Torus-style affordance: starts with an empty mesh and rebuilds its
    # geometry from the Radius / Tube Radius / Segments properties.
    AffordanceItem.__init__(self, name, vtk.vtkPolyData(), view)
    self.setProperty('Collision Enabled', False)
    # Editable shape parameters (Radius is presumably the ring radius and
    # Tube Radius the cross-section radius — confirm in geometry code).
    self.addProperty('Radius', 0.15, attributes=om.PropertyAttributes(decimals=3, singleStep=0.01, minimum=0.0, maximum=1e4))
    self.addProperty('Tube Radius', 0.02, attributes=om.PropertyAttributes(decimals=3, singleStep=0.01, minimum=0.0, maximum=1e4))
    self.addProperty('Segments', 8, attributes=om.PropertyAttributes(decimals=3, singleStep=1, minimum=3, maximum=100))
    # Pin the display order of the properties.
    self.properties.setPropertyIndex('Radius', 0)
    self.properties.setPropertyIndex('Tube Radius', 1)
    self.properties.setPropertyIndex('Segments', 2)
    self.updateGeometryFromProperties()
def __init__(self, name, view):
    # Box-style affordance: geometry derived from the three Dimensions
    # extents and an optional Subdivisions count.
    AffordanceItem.__init__(self, name, vtk.vtkPolyData(), view)
    self.addProperty('Dimensions', [0.25, 0.25, 0.25], attributes=om.PropertyAttributes(decimals=3, singleStep=0.01, minimum=0.0, maximum=1e4))
    self.addProperty('Subdivisions', 0, attributes=om.PropertyAttributes(minimum=0, maximum=1000))
    # Pin the display order of the properties.
    self.properties.setPropertyIndex('Dimensions', 0)
    self.properties.setPropertyIndex('Subdivisions', 1)
    self.updateGeometryFromProperties()
def getStereoPointCloudElapsed(self, decimation=4, imagesChannel='CAMERA', cameraName='CAMERA_LEFT', removeSize=0):
    """Grab a stereo point cloud only when enough time and camera motion have
    elapsed since the previous capture.

    Returns (polyData, cameraToLocalFused, cameraToLocal), or a triple of
    Nones when no image is available yet or the elapsed time/distance
    thresholds are not met.
    """
    queue = imageManager.queue
    utime = queue.getCurrentImageTime(cameraName)
    if utime == 0:
        return None, None, None
    # require at least 1E6 utime ticks (presumably microseconds, i.e. ~1s)
    if utime - self.lastUtime < 1E6:
        return None, None, None

    cloud = vtk.vtkPolyData()
    cameraToLocalFused = vtk.vtkTransform()
    queue.getTransform('CAMERA_LEFT_ALT', 'local', utime, cameraToLocalFused)
    cameraToLocal = vtk.vtkTransform()
    queue.getTransform('CAMERA_LEFT', 'local', utime, cameraToLocal)

    # camera motion since the previous capture
    prevToCurrent = vtk.vtkTransform()
    prevToCurrent.PostMultiply()
    prevToCurrent.Concatenate(cameraToLocal)
    prevToCurrent.Concatenate(self.lastCameraToLocal.GetLinearInverse())
    distTravelled = np.linalg.norm(prevToCurrent.GetPosition())

    # 0.2 heavy overlap
    # 0.5 quite a bit of overlap
    # 1.0 is good
    if distTravelled < 0.2:
        return None, None, None

    queue.getPointCloudFromImages(imagesChannel, cloud, decimation, removeSize)

    self.lastCameraToLocal = cameraToLocal
    self.lastUtime = utime
    return cloud, cameraToLocalFused, cameraToLocal
def newPolyData(self, name, view, parent=None):
    """Create and show a hand mesh object (at the zero joint pose) with an
    attached user transform and frame item; returns the new object."""
    model = self.handModel.model
    model.setJointPositions(np.zeros(model.numberOfJoints()))

    mesh = vtk.vtkPolyData()
    model.getModelMesh(mesh)
    mesh = filterUtils.transformPolyData(mesh, self.modelToPalm)

    if isinstance(parent, str):
        parent = om.getOrCreateContainer(parent)

    # yellow for the left hand, green-ish for the right
    color = [0.33, 1.0, 0.0] if self.side == 'right' else [1.0, 1.0, 0.0]

    obj = vis.showPolyData(mesh, name, view=view, color=color, visible=False, parent=parent)
    obj.side = self.side

    frame = vtk.vtkTransform()
    frame.PostMultiply()
    obj.actor.SetUserTransform(frame)
    vis.showFrame(frame, '%s frame' % name, view=view, scale=0.2, visible=False, parent=obj)
    return obj
def __init__(self, name, transform, view):
    # Frame (axes triad) item bound to a vtkTransform.  Maintains an
    # interactive frame widget (created disabled) in sync with the transform
    # and re-emits transform changes via the 'FrameModified' signal.
    PolyDataItem.__init__(self, name, vtk.vtkPolyData(), view)
    self.transform = transform
    self._blockSignals = False  # presumably guards re-entrant modified handling — confirm
    self.actor.SetUserTransform(transform)

    # Interactive manipulation widget, disabled until editing is requested.
    self.widget = vtk.vtkFrameWidget()
    self.widget.CreateDefaultRepresentation()
    self.widget.EnabledOff()
    self.rep = self.widget.GetRepresentation()
    self.rep.SetTransform(transform)

    self.traceData = None   # presumably created lazily when 'Trace' is enabled — confirm
    self._frameSync = None

    self.addProperty('Scale', 1.0, attributes=om.PropertyAttributes(decimals=2, minimum=0.01, maximum=100, singleStep=0.1, hidden=False))
    self.addProperty('Edit', False)
    self.addProperty('Trace', False)
    self.addProperty('Tube', False)
    # Pin the display order of the toggle properties.
    self.properties.setPropertyIndex('Edit', 0)
    self.properties.setPropertyIndex('Trace', 1)
    self.properties.setPropertyIndex('Tube', 2)

    self.callbacks.addSignal('FrameModified')
    self.onTransformModifiedCallback = None
    # Observe the transform so external modifications also update this item.
    self.observerTag = self.transform.AddObserver('ModifiedEvent', self.onTransformModified)

    self._updateAxesGeometry()
    self.setProperty('Color By', 'Axes')
    self.setProperty('Icon', om.Icons.Axes)
def extractClusters(polyData, clusterInXY=False, **kwargs):
    '''
    Segment a single point cloud into smaller clusters using Euclidean
    Clustering.  When clusterInXY is true, clustering runs on a copy whose z
    coordinates are zeroed, so points separated only in X&Y form clusters.
    Extra keyword arguments are forwarded to applyEuclideanClustering.
    Returns a list of vtkPolyData clusters (empty for an empty input).
    '''
    if not polyData.GetNumberOfPoints():
        return []

    if clusterInXY:
        # If points are separated in X&Y, cluster a flattened (z=0) copy and
        # copy the labels back so clusters keep their true 3D coordinates.
        polyDataXY = vtk.vtkPolyData()
        polyDataXY.DeepCopy(polyData)
        points = vtkNumpy.getNumpyFromVtk(polyDataXY, 'Points')  # shared memory
        points[:, 2] = 0.0
        polyDataXY = applyEuclideanClustering(polyDataXY, **kwargs)
        clusterLabels = vtkNumpy.getNumpyFromVtk(polyDataXY, 'cluster_labels')
        vtkNumpy.addNumpyToVtk(polyData, clusterLabels, 'cluster_labels')
    else:
        polyData = applyEuclideanClustering(polyData, **kwargs)
        clusterLabels = vtkNumpy.getNumpyFromVtk(polyData, 'cluster_labels')

    # Labels are iterated from 1 (label 0 is skipped here — presumably
    # unclustered points; confirm against applyEuclideanClustering).
    # range() replaces Py2-only xrange(); identical iteration semantics.
    clusters = []
    for i in range(1, clusterLabels.max() + 1):
        cluster = thresholdPoints(polyData, 'cluster_labels', [i, i])
        clusters.append(cluster)
    return clusters
def showHandCloud(hand='left', view=None):
    """Display a streaming hand point cloud ('left' or 'right') from the Map
    Server, polled at ~1 Hz.  Returns the poly data object; its ``updater``
    attribute holds the polling timer."""
    view = view or app.getCurrentRenderView()
    if view is None:
        return
    assert hand in ('left', 'right')

    maps = om.findObjectByName('Map Server')
    assert maps is not None

    # map view ids: 52 = left hand, 53 = right hand
    viewId = 52 if hand == 'left' else 53
    reader = maps.source.reader

    def getCurrentViewId():
        return reader.GetCurrentMapId(viewId)

    p = vtk.vtkPolyData()
    obj = showPolyData(p, '%s hand cloud' % hand, view=view, parent='sensors')
    obj.currentViewId = -1

    def updateCloud():
        currentViewId = getCurrentViewId()
        if currentViewId != obj.currentViewId:
            reader.GetDataForMapId(viewId, currentViewId, p)
            obj._renderAllViews()
        # NOTE(review): obj.currentViewId is never updated here, so the data
        # is re-fetched on every tick — confirm whether that is intended.

    timer = TimerCallback()
    timer.targetFps = 1
    timer.callback = updateCloud
    timer.start()
    obj.updater = timer
    return obj
def onCopyPointCloud():
    """Copy the picked point cloud (baked into the world frame) into a new
    object under 'point clouds', then hide the original.

    Uses the module-level ``pointCloudObj``, ``pickedObj`` and
    ``lastRandomColor``.
    """
    global lastRandomColor
    polyData = vtk.vtkPolyData()
    polyData.DeepCopy(pointCloudObj.polyData)
    if pointCloudObj.getChildFrame():
        polyData = segmentation.transformPolyData(polyData, pointCloudObj.getChildFrame().transform)
    polyData = segmentation.addCoordArraysToPolyData(polyData)

    # generate random color, and average with a common color to make them generally similar
    lastRandomColor = lastRandomColor + 0.1 + 0.1 * random.random()
    rgb = colorsys.hls_to_rgb(lastRandomColor, 0.7, 1.0)

    # (Removed a block of commented-out makeMovable code that duplicated the
    # active variant of this function.)
    obj = vis.showPolyData(polyData, pointCloudObj.getProperty('Name') + ' copy', color=rgb, parent='point clouds')
    om.setActiveObject(obj)
    # NOTE(review): hides pickedObj rather than pointCloudObj — confirm the
    # two refer to the same object at the call site.
    pickedObj.setProperty('Visible', False)
def __init__(self, name, view):
    # Cylinder-style affordance parameterized by Radius and Length.
    AffordanceItem.__init__(self, name, vtk.vtkPolyData(), view)
    self.addProperty('Radius', 0.03, attributes=om.PropertyAttributes(decimals=3, singleStep=0.01, minimum=0.0, maximum=1e4))
    self.addProperty('Length', 0.5, attributes=om.PropertyAttributes(decimals=3, singleStep=0.01, minimum=0.0, maximum=1e4))
    # Pin the display order of the properties.
    self.properties.setPropertyIndex('Radius', 0)
    self.properties.setPropertyIndex('Length', 1)
    self.updateGeometryFromProperties()
def __init__(self, name, view):
    # Mesh-file-backed affordance: geometry is loaded from the 'Filename'
    # property rather than generated parametrically.
    AffordanceItem.__init__(self, name, vtk.vtkPolyData(), view)
    self.setProperty('Collision Enabled', False)
    self.addProperty('Filename', '')
    self.properties.setPropertyIndex('Filename', 0)

    # attempt to reload geometry if it is currently empty
    if self.getProperty('Filename') and not self.polyData.GetNumberOfPoints():
        self.updateGeometryFromProperties()
def setupObjectInCamera(self, obj):
    """Create an empty mirror of ``obj`` in the camera image overlay view,
    matching its color and visibility, and return the new overlay object."""
    imageView = self.imageView
    overlayObj = vis.updatePolyData(vtk.vtkPolyData(),
                                    self.getTransformedName(obj),
                                    view=imageView.view,
                                    color=obj.getProperty('Color'),
                                    parent=self.getFolderName(),
                                    visible=obj.getProperty('Visible'))
    self.addActorToImageOverlay(overlayObj, imageView)
    return overlayObj
def __init__(self, name, view):
    # Mesh-file-backed affordance: geometry is loaded from the 'Filename'
    # property rather than generated parametrically.
    AffordanceItem.__init__(self, name, vtk.vtkPolyData(), view)
    self.setProperty('Collision Enabled', False)
    self.addProperty('Filename', '')
    self.properties.setPropertyIndex('Filename', 0)

    # attempt to reload geometry if it is currently empty
    if self.getProperty('Filename') and not self.polyData.GetNumberOfPoints():
        self.updateGeometryFromProperties()
def __init__(self, name, view):
    # Sphere-style affordance parameterized by a single Radius property.
    AffordanceItem.__init__(self, name, vtk.vtkPolyData(), view)
    self.addProperty('Radius', 0.15, attributes=om.PropertyAttributes(decimals=3, singleStep=0.01, minimum=0.0, maximum=1e4))
    self.properties.setPropertyIndex('Radius', 0)
    self.updateGeometryFromProperties()
def getTraceData(self):
    """Return the trace polyline object parented under this item's frame,
    creating an empty one (seeded with the frame's current position) on
    first use."""
    trace = self.frame.findChild(self.traceName)
    if trace:
        return trace

    # No trace yet: build a polyline with a single starting point.
    points = vtk.vtkPoints()
    points.SetDataTypeToDouble()
    points.InsertNextPoint(self.frame.transform.GetPosition())
    polyData = vtk.vtkPolyData()
    polyData.SetPoints(points)
    polyData.SetLines(vtk.vtkCellArray())
    return showPolyData(polyData, self.traceName, parent=self.frame)
def _updateSource(self):
    """Poll the point cloud queue; when data is available, transform it into
    the local frame via the VELODYNE pose and push it to the display item."""
    cloud = vtk.vtkPolyData()
    utime = self.PointCloudQueue.getPointCloudFromPointCloud(cloud)
    if not cloud.GetNumberOfPoints():
        return

    sensorToLocal = vtk.vtkTransform()
    self.queue.getTransform('VELODYNE', 'local', utime, sensorToLocal)
    self.polyDataObj.setPolyData(filterUtils.transformPolyData(cloud, sensorToLocal))

    if not self.polyDataObj.initialized:
        self.polyDataObj.initialized = True
def _updateSource(self):
    """Poll the point cloud queue; when data is available, transform it into
    the local frame via the VELODYNE pose and push it to the display item."""
    cloud = vtk.vtkPolyData()
    utime = self.PointCloudQueue.getPointCloudFromPointCloud(cloud)
    if not cloud.GetNumberOfPoints():
        return

    sensorToLocal = vtk.vtkTransform()
    self.queue.getTransform('VELODYNE', 'local', utime, sensorToLocal)
    self.polyDataObj.setPolyData(filterUtils.transformPolyData(cloud, sensorToLocal))

    if not self.polyDataObj.initialized:
        self.polyDataObj.initialized = True
def onMergeIntoPointCloud():
    """Merge the currently selected point cloud into another cloud chosen by the user.

    Copies both clouds into the same (world) frame, appends them, downsamples
    the result with a 1cm voxel grid, and recenters the target cloud's frame
    at the merged centroid.  Relies on the module-level ``pointCloudObj``.
    """
    allPointClouds = om.findObjectByName('point clouds')
    if allPointClouds:
        allPointClouds = [i.getProperty('Name') for i in allPointClouds.children()]
    sel = QtGui.QInputDialog.getItem(None, "Point Cloud Merging", "Pick point cloud to merge into:", allPointClouds, current=0, editable=False)
    sel = om.findObjectByName(sel)

    # Make a copy of each in same frame
    polyDataInto = vtk.vtkPolyData()
    polyDataInto.ShallowCopy(sel.polyData)
    if sel.getChildFrame():
        polyDataInto = segmentation.transformPolyData(polyDataInto, sel.getChildFrame().transform)

    polyDataFrom = vtk.vtkPolyData()
    polyDataFrom.DeepCopy(pointCloudObj.polyData)
    if pointCloudObj.getChildFrame():
        polyDataFrom = segmentation.transformPolyData(polyDataFrom, pointCloudObj.getChildFrame().transform)

    # Actual merge.  (The original also transformed polyDataInto back by the
    # inverse frame here, but the result was never used — dead code, removed.)
    append = filterUtils.appendPolyData([polyDataFrom, polyDataInto])

    # resample to a uniform 1cm voxel grid
    append = segmentationroutines.applyVoxelGrid(append, 0.01)
    append = segmentation.addCoordArraysToPolyData(append)

    # Recenter the frame at the merged cloud's centroid
    sel.setPolyData(append)
    t = vtk.vtkTransform()
    t.PostMultiply()
    t.Translate(filterUtils.computeCentroid(append))
    segmentation.makeMovable(sel, t)

    # Hide the old one
    if pointCloudObj.getProperty('Name') in allPointClouds:
        pointCloudObj.setProperty('Visible', False)
def _updateSource(self):
    """Poll the Kinect queue; when data is available, transform the cloud
    into the local frame via the KINECT_RGB pose and push it to the display
    item, enabling RGB coloring on first update."""
    cloud = vtk.vtkPolyData()
    utime = self.KinectQueue.getPointCloudFromKinect(cloud)
    if not cloud.GetNumberOfPoints():
        return

    cameraToLocal = vtk.vtkTransform()
    self.queue.getTransform('KINECT_RGB', 'local', utime, cameraToLocal)
    self.polyDataObj.setPolyData(filterUtils.transformPolyData(cloud, cameraToLocal))

    if not self.polyDataObj.initialized:
        self.polyDataObj.setProperty('Color By', 'rgb_colors')
        self.polyDataObj.initialized = True
def decodePolyData(data):
    '''Given a numpy int8 array, deserializes the data to construct a new
    vtkPolyData object and returns the result.'''
    if hasattr(vtk, 'vtkCommunicator'):
        # Preferred path: unmarshal directly from a vtkCharArray view of the
        # numpy buffer.
        charArray = vnp.getVtkFromNumpy(data)
        assert isinstance(charArray, vtk.vtkCharArray)
        polyData = vtk.vtkPolyData()
        vtk.vtkCommunicator.UnMarshalDataObject(charArray, polyData)
        return polyData

    # Fallback for VTK builds without vtkCommunicator: parse the bytes with
    # the legacy poly data reader.
    reader = vtk.vtkPolyDataReader()
    reader.ReadFromInputStringOn()
    reader.SetInputString(str(data.data))
    reader.Update()
    return shallowCopy(reader.GetOutput())
def getStereoPointCloud(decimation=4, imagesChannel='CAMERA', cameraName='CAMERA_LEFT', removeSize=0):
    """Grab the current stereo point cloud and return it transformed into the
    local frame, or None when no image has been received yet."""
    queue = imageManager.queue
    utime = queue.getCurrentImageTime(cameraName)
    if utime == 0:
        return None

    cloud = vtk.vtkPolyData()
    cameraToLocal = vtk.vtkTransform()
    queue.getPointCloudFromImages(imagesChannel, cloud, decimation, removeSize)
    queue.getTransform('CAMERA_LEFT', 'local', utime, cameraToLocal)
    return filterUtils.transformPolyData(cloud, cameraToLocal)
def _updateSource(self):
    """Poll the Kinect queue; when data is available, transform the cloud
    into the local frame via the KINECT_RGB pose and push it to the display
    item, enabling RGB coloring on first update."""
    cloud = vtk.vtkPolyData()
    utime = self.KinectQueue.getPointCloudFromKinect(cloud)
    if not cloud.GetNumberOfPoints():
        return

    cameraToLocal = vtk.vtkTransform()
    self.queue.getTransform('KINECT_RGB', 'local', utime, cameraToLocal)
    self.polyDataObj.setPolyData(filterUtils.transformPolyData(cloud, cameraToLocal))

    if not self.polyDataObj.initialized:
        self.polyDataObj.setProperty('Color By', 'rgb_colors')
        self.polyDataObj.initialized = True
def newPolyData(self, name, view, parent=None):
    """Create and show a hand mesh object (at the zero joint pose) with an
    attached user transform and frame item; returns the new object."""
    model = self.handModel.model
    model.setJointPositions(np.zeros(model.numberOfJoints()))

    mesh = vtk.vtkPolyData()
    model.getModelMesh(mesh)
    mesh = filterUtils.transformPolyData(mesh, self.modelToPalm)

    if isinstance(parent, str):
        parent = om.getOrCreateContainer(parent)

    # yellow for the left hand, green-ish for the right
    color = [0.33, 1.0, 0.0] if self.side == 'right' else [1.0, 1.0, 0.0]

    obj = vis.showPolyData(mesh, name, view=view, color=color, visible=False, parent=parent)
    obj.side = self.side

    frame = vtk.vtkTransform()
    frame.PostMultiply()
    obj.actor.SetUserTransform(frame)
    vis.showFrame(frame, '%s frame' % name, view=view, scale=0.2, visible=False, parent=obj)
    return obj
def __init__(self, view, _PointCloudQueue):
    """Point cloud source: primes an initial cloud from the queue, registers
    a pickable display item in the object model, and prepares a 30 fps timer
    that drives _updateSource (not started here)."""
    self.view = view
    self.PointCloudQueue = _PointCloudQueue
    self.visible = True
    self.p = vtk.vtkPolyData()
    # Prime the buffer with whatever data is currently available.
    # FIX: the original called the bare name `PointCloudQueue`, which is not
    # defined in this scope (the parameter is `_PointCloudQueue`); use the
    # stored queue instead.  The returned utime was unused, so it is dropped.
    self.PointCloudQueue.getPointCloudFromPointCloud(self.p)
    self.polyDataObj = vis.PolyDataItem('pointcloud source', shallowCopy(self.p), view)
    self.polyDataObj.actor.SetPickable(1)
    self.polyDataObj.initialized = False
    om.addToObjectModel(self.polyDataObj)

    self.queue = PythonQt.dd.ddBotImageQueue(lcmUtils.getGlobalLCMThread())
    self.queue.init(lcmUtils.getGlobalLCMThread(), drcargs.args().config_file)

    self.targetFps = 30
    self.timerCallback = TimerCallback(targetFps=self.targetFps)
    self.timerCallback.callback = self._updateSource
def onCopyPointCloud():
    """Copy the picked point cloud (baked into the world frame) into a new
    movable object under 'point clouds', then hide the original."""
    global lastRandomColor

    copied = vtk.vtkPolyData()
    copied.DeepCopy(pointCloudObj.polyData)
    childFrame = pointCloudObj.getChildFrame()
    if childFrame:
        copied = segmentation.transformPolyData(copied, childFrame.transform)
    copied = segmentation.addCoordArraysToPolyData(copied)

    # generate random color, and average with a common color to make them generally similar
    lastRandomColor = lastRandomColor + 0.1 + 0.1 * random.random()
    rgb = colorsys.hls_to_rgb(lastRandomColor, 0.7, 1.0)

    obj = vis.showPolyData(copied, pointCloudObj.getProperty('Name') + ' copy', color=rgb, parent='point clouds')

    # recenter the new cloud's frame at its centroid and make it movable
    t = vtk.vtkTransform()
    t.PostMultiply()
    t.Translate(filterUtils.computeCentroid(copied))
    segmentation.makeMovable(obj, t)

    om.setActiveObject(obj)
    pickedObj.setProperty('Visible', False)
def updateGeometryFromProperties(self):
    """Rebuild this affordance's mesh from the 'Filename' property.

    Lookup order: the mesh manager first, then the file system (relative
    paths are resolved against the DRC base dir).  An empty filename yields
    an empty mesh; an unreadable one falls back to a small axes glyph.
    """
    filename = self.getProperty('Filename')
    if not filename:
        self.setPolyData(vtk.vtkPolyData())
        return

    polyData = self.getMeshManager().get(filename)
    if not polyData:
        path = filename if os.path.isabs(filename) else os.path.join(ddapp.getDRCBaseDir(), filename)
        if os.path.isfile(path):
            polyData = ioUtils.readPolyData(path)
        else:
            # use axes as a placeholder mesh
            d = DebugData()
            d.addFrame(vtk.vtkTransform(), scale=0.1, tubeRadius=0.005)
            polyData = d.getPolyData()
    self.setPolyData(polyData)
def __init__(self, view, _KinectQueue):
    """Kinect point cloud source: primes an initial cloud from the queue,
    registers a pickable display item in the object model, and prepares a
    30 fps timer that drives _updateSource (not started here)."""
    self.view = view
    self.KinectQueue = _KinectQueue
    self.visible = True
    self.p = vtk.vtkPolyData()
    # Prime the buffer with whatever data is currently available.
    # FIX: the original called the bare name `KinectQueue`, which is not
    # defined in this scope (the parameter is `_KinectQueue`); use the
    # stored queue instead.  The returned utime was unused, so it is dropped.
    self.KinectQueue.getPointCloudFromKinect(self.p)
    self.polyDataObj = vis.PolyDataItem('kinect source', shallowCopy(self.p), view)
    self.polyDataObj.actor.SetPickable(1)
    self.polyDataObj.initialized = False
    om.addToObjectModel(self.polyDataObj)

    self.queue = PythonQt.dd.ddBotImageQueue(lcmUtils.getGlobalLCMThread())
    self.queue.init(lcmUtils.getGlobalLCMThread(), drcargs.args().config_file)

    self.targetFps = 30
    self.timerCallback = TimerCallback(targetFps=self.targetFps)
    self.timerCallback.callback = self._updateSource
def getStereoPointCloudElapsed(self, decimation=4, imagesChannel='CAMERA', cameraName='CAMERA_LEFT', removeSize=0):
    """Grab a stereo point cloud only when enough time and camera motion have
    elapsed since the previous capture.

    Returns (polyData, cameraToLocalFused, cameraToLocal), or a triple of
    Nones when no image is available yet or the elapsed time/distance
    thresholds are not met.
    """
    queue = imageManager.queue
    utime = queue.getCurrentImageTime(cameraName)
    if utime == 0:
        return None, None, None
    # require at least 1E6 utime ticks (presumably microseconds, i.e. ~1s)
    if utime - self.lastUtime < 1E6:
        return None, None, None

    cloud = vtk.vtkPolyData()
    cameraToLocalFused = vtk.vtkTransform()
    queue.getTransform('CAMERA_LEFT_ALT', 'local', utime, cameraToLocalFused)
    cameraToLocal = vtk.vtkTransform()
    queue.getTransform('CAMERA_LEFT', 'local', utime, cameraToLocal)

    # camera motion since the previous capture
    prevToCurrent = vtk.vtkTransform()
    prevToCurrent.PostMultiply()
    prevToCurrent.Concatenate(cameraToLocal)
    prevToCurrent.Concatenate(self.lastCameraToLocal.GetLinearInverse())
    distTravelled = np.linalg.norm(prevToCurrent.GetPosition())

    # 0.2 heavy overlap
    # 0.5 quite a bit of overlap
    # 1.0 is good
    if distTravelled < 0.2:
        return None, None, None

    queue.getPointCloudFromImages(imagesChannel, cloud, decimation, removeSize)

    self.lastCameraToLocal = cameraToLocal
    self.lastUtime = utime
    return cloud, cameraToLocalFused, cameraToLocal
def getNewHandPolyData(self):
    """Return the hand mesh at the zero joint pose, transformed into the
    palm frame."""
    model = self.handModel.model
    model.setJointPositions(np.zeros(model.numberOfJoints()))
    mesh = vtk.vtkPolyData()
    model.getModelMesh(mesh)
    return filterUtils.transformPolyData(mesh, self.modelToPalm)
def getNewHandPolyData(self):
    """Return the hand mesh at the zero joint pose, transformed into the
    palm frame."""
    model = self.handModel.model
    model.setJointPositions(np.zeros(model.numberOfJoints()))
    mesh = vtk.vtkPolyData()
    model.getModelMesh(mesh)
    return filterUtils.transformPolyData(mesh, self.modelToPalm)