def removePlaneAndBeyond(polyData, expectedNormal=[1,0,0], filterRange=[-np.inf, -0.03], whichAxis=1, whichAxisLetter='y', percentile = 95):
    """Fit a plane (e.g. a back wall) in a percentile-selected slab of the cloud and
    remove it together with everything beyond it.

    NOTE(review): expectedNormal/filterRange are mutable defaults; they are only
    read here, but callers should not mutate them.

    :param polyData: input point cloud (vtkPolyData)
    :param expectedNormal: approximate plane normal used to guide the plane fit
    :param filterRange: signed-distance interval of points to KEEP relative to the fitted plane
    :param whichAxis: column index into the Nx3 points array used to pick the search slab
    :param whichAxisLetter: per-point scalar array name matching whichAxis, used for thresholding
    :param percentile: percentile of the axis values where the plane is expected
    :return: filtered vtkPolyData
    """
    yvalues = vnp.getNumpyFromVtk(polyData, 'Points')[:, whichAxis]
    backY = np.percentile(yvalues, percentile)

    # select a 10cm-deep slab around the expected plane location; direction
    # depends on whether we look toward the high or low end of the axis
    if ( percentile > 50):
        searchRegion = segmentation.thresholdPoints(polyData, whichAxisLetter, [backY - 0.1, np.inf])
    else:
        searchRegion = segmentation.thresholdPoints(polyData, whichAxisLetter, [-np.inf, backY + 0.1])
    vis.updatePolyData(searchRegion, 'search region', parent="segmentation", colorByName=whichAxisLetter, visible=False)

    # find the plane of the back wall, remove it and the points behind it:
    _, origin, normal = segmentation.applyPlaneFit(searchRegion, distanceThreshold=0.02, expectedNormal=expectedNormal, perpendicularAxis=expectedNormal, returnOrigin=True)

    # signed distance of every point in the ORIGINAL cloud to the fitted plane
    points = vnp.getNumpyFromVtk(polyData, 'Points')
    dist = np.dot(points - origin, normal)
    vnp.addNumpyToVtk(polyData, dist, 'dist_to_plane')

    # debug visualization of the fitted plane frame and distances
    backFrame = transformUtils.getTransformFromOriginAndNormal(origin, normal, normalAxis=2)
    vis.updateFrame(backFrame, 'back frame', parent='segmentation', scale=0.15 , visible=False)
    vis.updatePolyData(polyData, 'dist to back', parent='segmentation', visible=False)

    # keep only points whose signed distance lies in filterRange
    polyData = segmentation.thresholdPoints(polyData, 'dist_to_plane', filterRange)
    vis.updatePolyData(polyData, 'back off and all', parent='segmentation', visible=False)
    return polyData
def getSphereGeometry(self, imageName):
    """Return a textured sphere object for the named camera image, creating and
    caching it on first use.

    Returns None if the image has not been received yet (zero dimensions).
    """
    sphereObj = self.sphereObjects.get(imageName)
    if sphereObj:
        return sphereObj

    # image not received yet; cannot compute texture coordinates without it
    if not self.imageManager.getImage(imageName).GetDimensions()[0]:
        return None

    sphereResolution = 50
    sphereRadii = {"CAMERA_LEFT": 20, "CAMERACHEST_LEFT": 20, "CAMERACHEST_RIGHT": 20}

    geometry = makeSphere(sphereRadii[imageName], sphereResolution)
    self.imageManager.queue.computeTextureCoords(imageName, geometry)

    tcoordsArrayName = "tcoords_%s" % imageName
    # split the 2-component tcoords into scalar arrays so clipRange can threshold on each
    vtkNumpy.addNumpyToVtk(geometry, vtkNumpy.getNumpyFromVtk(geometry, tcoordsArrayName)[:, 0].copy(), "tcoords_U")
    vtkNumpy.addNumpyToVtk(geometry, vtkNumpy.getNumpyFromVtk(geometry, tcoordsArrayName)[:, 1].copy(), "tcoords_V")

    # keep only the part of the sphere covered by the image
    geometry = clipRange(geometry, "tcoords_U", [0.0, 1.0])
    geometry = clipRange(geometry, "tcoords_V", [0.0, 1.0])
    geometry.GetPointData().SetTCoords(geometry.GetPointData().GetArray(tcoordsArrayName))

    sphereObj = vis.showPolyData(geometry, imageName, view=self.view, parent="cameras")
    sphereObj.actor.SetTexture(self.imageManager.getTexture(imageName))
    sphereObj.actor.GetProperty().LightingOff()

    # move the actor from the default renderer into the renderer assigned to this image
    self.view.renderer().RemoveActor(sphereObj.actor)
    rendererId = 2 - self.sphereImages.index(imageName)
    self.renderers[rendererId].AddActor(sphereObj.actor)

    self.sphereObjects[imageName] = sphereObj
    return sphereObj
def fitObjectsOnShelf(polyData, maxHeight=0.25):
    """Find a horizontal shelf plane, segment the objects sitting on it, and
    return the input cloud with the shelf and its objects removed.

    :param polyData: input point cloud (vtkPolyData)
    :param maxHeight: max height above the shelf plane to consider as objects
    :return: vtkPolyData containing the remaining points
    """
    # find the shelf plane:
    polyDataWithoutFront, _ = segmentation.removeMajorPlane(polyData, distanceThreshold=0.02)
    polyDataPlaneFit, origin, normal = segmentation.applyPlaneFit(polyDataWithoutFront, expectedNormal=np.array([0.0,0.0,1.0]), perpendicularAxis=np.array([0.0,0.0,1.0]), returnOrigin=True)
    vis.updatePolyData(polyDataPlaneFit, 'polyDataPlaneFit', parent='segmentation', visible=False)

    # centroid of the points within 1cm of the plane gives the shelf center
    shelfSurfacePoints = segmentation.thresholdPoints(polyDataPlaneFit, 'dist_to_plane', [-0.01, 0.01])
    shelfCenter = segmentation.computeCentroid(shelfSurfacePoints)
    shelfFrame = transformUtils.getTransformFromOriginAndNormal(shelfCenter, normal, normalAxis=2)
    vis.showFrame(shelfFrame, 'shelfFrame', parent='segmentation', scale=0.15, visible=False)

    # find the points near to the shelf plane and find objects on it:
    points = vnp.getNumpyFromVtk(polyData, 'Points')
    dist = np.dot(points - origin, normal)
    vnp.addNumpyToVtk(polyData, dist, 'dist_to_plane')
    shelfPoints = segmentation.thresholdPoints(polyData, 'dist_to_plane', [-0.01, maxHeight])
    vis.updatePolyData(shelfPoints, 'shelf', parent='segmentation', visible=False)

    data = segmentation.segmentTableScene(shelfPoints, shelfCenter, filterClustering=False)
    vis.showClusterObjects(data.clusters + [data.table], parent='segmentation')

    # remove the points that we considered from the original cloud:
    # mark with 1.0 every point OUTSIDE the shelf band, then keep only those.
    # (the previous "(a + b) + 0.1 - 0.1" expression did the same logical-or
    # plus bool->float conversion implicitly)
    dists = vnp.getNumpyFromVtk(polyData, 'dist_to_plane')
    diffShelf = np.logical_or(dists > maxHeight, dists < -0.01).astype(np.float64)
    vnp.addNumpyToVtk(polyData, diffShelf, 'diff_shelf')
    polyData = segmentation.thresholdPoints(polyData, 'diff_shelf', [1, 1])
    vis.updatePolyData(polyData, 'rest', parent='segmentation', visible=False)
    return polyData
def pickArea(self, startPos, endPos):
    """Select all points of self.polyData that fall inside the screen-space
    rectangle spanned by startPos and endPos, and update the 'selected points'
    visualization.
    """
    # restore the interactor style that was active before rubber-band picking
    self.iren.SetInteractorStyle(self.prevStyle)

    # build a view frustum from the (normalized) screen rectangle
    picker = vtk.vtkAreaPicker()
    picker.AreaPick(min(startPos[0], endPos[0]), min(startPos[1], endPos[1]), max(startPos[0], endPos[0]), max(startPos[1], endPos[1]), self.view.renderer())
    frustum = picker.GetFrustum()

    # extract the geometry inside the frustum
    extractGeometry = vtk.vtkExtractPolyDataGeometry()
    extractGeometry.SetImplicitFunction(frustum)
    extractGeometry.SetInputData(self.polyData)
    extractGeometry.ExtractBoundaryCellsOn()
    extractGeometry.Update()
    selected = filterUtils.cleanPolyData(extractGeometry.GetOutput())
    if not selected.GetNumberOfPoints():
        return

    # 'point_ids' maps extracted points back to indices in self.polyData;
    # write selectMode into the 'is_selected' array for those indices
    # (presumably 1 = select, 0 = deselect — TODO confirm against caller)
    pickedIds = vnp.getNumpyFromVtk(selected, 'point_ids')
    vnp.getNumpyFromVtk(self.polyData, 'is_selected')[pickedIds] = self.selectMode

    selection = self.getSelectedPoints()
    if not self.selectionObj:
        self.selectionObj = vis.showPolyData(selection, 'selected points', color=self.selectionColor, parent='selection')
        self.selectionObj.setProperty('Point Size', self.selectionPointSize)
    else:
        self.selectionObj.setPolyData(selection)
def shiftPointsToOrigin(polyData, copy=True, shuffle=True):
    """Center the points of polyData about their centroid and return a new
    polyData with vertex cells.

    :param copy: when False, operate directly on the vtk-backed array (mutates
        the input polyData's points)
    :param shuffle: randomly permute point order before centering
    """
    pts = vnp.getNumpyFromVtk(polyData, 'Points')
    if copy:
        pts = pts.copy()
    if shuffle:
        # note: with copy=False this shuffles the input's point array in place
        np.random.shuffle(pts)
    pts -= np.average(pts, axis=0)
    return vnp.numpyToPolyData(pts, createVertexCells=True)
def computePointToPointDistance(pointsPolyData, searchPolyData):
    """For each point in pointsPolyData, compute the Euclidean distance to the
    nearest point in searchPolyData.

    :return: numpy array of distances, one per point of pointsPolyData
    """
    # point locator gives O(log n) nearest-neighbor queries
    cl = vtk.vtkPointLocator()
    cl.SetDataSet(searchPolyData)
    cl.BuildLocator()

    points = vnp.getNumpyFromVtk(pointsPolyData, 'Points')
    searchPoints = vnp.getNumpyFromVtk(searchPolyData, 'Points')
    closestPoints = np.zeros((len(points), 3))

    # enumerate/range instead of xrange: works on both Python 2 and 3
    for i, p in enumerate(points):
        ptId = cl.FindClosestPoint(p)
        closestPoints[i] = searchPoints[ptId]

    return np.linalg.norm(closestPoints - points, axis=1)
def getSphereGeometry(self, imageName):
    """Return a textured sphere object for the named camera image of
    self.robotName, creating and caching it on first use.

    Returns None if the image has not been received yet (zero dimensions).
    """
    sphereObj = self.sphereObjects.get(imageName)
    if sphereObj:
        return sphereObj

    # image not received yet; cannot compute texture coordinates without it
    if not self.imageManager.getImage(imageName, self.robotName).GetDimensions()[0]:
        return None

    sphereResolution = 50
    sphereRadii = 20

    geometry = makeSphere(sphereRadii, sphereResolution)
    self.imageManager.queue[self.robotName][imageName].compute_texture_coords(imageName, geometry)

    tcoordsArrayName = "tcoords_%s" % imageName
    # split the 2-component tcoords into scalar arrays so clipRange can threshold on each
    vtkNumpy.addNumpyToVtk(
        geometry,
        vtkNumpy.getNumpyFromVtk(geometry, tcoordsArrayName)[:, 0].copy(),
        "tcoords_U",
    )
    vtkNumpy.addNumpyToVtk(
        geometry,
        vtkNumpy.getNumpyFromVtk(geometry, tcoordsArrayName)[:, 1].copy(),
        "tcoords_V",
    )

    # keep only the part of the sphere covered by the image
    geometry = clipRange(geometry, "tcoords_U", [0.0, 1.0])
    geometry = clipRange(geometry, "tcoords_V", [0.0, 1.0])
    geometry.GetPointData().SetTCoords(geometry.GetPointData().GetArray(tcoordsArrayName))

    sphereObj = vis.showPolyData(geometry, imageName, view=self.view, parent="cameras")
    sphereObj.actor.SetTexture(self.imageManager.getTexture(imageName, self.robotName))
    sphereObj.actor.GetProperty().LightingOff()

    # move the actor from the default renderer into the renderer assigned to this image
    self.view.renderer().RemoveActor(sphereObj.actor)
    rendererId = 2 - self.images.index(imageName)
    self.renderers[rendererId].AddActor(sphereObj.actor)

    self.sphereObjects[imageName] = sphereObj
    return sphereObj
def addHSVArrays(polyData, rgbArrayName='rgb_colors'):
    """Convert the per-point RGB colors (0-255) to HSV and store them on
    polyData as the scalar arrays 'hue', 'saturation' and 'value'.
    """
    import colorsys
    rgb = vnp.getNumpyFromVtk(polyData, rgbArrayName) / 255.0
    hsv = np.array([colorsys.rgb_to_hsv(r, g, b) for (r, g, b) in rgb])
    for column, channelName in enumerate(('hue', 'saturation', 'value')):
        vnp.addNumpyToVtk(polyData, hsv[:, column].copy(), channelName)
def labelNonFinitePoints(polyData, arrayName='Points'):
    '''
    adds is_nonfinite label to polyData.  non finite includes nan and +/- inf.
    '''
    pts = vnp.getNumpyFromVtk(polyData, arrayName)
    # a point is non-finite if ANY of its components is nan or +/- inf
    nonFinite = ~np.isfinite(pts)
    labels = nonFinite.any(axis=1)
    vnp.addNumpyToVtk(polyData, labels.astype(np.int32), 'is_nonfinite')
def saveConvexHulls(chulls, outputDir):
    """Write each convex hull to outputDir as plane_%04d.txt (first row is the
    unit plane normal, remaining rows are hull points) plus a merged mesh
    merged_planes.ply.  Any existing outputDir is deleted first.
    """
    # start from a clean output directory
    if os.path.isdir(outputDir):
        print 'removing directory:', outputDir
        shutil.rmtree(outputDir)

    print 'making directory:', outputDir
    os.makedirs(outputDir)

    for i, chull in enumerate(chulls):
        chull, plane = chull.convexHull, chull.plane
        origin = plane.GetOrigin()  # NOTE(review): origin is unused below
        normal = plane.GetNormal()
        chullPoints = vnp.getNumpyFromVtk(chull, 'Points')
        # the file format assumes a unit normal in the first row
        assert np.allclose(np.linalg.norm(normal), 1.0)
        output = np.vstack([normal, chullPoints])
        outputFilename = os.path.join(outputDir, 'plane_%04d.txt' % i)
        np.savetxt(outputFilename, output)

    ioUtils.writePolyData(getMergedConvexHullsMesh(chulls), os.path.join(outputDir, 'merged_planes.ply'))
def writePointsFile(polyData, filename):
    """Write the points of polyData to an ascii file: first line is the point
    count, then one 'x y z' line per point.

    :param polyData: vtkPolyData whose 'Points' array is written
    :param filename: output path (overwritten if it exists)
    """
    points = vnp.getNumpyFromVtk(polyData, 'Points')
    # with-statement guarantees the file is closed even if a write raises
    with open(filename, 'w') as f:
        f.write('%d\n' % len(points))
        for p in points:
            f.write('%f %f %f\n' % (p[0], p[1], p[2]))
def getSphereGeometry(self, imageName):
    """Return a textured sphere object for the named camera image, creating and
    caching it on first use.

    Returns None if the image has not been received yet (zero dimensions).
    """
    sphereObj = self.sphereObjects.get(imageName)
    if sphereObj:
        return sphereObj

    # image not received yet; cannot compute texture coordinates without it
    if not self.imageManager.getImage(imageName).GetDimensions()[0]:
        return None

    sphereResolution = 50
    sphereRadii = {
        'CAMERA_LEFT': 20,
        'CAMERACHEST_LEFT': 20,
        'CAMERACHEST_RIGHT': 20
    }

    geometry = makeSphere(sphereRadii[imageName], sphereResolution)
    self.imageManager.queue.computeTextureCoords(imageName, geometry)

    tcoordsArrayName = 'tcoords_%s' % imageName
    # split the 2-component tcoords into scalar arrays so clipRange can threshold on each
    vtkNumpy.addNumpyToVtk(
        geometry,
        vtkNumpy.getNumpyFromVtk(geometry, tcoordsArrayName)[:, 0].copy(),
        'tcoords_U')
    vtkNumpy.addNumpyToVtk(
        geometry,
        vtkNumpy.getNumpyFromVtk(geometry, tcoordsArrayName)[:, 1].copy(),
        'tcoords_V')

    # keep only the part of the sphere covered by the image
    geometry = clipRange(geometry, 'tcoords_U', [0.0, 1.0])
    geometry = clipRange(geometry, 'tcoords_V', [0.0, 1.0])
    geometry.GetPointData().SetTCoords(geometry.GetPointData().GetArray(tcoordsArrayName))

    sphereObj = vis.showPolyData(geometry, imageName, view=self.view, parent='cameras')
    sphereObj.actor.SetTexture(self.imageManager.getTexture(imageName))
    sphereObj.actor.GetProperty().LightingOff()

    # move the actor from the default renderer into the renderer assigned to this image
    self.view.renderer().RemoveActor(sphereObj.actor)
    rendererId = 2 - self.sphereImages.index(imageName)
    self.renderers[rendererId].AddActor(sphereObj.actor)

    self.sphereObjects[imageName] = sphereObj
    return sphereObj
def onNewWalkingGoal(self, walkingGoal=None):
    """Create or update the editable 'walking goal' frame.

    If a height-map terrain object exists, the goal's z coordinate is snapped
    to the highest terrain point near the goal and a draggable placer handle
    is attached.
    """
    walkingGoal = walkingGoal or self.newWalkingGoalFrame(self.robotModel)
    frameObj = vis.updateFrame(walkingGoal, 'walking goal', parent='planning', scale=0.25)
    frameObj.setProperty('Edit', True)

    # constrain the frame widget: no z translation, rotation about z only
    rep = frameObj.widget.GetRepresentation()
    rep.SetTranslateAxisEnabled(2, False)
    rep.SetRotateAxisEnabled(0, False)
    rep.SetRotateAxisEnabled(1, False)
    frameObj.widget.HandleRotationEnabledOff()

    if self.placer:
        self.placer.stop()

    terrain = om.findObjectByName('HEIGHT_MAP_SCENE')
    if terrain:
        pos = np.array(frameObj.transform.GetPosition())
        polyData = filterUtils.removeNonFinitePoints(terrain.polyData)
        if polyData.GetNumberOfPoints():
            # snap z to the highest terrain point within 10cm of the vertical
            # line through the goal position
            polyData = segmentation.labelDistanceToLine(
                polyData, pos, pos + [0, 0, 1])
            polyData = segmentation.thresholdPoints(
                polyData, 'distance_to_line', [0.0, 0.1])
            if polyData.GetNumberOfPoints():
                pos[2] = np.nanmax(
                    vnp.getNumpyFromVtk(polyData, 'Points')[:, 2])
            frameObj.transform.Translate(
                pos - np.array(frameObj.transform.GetPosition()))

        # small sphere handle the user can drag on the terrain surface
        d = DebugData()
        d.addSphere((0, 0, 0), radius=0.03)
        handle = vis.showPolyData(d.getPolyData(), 'walking goal terrain handle', parent=frameObj, visible=True, color=[1, 1, 0])
        handle.actor.SetUserTransform(frameObj.transform)
        self.placer = PlacerWidget(app.getCurrentRenderView(), handle, terrain)

        def onFramePropertyModified(propertySet, propertyName):
            # start/stop the terrain placer whenever 'Edit' is toggled
            if propertyName == 'Edit':
                if propertySet.getProperty(propertyName):
                    self.placer.start()
                else:
                    self.placer.stop()

        frameObj.properties.connectPropertyChanged(onFramePropertyModified)
        onFramePropertyModified(frameObj, 'Edit')

    frameObj.connectFrameModified(self.onWalkingGoalModified)
    self.onWalkingGoalModified(frameObj)
def vtk_image_to_numpy_array(vtk_image):
    """Return the scalars of a vtkImageData as an (h, w, channels) numpy array.

    Rows are flipped because vtk stores image rows bottom-up, while
    numpy/opencv expect top-down.
    """
    width, height, _ = vtk_image.GetDimensions()
    scalarName = vtk_image.GetPointData().GetScalars().GetName()
    data = vnp.getNumpyFromVtk(vtk_image, scalarName)
    return np.flipud(data.reshape(height, width, -1))
def __init__(self, uid, view, seed_pose, irisDriver, existing_region=None):
    """IRIS region item: an editable seed pose with a computed safe region.

    :param uid: integer id used in the item name
    :param view: render view to display in
    :param seed_pose: vtkTransform giving the initial seed frame
    :param irisDriver: driver used to (re)compute the region
    :param existing_region: if given, display this region instead of computing one
    """
    d = DebugData()
    self.uid = uid
    vis.PolyDataItem.__init__(self, "IRIS region {:d}".format(uid), d.getPolyData(), view)
    self.transform = seed_pose
    d.addSphere((0,0,0), radius=0.02)
    self.seedObj = vis.showPolyData(d.getPolyData(), 'region seed', parent=om.getOrCreateContainer('IRIS region seeds'))
    self.seedObj.actor.SetUserTransform(self.transform)
    self.frameObj = vis.showFrame(self.transform, 'region seed frame',
                                  scale=0.2,
                                  visible=False,
                                  parent=self.seedObj)
    self.frameObj.setProperty('Edit', True)

    self.frameObj.widget.HandleRotationEnabledOff()
    terrain = om.findObjectByName('HEIGHT_MAP_SCENE')
    if terrain:
        # constrain the widget: no z translation, rotation about z only
        rep = self.frameObj.widget.GetRepresentation()
        rep.SetTranslateAxisEnabled(2, False)
        rep.SetRotateAxisEnabled(0, False)
        rep.SetRotateAxisEnabled(1, False)

        pos = np.array(self.frameObj.transform.GetPosition())
        polyData = filterUtils.removeNonFinitePoints(terrain.polyData)
        if polyData.GetNumberOfPoints():
            # snap z to the highest terrain point within 10cm of the vertical
            # line through the seed position
            polyData = segmentation.labelDistanceToLine(polyData, pos, pos+[0,0,1])
            polyData = segmentation.thresholdPoints(polyData, 'distance_to_line', [0.0, 0.1])
            if polyData.GetNumberOfPoints():
                pos[2] = np.nanmax(vnp.getNumpyFromVtk(polyData, 'Points')[:,2])
            self.frameObj.transform.Translate(pos - np.array(self.frameObj.transform.GetPosition()))

        self.placer = PlacerWidget(view, self.seedObj, terrain)
        self.placer.start()
    else:
        self.frameObj.setProperty('Edit', True)
        self.frameObj.setProperty('Visible', True)

    self.driver = irisDriver
    self.safe_region = None
    self.addProperty('Visible', True)
    self.addProperty('Enabled for Walking', True)
    self.addProperty('Alpha', 1.0)
    self.addProperty('Color', QtGui.QColor(200,200,20))

    self.frameObj.connectFrameModified(self.onFrameModified)
    if existing_region is None:
        # no region supplied: compute one from the seed frame now
        self.onFrameModified(self.frameObj)
    else:
        self.setRegion(existing_region)

    self.setProperty('Alpha', 0.5)
    self.setProperty('Color', QtGui.QColor(220,220,220))
def removeOriginPoints(polyData):
    """
    openni2-lcm driver publishes 0.0 depth for points with invalid range
    :param polyData:
    :return:
    """
    pts = vnp.getNumpyFromVtk(polyData, 'Points')
    # a point exactly at the origin has component sum 0.0
    isOriginPoint = (pts.sum(axis=1) == 0.0).astype(int)
    vnp.addNumpyToVtk(polyData, isOriginPoint, 'is_origin_point')
    return filterUtils.thresholdPoints(polyData, 'is_origin_point', [0.0, 0.0])
def rescalePolyData(polyDataList):
    """Scale the points of every poly data in place so the farthest point from
    the origin (over all inputs) lands at distance 1.

    :param polyDataList: list of vtkPolyData; their point arrays are modified in place
    :return: the scale factor that was divided out
    """
    pointList = [
        vnp.getNumpyFromVtk(polyData, 'Points') for polyData in polyDataList
    ]
    scaleFactor = np.max(
        [np.max(np.linalg.norm(points, axis=1)) for points in pointList])

    for points in pointList:
        # scaleFactor is already a scalar; the old np.max() wrapper was a no-op
        points /= scaleFactor

    return scaleFactor
def captureLabelImage(self, filename):
    """Render the view with lighting disabled and save a single-channel label
    image to filename (skipped when filename is None).

    :return: the vtkImageData screenshot
    """
    view = self.globalsDict['view']
    self.disableLighting()
    # shouldWrite=False: we post-process before writing ourselves
    im = sgp.saveScreenshot(view, filename, shouldWrite=False)

    if filename is not None:
        img = vnp.getNumpyFromVtk(im, 'ImageScalars')
        assert img.dtype == np.uint8

        img.shape = (im.GetDimensions()[1], im.GetDimensions()[0], 3)
        img = np.flipud(img)  # vtk image rows are bottom-up

        # keep a single channel; presumably the label id is replicated across
        # all three channels — TODO confirm
        img = img[:, :, 0]
        # NOTE(review): scipy.misc.imsave was removed in scipy >= 1.2;
        # consider imageio.imwrite if scipy is upgraded
        scipy.misc.imsave(filename, img)

    return im
def getDepthMapData(self, viewId):
    """Fetch the current depth map for a view from the map server reader.

    :param viewId: id of the map view to query
    :return: (depth, transform) where depth is a 2d numpy array and transform
        is the 4x4 matrix as a numpy array, or (None, None) if no map exists
    """
    mapId = self.reader.GetCurrentMapId(viewId)
    if mapId < 0:
        return None, None

    depthImage = vtk.vtkImageData()
    transform = vtk.vtkTransform()
    self.reader.GetDataForMapId(viewId, mapId, depthImage, transform)

    dims = depthImage.GetDimensions()
    d = vnp.getNumpyFromVtk(depthImage, 'ImageScalars')
    d = d.reshape(dims[1], dims[0])

    # range instead of xrange: works on both Python 2 and 3
    t = np.array([[transform.GetMatrix().GetElement(r, c) for c in range(4)]
                  for r in range(4)])
    return d, t
def onNewWalkingGoal(self, walkingGoal=None):
    """Create or update the editable 'walking goal' frame.

    If a height-map terrain object exists, the goal's z coordinate is snapped
    to the highest terrain point near the goal and a draggable placer handle
    is attached.
    """
    walkingGoal = walkingGoal or self.newWalkingGoalFrame(self.robotModel)
    frameObj = vis.updateFrame(walkingGoal, 'walking goal', parent='planning', scale=0.25)
    frameObj.setProperty('Edit', True)

    # constrain the frame widget: no z translation, rotation about z only
    rep = frameObj.widget.GetRepresentation()
    rep.SetTranslateAxisEnabled(2, False)
    rep.SetRotateAxisEnabled(0, False)
    rep.SetRotateAxisEnabled(1, False)
    frameObj.widget.HandleRotationEnabledOff()

    if self.placer:
        self.placer.stop()

    terrain = om.findObjectByName('HEIGHT_MAP_SCENE')
    if terrain:
        pos = np.array(frameObj.transform.GetPosition())
        polyData = filterUtils.removeNonFinitePoints(terrain.polyData)
        if polyData.GetNumberOfPoints():
            # snap z to the highest terrain point within 10cm of the vertical
            # line through the goal position
            polyData = segmentation.labelDistanceToLine(polyData, pos, pos+[0,0,1])
            polyData = segmentation.thresholdPoints(polyData, 'distance_to_line', [0.0, 0.1])
            if polyData.GetNumberOfPoints():
                pos[2] = np.nanmax(vnp.getNumpyFromVtk(polyData, 'Points')[:,2])
            frameObj.transform.Translate(pos - np.array(frameObj.transform.GetPosition()))

        # small sphere handle the user can drag on the terrain surface
        d = DebugData()
        d.addSphere((0,0,0), radius=0.03)
        handle = vis.showPolyData(d.getPolyData(), 'walking goal terrain handle', parent=frameObj, visible=True, color=[1,1,0])
        handle.actor.SetUserTransform(frameObj.transform)
        self.placer = PlacerWidget(app.getCurrentRenderView(), handle, terrain)

        def onFramePropertyModified(propertySet, propertyName):
            # start/stop the terrain placer whenever 'Edit' is toggled
            if propertyName == 'Edit':
                if propertySet.getProperty(propertyName):
                    self.placer.start()
                else:
                    self.placer.stop()

        frameObj.properties.connectPropertyChanged(onFramePropertyModified)
        onFramePropertyModified(frameObj, 'Edit')

    frameObj.connectFrameModified(self.onWalkingGoalModified)
    self.onWalkingGoalModified(frameObj)
def numpyToVtkImage(data, spacing=(1.0, 1.0, 1.0), origin=(0.0, 0.0, 0.0)):
    """Convert a 3d numpy array into a single-component vtkImageData of doubles.

    :param data: numpy array of shape (nx, ny, nz)
    :param spacing: voxel spacing for the image
    :param origin: world-space origin of the image
    :return: vtkImageData with the flattened (C-order) data as 'ImageScalars'
    """
    assert len(data.shape) == 3

    image = vtk.vtkImageData()
    # NOTE(review): SetWholeExtent / SetScalarType / AllocateScalars() with no
    # arguments is the VTK 5 API — confirm the VTK version this runs against
    image.SetWholeExtent(0, data.shape[0] - 1, 0, data.shape[1] - 1, 0, data.shape[2] - 1)
    image.SetSpacing(spacing)
    image.SetOrigin(origin)
    image.SetExtent(image.GetWholeExtent())
    image.SetNumberOfScalarComponents(1)
    image.SetScalarType(vtk.VTK_DOUBLE)
    image.AllocateScalars()

    # copy the flattened data into the image's scalar array in place
    d = vnp.getNumpyFromVtk(image, 'ImageScalars')
    np.copyto(d, data.flatten())
    return image
def createTextureGround(imageFilename, view):
    """Create a large textured ground plane and show it in the given view
    under the 'skybox' container.
    """
    groundPlane = createTexturedPlane()
    texture = createTexture(imageFilename)
    texture.RepeatOn()

    # tile the texture 60x across the plane
    texCoords = vnp.getNumpyFromVtk(groundPlane, 'Texture Coordinates')
    texCoords *= 60

    # scale the unit plane up to 200x200 and sink it slightly below z=0
    xform = vtk.vtkTransform()
    xform.PostMultiply()
    xform.Scale(200, 200, 200)
    xform.Translate(0, 0, -0.005)
    groundPlane = filterUtils.transformPolyData(groundPlane, xform)

    obj = vis.showPolyData(groundPlane, 'ground', view=view, alpha=1.0, parent='skybox')
    obj.actor.SetTexture(texture)
    obj.actor.GetProperty().LightingOff()
def createTextureGround(imageFilename, view):
    """Create a large textured ground plane and show it in the given view
    under the 'skybox' container.
    """
    pd = createTexturedPlane()
    texture = createTexture(imageFilename)
    texture.RepeatOn()

    # tile the texture 60x across the plane
    tcoords = vnp.getNumpyFromVtk(pd, "Texture Coordinates")
    tcoords *= 60

    # scale the unit plane up to 200x200 and sink it slightly below z=0
    t = vtk.vtkTransform()
    t.PostMultiply()
    t.Scale(200, 200, 200)
    t.Translate(0, 0, -0.005)
    pd = filterUtils.transformPolyData(pd, t)

    obj = vis.showPolyData(pd, "ground", view=view, alpha=1.0, parent="skybox")
    obj.actor.SetTexture(texture)
    obj.actor.GetProperty().LightingOff()
def computePointToSurfaceDistance(pointsPolyData, meshPolyData):
    """For each point in pointsPolyData, compute the Euclidean distance to the
    closest location on the surface of meshPolyData.

    :return: numpy array of distances, one per point of pointsPolyData
    """
    # cell locator finds the closest point on any mesh cell, not just vertices
    cl = vtk.vtkCellLocator()
    cl.SetDataSet(meshPolyData)
    cl.BuildLocator()

    points = vnp.getNumpyFromVtk(pointsPolyData, 'Points')
    dists = np.zeros(len(points))

    closestPoint = np.zeros(3)
    closestPointDist = vtk.mutable(0.0)
    cellId = vtk.mutable(0)
    subId = vtk.mutable(0)

    # range instead of xrange: works on both Python 2 and 3
    for i in range(len(points)):
        # FindClosestPoint returns the SQUARED distance in closestPointDist
        cl.FindClosestPoint(points[i], closestPoint, cellId, subId, closestPointDist)
        dists[i] = closestPointDist

    return np.sqrt(dists)
def showNumpyImage(self, img, flip=True):
    """Display an (height, width, channels) uint8 numpy image in this view,
    reusing (or lazily creating) the backing vtkImageData.

    :param img: numpy array of shape (height, width, numChannels)
    :param flip: flip rows first (vtk stores image rows bottom-up)
    """
    image = self.getImage()
    if not image:
        image = vtk.vtkImageData()
        self.setImage(image)

    if flip:
        img = np.flipud(img)

    height, width, numChannels = img.shape
    dims = image.GetDimensions()
    # reallocate only when the size or channel count changed
    if dims[0] != width or dims[1] != height or image.GetNumberOfScalarComponents() != numChannels:
        image.SetDimensions(width, height, 1)
        image.AllocateScalars(vtk.VTK_UNSIGNED_CHAR, numChannels)

    # copy the pixel data into the vtk scalar array in place
    scalars = vnp.getNumpyFromVtk(image, 'ImageScalars')
    if numChannels > 1:
        scalars[:] = img.reshape(width*height, numChannels)[:]
    else:
        scalars[:] = img.reshape(width*height)[:]

    image.Modified()
    self.view.render()
def loadObjFile(filename, scaleFactor=0.001):
    """Load an obj+mtl file as one object per mesh piece, scale the points
    (default mm -> m), recenter everything at the merged centroid, and return
    the polyData of the last loaded piece.
    """
    baseName = os.path.basename(filename)
    folder = om.getOrCreateContainer(baseName)

    meshes, actors = ioUtils.readObjMtl(filename)
    print 'loaded %d meshes from file: %s' % (len(meshes), filename)

    for i, mesh, actor in zip(xrange(len(meshes)), meshes, actors):
        mesh = filterUtils.cleanPolyData(mesh)
        mesh = filterUtils.computeNormals(mesh)
        obj = vis.showPolyData(mesh, baseName + ' piece %d'%i, parent=folder)
        obj.setProperty('Color', actor.GetProperty().GetColor())
        obj.actor.SetTexture(actor.GetTexture())

        # scale the points in place (unit conversion)
        pts = vnp.getNumpyFromVtk(obj.polyData, 'Points')
        pts *= scaleFactor
        print ' mesh %d, num points %d' % (i, obj.polyData.GetNumberOfPoints())

        #if actor.GetTexture():
        #    showImage(actor.GetTexture().GetInput())

    polyData = filterUtils.appendPolyData([obj.polyData for obj in folder.children()])
    vis.showPolyData(polyData, baseName + ' merged', parent=folder, visible=False)
    print ' total points:', polyData.GetNumberOfPoints()

    # compute centroid and subtract from points to move the mesh to the origin
    origin = np.array(filterUtils.computeCentroid(polyData))
    t = transformUtils.frameFromPositionAndRPY(-origin, [0.0, 0.0, 0.0])
    for obj in folder.children():
        obj.setPolyData(filterUtils.transformPolyData(obj.polyData, t))

    return folder.children()[-1].polyData
def computeCentroid(polyData):
    """Return the centroid (mean of all points) of polyData as a 3-vector."""
    points = vnp.getNumpyFromVtk(polyData, 'Points')
    return points.mean(axis=0)
# Demo: round-trip a vtkImageData through numpy and display both copies.
from director import imageview
from director import consoleapp
import director.vtkAll as vtk
import director.vtkNumpy as vnp

# create a vtkImageData object
s = vtk.vtkRTAnalyticSource()
s.SetWholeExtent(-100, 100, -100, 100, 0, 0)
s.Update()
image = s.GetOutput()

# get image data as a numpy array (rows x cols x channels)
w, h, _ = image.GetDimensions()
img = vnp.getNumpyFromVtk(image, 'RTData').reshape(h, w, -1)

# show numpy image data
view = imageview.ImageView()
view.showNumpyImage(img)
view.show()

# convert back to vtkImageData and show
view2 = imageview.ImageView()
view2.setImage(vnp.numpyToImageData(img))
view2.show()

# run the app event loop
consoleapp.ConsoleApp.start()
def hasNonFinitePoints(polyData, arrayName='Points'):
    """Return True if any component of any point in the named array is
    non-finite (nan or +/- inf).
    """
    pts = vnp.getNumpyFromVtk(polyData, arrayName)
    # BUGFIX: np.isfinite(pts).any() answered "is any point finite?" which is
    # the opposite of this function's name; negate before reducing
    # (mirrors labelNonFinitePoints)
    return np.logical_not(np.isfinite(pts)).any()
# Demo: round-trip a vtkImageData through numpy and display both copies.
from director import imageview
from director import consoleapp
import director.vtkAll as vtk
import director.vtkNumpy as vnp

# create a vtkImageData object
s = vtk.vtkRTAnalyticSource()
s.SetWholeExtent(-100, 100, -100, 100, 0, 0)
s.Update()
image = s.GetOutput()

# get image data as a numpy array (rows x cols x channels)
w,h,_ = image.GetDimensions()
img = vnp.getNumpyFromVtk(image, 'RTData').reshape(h,w,-1)

# show numpy image data
view = imageview.ImageView()
view.showNumpyImage(img)
view.show()

# convert back to vtkImageData and show
view2 = imageview.ImageView()
view2.setImage(vnp.numpyToImageData(img))
view2.show()

# run the app event loop
consoleapp.ConsoleApp.start()
# Optionally estimate per-point normals and discard points where estimation failed.
if testNormals:
    print 'computing normals...'
    f = vtk.vtkRobustNormalEstimator()
    f.SetInput(polyData)
    f.SetMaxIterations(100)
    f.SetMaxEstimationError(0.01)
    f.SetMaxCenterError(0.02)
    f.SetComputeCurvature(True)
    f.SetRadius(0.1)
    f.Update()
    polyData = shallowCopy(f.GetOutput())
    print 'done.'

    # filter points without normals
    normals = vnp.getNumpyFromVtk(polyData, 'normals')

    segmentation.flipNormalsWithViewDirection(polyData, [1, -1, -1])

    # an all-zero normal vector means estimation failed for that point
    normalsValid = np.any(normals, axis=1)
    vnp.addNumpyToVtk(polyData, np.array(normalsValid, dtype=np.int32), 'normals_valid')

    vis.showPolyData(polyData, 'scene points', colorByName='normals_valid', visible=False)

    numPoints = polyData.GetNumberOfPoints()
    polyData = segmentation.thresholdPoints(polyData, 'normals_valid', [1, 1])
def vtk_image_to_numpy_array(vtk_image):
    """Return the scalars of a vtkImageData as an (h, w, channels) numpy array."""
    w, h, _ = vtk_image.GetDimensions()
    scalars = vnp.getNumpyFromVtk(vtk_image, vtk_image.GetPointData().GetScalars().GetName())
    img = scalars.reshape(h, w, -1)
    # flip rows: vtk stores image rows bottom-up, numpy/opencv expect top-down
    img = np.flipud(img)
    return img
def __init__(self, uid, view, seed_pose, irisDriver, existing_region=None):
    """IRIS region item: an editable seed pose with a computed safe region.

    :param uid: integer id used in the item name
    :param view: render view to display in
    :param seed_pose: vtkTransform giving the initial seed frame
    :param irisDriver: driver used to (re)compute the region
    :param existing_region: if given, display this region instead of computing one
    """
    d = DebugData()
    self.uid = uid
    vis.PolyDataItem.__init__(self, "IRIS region {:d}".format(uid), d.getPolyData(), view)
    self.transform = seed_pose
    d.addSphere((0, 0, 0), radius=0.02)
    self.seedObj = vis.showPolyData(
        d.getPolyData(),
        'region seed',
        parent=om.getOrCreateContainer('IRIS region seeds'))
    self.seedObj.actor.SetUserTransform(self.transform)
    self.frameObj = vis.showFrame(self.transform,
                                  'region seed frame',
                                  scale=0.2,
                                  visible=False,
                                  parent=self.seedObj)
    self.frameObj.setProperty('Edit', True)

    self.frameObj.widget.HandleRotationEnabledOff()
    terrain = om.findObjectByName('HEIGHT_MAP_SCENE')
    if terrain:
        # constrain the widget: no z translation, rotation about z only
        rep = self.frameObj.widget.GetRepresentation()
        rep.SetTranslateAxisEnabled(2, False)
        rep.SetRotateAxisEnabled(0, False)
        rep.SetRotateAxisEnabled(1, False)

        pos = np.array(self.frameObj.transform.GetPosition())
        polyData = filterUtils.removeNonFinitePoints(terrain.polyData)
        if polyData.GetNumberOfPoints():
            # snap z to the highest terrain point within 10cm of the vertical
            # line through the seed position
            polyData = segmentation.labelDistanceToLine(
                polyData, pos, pos + [0, 0, 1])
            polyData = segmentation.thresholdPoints(
                polyData, 'distance_to_line', [0.0, 0.1])
            if polyData.GetNumberOfPoints():
                pos[2] = np.nanmax(
                    vnp.getNumpyFromVtk(polyData, 'Points')[:, 2])
            self.frameObj.transform.Translate(
                pos - np.array(self.frameObj.transform.GetPosition()))

        self.placer = PlacerWidget(view, self.seedObj, terrain)
        self.placer.start()
    else:
        self.frameObj.setProperty('Edit', True)
        self.frameObj.setProperty('Visible', True)

    self.driver = irisDriver
    self.safe_region = None
    self.addProperty('Visible', True)
    self.addProperty('Enabled for Walking', True)
    self.addProperty('Alpha', 1.0)
    self.addProperty('Color', QtGui.QColor(200, 200, 20))

    self.frameObj.connectFrameModified(self.onFrameModified)
    if existing_region is None:
        # no region supplied: compute one from the seed frame now
        self.onFrameModified(self.frameObj)
    else:
        self.setRegion(existing_region)

    self.setProperty('Alpha', 0.5)
    self.setProperty('Color', QtGui.QColor(220, 220, 220))
def computeCentroid(polyData):
    """Return the centroid (mean of all points) of polyData as a 3-vector."""
    return np.average(vnp.getNumpyFromVtk(polyData, "Points"), axis=0)
file and re-save it as an ascii ply file. '''

import os
from director import ioUtils
from director import vtkNumpy as vnp

# NOTE(review): 'sys' is used below but its import is not visible in this
# chunk — presumably imported above; verify.
if __name__ == '__main__':

    filename = sys.argv[1]
    outputFilename = os.path.splitext(filename)[0] + '.vtp'

    print "reading poly data"
    polyData = ioUtils.readPolyData(filename)
    print "finished reading poly data"

    # TODO:
    # This should just be fixed in ioUtils.readPolyData, but for now
    # there is a workaround for an issue with the ply reader.
    # The output of the ply reader has points but not vertex cells,
    # so create a new polydata with vertex cells and copy the cells over.
    points = vnp.getNumpyFromVtk(polyData, 'Points')
    newPolyData = vnp.numpyToPolyData(points, createVertexCells=True)
    polyData.SetVerts(newPolyData.GetVerts())

    print 'writing:', outputFilename
    ioUtils.writePolyData(polyData, outputFilename)
def hasNonFinitePoints(polyData, arrayName="Points"):
    """Return True if any component of any point in the named array is
    non-finite (nan or +/- inf).
    """
    pts = vnp.getNumpyFromVtk(polyData, arrayName)
    # BUGFIX: np.isfinite(pts).any() answered "is any point finite?" which is
    # the opposite of this function's name; negate before reducing
    # (mirrors labelNonFinitePoints)
    return np.logical_not(np.isfinite(pts)).any()
# Estimate per-point normals, discard points where estimation failed, and
# display the surviving cloud colored by curvature.
print 'computing normals...'
f = vtk.vtkRobustNormalEstimator()
f.SetInput(polyData)
f.SetMaxIterations(100)
f.SetMaxEstimationError(0.01)
f.SetMaxCenterError(0.02)
f.SetComputeCurvature(True)
f.SetRadius(0.1)
f.Update()
polyData = shallowCopy(f.GetOutput())
print 'done.'

# filter points without normals
normals = vnp.getNumpyFromVtk(polyData, 'normals')

segmentation.flipNormalsWithViewDirection(polyData, [1, -1, -1])

# an all-zero normal vector means estimation failed for that point
normalsValid = np.any(normals, axis=1)
vnp.addNumpyToVtk(polyData, np.array(normalsValid, dtype=np.int32), 'normals_valid')

vis.showPolyData(polyData, 'scene points', colorByName='normals_valid', visible=False)

numPoints = polyData.GetNumberOfPoints()
polyData = segmentation.thresholdPoints(polyData, 'normals_valid', [1, 1])

vis.showPolyData(polyData, 'cloud normals', colorByName='curvature', visible=True)
print 'number of filtered points:', numPoints - polyData.GetNumberOfPoints()
def fitRunningBoardAtFeet(self): # get stance frame startPose = self.getPlanningStartPose() stanceFrame = self.robotSystem.footstepsDriver.getFeetMidPoint( self.robotSystem.robotStateModel, useWorldZ=False) stanceFrameAxes = transformUtils.getAxesFromTransform(stanceFrame) # get pointcloud and extract search region covering the running board polyData = segmentation.getCurrentRevolutionData() polyData = segmentation.applyVoxelGrid(polyData, leafSize=0.01) _, polyData = segmentation.removeGround(polyData) polyData = segmentation.cropToBox(polyData, stanceFrame, [1.0, 1.0, 0.1]) if not polyData.GetNumberOfPoints(): print('empty search region point cloud') return vis.updatePolyData(polyData, 'running board search points', parent=segmentation.getDebugFolder(), color=[0, 1, 0], visible=False) # extract maximal points along the stance x axis perpAxis = stanceFrameAxes[0] edgeAxis = stanceFrameAxes[1] edgePoints = segmentation.computeEdge(polyData, edgeAxis, perpAxis) edgePoints = vnp.getVtkPolyDataFromNumpyPoints(edgePoints) vis.updatePolyData(edgePoints, 'edge points', parent=segmentation.getDebugFolder(), visible=True) # ransac fit a line to the edge points linePoint, lineDirection, fitPoints = segmentation.applyLineFit( edgePoints) if np.dot(lineDirection, stanceFrameAxes[1]) < 0: lineDirection = -lineDirection linePoints = segmentation.thresholdPoints(fitPoints, 'ransac_labels', [1.0, 1.0]) dists = np.dot( vnp.getNumpyFromVtk(linePoints, 'Points') - linePoint, lineDirection) p1 = linePoint + lineDirection * np.min(dists) p2 = linePoint + lineDirection * np.max(dists) vis.updatePolyData(fitPoints, 'line fit points', parent=segmentation.getDebugFolder(), colorByName='ransac_labels', visible=False) # compute a new frame that is in plane with the stance frame # and matches the orientation and position of the detected edge origin = np.array(stanceFrame.GetPosition()) normal = np.array(stanceFrameAxes[2]) # project stance origin to edge, then back to foot frame 
originProjectedToEdge = linePoint + lineDirection * np.dot( origin - linePoint, lineDirection) originProjectedToPlane = segmentation.projectPointToPlane( originProjectedToEdge, origin, normal) zaxis = np.array(stanceFrameAxes[2]) yaxis = np.array(lineDirection) xaxis = np.cross(yaxis, zaxis) xaxis /= np.linalg.norm(xaxis) yaxis = np.cross(zaxis, xaxis) yaxis /= np.linalg.norm(yaxis) d = DebugData() d.addSphere(p1, radius=0.005) d.addSphere(p2, radius=0.005) d.addLine(p1, p2) d.addSphere(originProjectedToEdge, radius=0.001, color=[1, 0, 0]) d.addSphere(originProjectedToPlane, radius=0.001, color=[0, 1, 0]) d.addLine(originProjectedToPlane, origin, color=[0, 1, 0]) d.addLine(originProjectedToEdge, origin, color=[1, 0, 0]) vis.updatePolyData(d.getPolyData(), 'running board edge', parent=segmentation.getDebugFolder(), colorByName='RGB255', visible=False) # update the running board box affordance position and orientation to # fit the detected edge box = self.spawnRunningBoardAffordance() boxDimensions = box.getProperty('Dimensions') t = transformUtils.getTransformFromAxesAndOrigin( xaxis, yaxis, zaxis, originProjectedToPlane) t.PreMultiply() t.Translate(-boxDimensions[0] / 2.0, 0.0, -boxDimensions[2] / 2.0) box.getChildFrame().copyFrame(t) self.initialize()
Note, the ply file needs to be in ascii format and not binary.  The vtk ply
reader seems to crash on binary ply files.  You can use meshlab to open a
binary ply file and re-save it as an ascii ply file.
'''

import os
from director import ioUtils
from director import vtkNumpy as vnp

# NOTE(review): 'sys' is used below but its import is not visible in this
# chunk — presumably imported above; verify.
if __name__ == '__main__':

    filename = sys.argv[1]
    outputFilename = os.path.splitext(filename)[0] + '.vtp'

    print("reading poly data")
    polyData = ioUtils.readPolyData(filename)
    print("finished reading poly data")

    # TODO:
    # This should just be fixed in ioUtils.readPolyData, but for now
    # there is a workaround for an issue with the ply reader.
    # The output of the ply reader has points but not vertex cells,
    # so create a new polydata with vertex cells and copy the cells over.
    points = vnp.getNumpyFromVtk(polyData, 'Points')
    newPolyData = vnp.numpyToPolyData(points, createVertexCells=True)
    polyData.SetVerts(newPolyData.GetVerts())

    print('writing:', outputFilename)
    ioUtils.writePolyData(polyData, outputFilename)
# Create a console app with a grid source and inspect its points as numpy.
app = consoleapp.ConsoleApp()
view = app.createView(useGrid=False)

cellSize = 0.5
numberOfCells = 10

grid = vtk.vtkGridSource()
grid.SetScale(cellSize)
grid.SetGridSize(numberOfCells)
grid.SetSurfaceEnabled(True)
grid.Update()
grid = grid.GetOutput()

# view the grid points through numpy (shares the vtk memory)
pts = vnp.getNumpyFromVtk(grid, 'Points')
print "pts ", np.shape(pts)
print "pts is this ", pts
#print "grid is this ", grid

def evalXYpoly(x,y):
    # sample height field used by the (disabled) distortion loop below
    return x**2 + 3*y**2 + x - y

# # # This distorts the map in z
# for row in pts:
#     print "row before, ", row
#     row[2] = evalXYpoly(row[0],row[1])
#     print "row after, ", row
def getMaxZCoordinate(polyData):
    """Return the largest z coordinate among the points of polyData.

    NaN z values are ignored (np.nanmax). The result is converted to a
    plain Python float.
    """
    zValues = vnp.getNumpyFromVtk(polyData, 'Points')[:, 2]
    return float(np.nanmax(zValues))
def fitRunningBoardAtFeet(self):
    '''Fit a running-board box affordance to the edge detected in front of
    the robot's feet.

    Crops the current point cloud to a box around the feet midpoint,
    extracts the maximal edge points along the stance x axis, RANSAC-fits
    a line to them, and aligns the spawned running-board affordance with
    the fitted edge projected into the stance plane. Returns nothing;
    side effects are debug visualizations and the updated affordance frame.
    '''
    # get stance frame
    # NOTE(review): startPose is never used below — confirm whether
    # getPlanningStartPose() is needed for a side effect, else remove.
    startPose = self.getPlanningStartPose()
    stanceFrame = self.robotSystem.footstepsDriver.getFeetMidPoint(self.robotSystem.robotStateModel, useWorldZ=False)
    stanceFrameAxes = transformUtils.getAxesFromTransform(stanceFrame)

    # get pointcloud and extract search region covering the running board
    polyData = segmentation.getCurrentRevolutionData()
    polyData = segmentation.applyVoxelGrid(polyData, leafSize=0.01)
    _, polyData = segmentation.removeGround(polyData)
    polyData = segmentation.cropToBox(polyData, stanceFrame, [1.0, 1.0, 0.1])

    if not polyData.GetNumberOfPoints():
        # was a Python 2 print statement; single-arg print() is valid in 2 and 3
        print('empty search region point cloud')
        return

    vis.updatePolyData(polyData, 'running board search points', parent=segmentation.getDebugFolder(), color=[0, 1, 0], visible=False)

    # extract maximal points along the stance x axis
    perpAxis = stanceFrameAxes[0]
    edgeAxis = stanceFrameAxes[1]
    edgePoints = segmentation.computeEdge(polyData, edgeAxis, perpAxis)
    edgePoints = vnp.getVtkPolyDataFromNumpyPoints(edgePoints)
    vis.updatePolyData(edgePoints, 'edge points', parent=segmentation.getDebugFolder(), visible=True)

    # ransac fit a line to the edge points; flip the direction so it agrees
    # with the stance y axis
    linePoint, lineDirection, fitPoints = segmentation.applyLineFit(edgePoints)
    if np.dot(lineDirection, stanceFrameAxes[1]) < 0:
        lineDirection = -lineDirection

    # endpoints p1/p2 of the inlier segment along the fitted line
    linePoints = segmentation.thresholdPoints(fitPoints, 'ransac_labels', [1.0, 1.0])
    dists = np.dot(vnp.getNumpyFromVtk(linePoints, 'Points') - linePoint, lineDirection)
    p1 = linePoint + lineDirection * np.min(dists)
    p2 = linePoint + lineDirection * np.max(dists)
    vis.updatePolyData(fitPoints, 'line fit points', parent=segmentation.getDebugFolder(), colorByName='ransac_labels', visible=False)

    # compute a new frame that is in plane with the stance frame
    # and matches the orientation and position of the detected edge
    origin = np.array(stanceFrame.GetPosition())
    normal = np.array(stanceFrameAxes[2])

    # project stance origin to edge, then back to foot frame
    originProjectedToEdge = linePoint + lineDirection * np.dot(origin - linePoint, lineDirection)
    originProjectedToPlane = segmentation.projectPointToPlane(originProjectedToEdge, origin, normal)

    # build an orthonormal frame: z from stance, y along the edge,
    # x completes the right-handed triad
    zaxis = np.array(stanceFrameAxes[2])
    yaxis = np.array(lineDirection)
    xaxis = np.cross(yaxis, zaxis)
    xaxis /= np.linalg.norm(xaxis)
    yaxis = np.cross(zaxis, xaxis)
    yaxis /= np.linalg.norm(yaxis)

    # debug geometry for the detected edge and the projections
    d = DebugData()
    d.addSphere(p1, radius=0.005)
    d.addSphere(p2, radius=0.005)
    d.addLine(p1, p2)
    d.addSphere(originProjectedToEdge, radius=0.001, color=[1, 0, 0])
    d.addSphere(originProjectedToPlane, radius=0.001, color=[0, 1, 0])
    d.addLine(originProjectedToPlane, origin, color=[0, 1, 0])
    d.addLine(originProjectedToEdge, origin, color=[1, 0, 0])
    vis.updatePolyData(d.getPolyData(), 'running board edge', parent=segmentation.getDebugFolder(), colorByName='RGB255', visible=False)

    # update the running board box affordance position and orientation to
    # fit the detected edge
    box = self.spawnRunningBoardAffordance()
    boxDimensions = box.getProperty('Dimensions')
    t = transformUtils.getTransformFromAxesAndOrigin(xaxis, yaxis, zaxis, originProjectedToPlane)
    t.PreMultiply()
    # shift so the box's +x face and top sit on the detected edge point
    t.Translate(-boxDimensions[0] / 2.0, 0.0, -boxDimensions[2] / 2.0)
    box.getChildFrame().copyFrame(t)

    self.initialize()