def addImage(self, name, robotName):
    """Register an image/texture pair for a camera on the given robot.

    Creates the per-robot bookkeeping dictionaries the first time a robot
    is seen. Does nothing if the (robotName, name) pair is already known.
    """
    if robotName in self.images and name in self.images[robotName]:
        return

    img = vtk.vtkImageData()
    texture = vtk.vtkTexture()
    texture.SetInputData(img)
    texture.EdgeClampOn()
    texture.RepeatOff()

    if robotName not in self.images:
        # First camera for this robot: create all per-robot maps together
        # so they stay in sync.
        self.imageUtimes[robotName] = {}
        self.images[robotName] = {}
        self.textures[robotName] = {}
        self.imageRotations180[robotName] = {}
        self.queue[robotName] = {}
        self.providerClasses[robotName] = None

    self.imageUtimes[robotName][name] = 0
    self.images[robotName][name] = img
    self.textures[robotName][name] = texture
    self.imageRotations180[robotName][name] = False

    if self.providerClasses[robotName]:
        self.queue[robotName][name] = self.providerClasses[
            robotName].initialise_from_name(name, robotName)
    else:
        # Provider not set up yet; record a placeholder queue entry.
        print(
            "Could not initialise camera {} as the provider class is not initialised."
            .format(name))
        self.queue[robotName][name] = None
def computeDepthImageAndPointCloud(depthBuffer, colorBuffer, camera):
    """
    Convert OpenGL render buffers into a depth image and a colored point cloud.

    :param depthBuffer: OpenGL depth buffer
    :type depthBuffer:
    :param colorBuffer: OpenGL color buffer
    :type colorBuffer:
    :param camera: vtkCamera instance that was used to render the scene
    :type camera: vtkCamera instance
    :return: depth image, point cloud, and raw point positions
    :rtype: vtkImageData, vtkPolyData, numpy array
    """
    depthImage = vtk.vtkImageData()
    cloudPoints = vtk.vtkPoints()
    cloudColors = vtk.vtkUnsignedCharArray()

    vtk.vtkDepthImageUtils.DepthBufferToDepthImage(depthBuffer, colorBuffer,
                                                   camera, depthImage,
                                                   cloudPoints, cloudColors)

    # Wrap the VTK points as a numpy array and build a vertex-cell polydata.
    positions = vnp.numpy_support.vtk_to_numpy(cloudPoints.GetData())
    polyData = vnp.numpyToPolyData(positions, createVertexCells=True)
    cloudColors.SetName('rgb')
    polyData.GetPointData().AddArray(cloudColors)

    return depthImage, polyData, positions
def __init__(self):
    """Create a VTK widget that displays a single image actor."""
    view = PythonQt.dd.ddQVTKWidgetView()
    view.setWindowTitle('Image View')
    self.view = view

    self.imageActor = vtk.vtkImageActor()
    # Start from an empty image; callers replace it via setImage().
    self.setImage(vtk.vtkImageData())

    renderer = view.renderer()
    renderer.AddActor(self.imageActor)
    view.orientationMarkerWidget().Off()
    # Solid black background (both gradient stops).
    renderer.SetBackground([0,0,0])
    renderer.SetBackground2([0,0,0])
    view.installImageInteractor()
def __init__(self):
    """Set up a standalone VTK image-view window."""
    self.view = PythonQt.dd.ddQVTKWidgetView()
    self.view.setWindowTitle('Image View')
    self.imageActor = vtk.vtkImageActor()
    # Placeholder image until real data is supplied.
    self.setImage(vtk.vtkImageData())

    renderer = self.view.renderer()
    renderer.AddActor(self.imageActor)
    self.view.orientationMarkerWidget().Off()
    black = [0, 0, 0]
    renderer.SetBackground(black)
    renderer.SetBackground2(black)
    self.view.installImageInteractor()
def __init__(self):
    """Build the image view; camera auto-reset is disabled by default."""
    self.autoResetCamera = False

    view = PythonQt.dd.ddQVTKWidgetView()
    view.setWindowTitle('Image View')
    self.view = view

    self.imageActor = vtk.vtkImageActor()
    self.setImage(vtk.vtkImageData())  # empty image until data arrives
    view.renderer().AddActor(self.imageActor)
    view.orientationMarkerWidget().Off()

    self.setBackgroundColor([0,0,0])
    self.initInteractor()
    self.installEventFilter()
    self.resetCamera()
def __init__(self):
    """Initialise the image view window, interactor, and event filter."""
    # Do not automatically reset the camera when the image changes.
    self.autoResetCamera = False

    self.view = PythonQt.dd.ddQVTKWidgetView()
    self.view.setWindowTitle("Image View")

    self.imageActor = vtk.vtkImageActor()
    # Seed with an empty image; setImage() swaps in real content later.
    self.setImage(vtk.vtkImageData())
    self.view.renderer().AddActor(self.imageActor)
    self.view.orientationMarkerWidget().Off()

    self.setBackgroundColor([0, 0, 0])
    self.initInteractor()
    self.installEventFilter()
    self.resetCamera()
def numpyToImageData(img, flip=True, vtktype=vtk.VTK_UNSIGNED_CHAR):
    """
    Convert a numpy image array to a new vtkImageData.

    :param img: numpy array of shape (height, width, channels), or
        (height, width) for single-channel images
    :param flip: if True, flip vertically (numpy rows run top-to-bottom
        while VTK expects bottom-to-top)
    :param vtktype: VTK scalar type to allocate for the image
    :return: vtkImageData containing a copy of the pixel data
    """
    if flip:
        img = np.flipud(img)

    # Generalized: accept 2-D grayscale arrays as well as (H, W, C); the
    # previous 3-tuple unpack raised ValueError on 2-D input.
    assert len(img.shape) in (2, 3)
    height, width = img.shape[:2]
    numChannels = 1 if len(img.shape) == 2 else img.shape[2]

    image = vtk.vtkImageData()
    image.SetDimensions(width, height, 1)
    image.AllocateScalars(vtktype, numChannels)
    scalars = getNumpyFromVtk(image, 'ImageScalars')
    if numChannels > 1:
        scalars[:] = img.reshape(width*height, numChannels)[:]
    else:
        scalars[:] = img.reshape(width*height)[:]
    return image
def numpyToImageData(img, flip=True, vtktype=vtk.VTK_UNSIGNED_CHAR):
    """Copy a (height, width, channels) numpy array into a new vtkImageData."""
    if flip:
        # numpy rows run top-to-bottom; VTK expects bottom-to-top.
        img = np.flipud(img)

    height, width, numChannels = img.shape
    image = vtk.vtkImageData()
    image.SetDimensions(width, height, 1)
    image.AllocateScalars(vtktype, numChannels)

    scalars = getNumpyFromVtk(image, 'ImageScalars')
    if numChannels == 1:
        scalars[:] = img.reshape(width * height)[:]
    else:
        scalars[:] = img.reshape(width * height, numChannels)[:]
    return image
def addImage(self, name):
    """Create and register an empty image plus its texture under *name*.

    A no-op if the name is already registered.
    """
    if name in self.images:
        return

    newImage = vtk.vtkImageData()
    newTexture = vtk.vtkTexture()
    newTexture.SetInput(newImage)
    newTexture.EdgeClampOn()
    newTexture.RepeatOff()

    self.imageUtimes[name] = 0
    self.images[name] = newImage
    self.textures[name] = newTexture
def addImage(self, name):
    """Allocate an empty image and texture for *name* unless already present.

    New images start with no 180-degree rotation applied.
    """
    if name in self.images:
        return

    img = vtk.vtkImageData()
    texture = vtk.vtkTexture()
    texture.SetInput(img)
    texture.EdgeClampOn()
    texture.RepeatOff()

    self.imageUtimes[name] = 0
    self.images[name] = img
    self.textures[name] = texture
    self.imageRotations180[name] = False
def computeDepthImageAndPointCloud(depthBuffer, colorBuffer, camera):
    '''
    Build a depth image and a colored point cloud from OpenGL buffers.

    Input args are an OpenGL depth buffer and color buffer as vtkImageData
    objects, and the vtkCamera instance that was used to render the scene.
    Returns a depth image, a point cloud, and the raw point positions, as
    vtkImageData, vtkPolyData, and a numpy array.
    '''
    depthImage = vtk.vtkImageData()
    cloudPoints = vtk.vtkPoints()
    cloudColors = vtk.vtkUnsignedCharArray()

    vtk.vtkDepthImageUtils.DepthBufferToDepthImage(depthBuffer, colorBuffer,
                                                   camera, depthImage,
                                                   cloudPoints, cloudColors)

    positions = vnp.numpy_support.vtk_to_numpy(cloudPoints.GetData())
    polyData = vnp.numpyToPolyData(positions, createVertexCells=True)
    cloudColors.SetName('rgb')
    polyData.GetPointData().AddArray(cloudColors)

    return depthImage, polyData, positions
def getDepthMapData(self, viewId):
    """Fetch the current depth map for *viewId* from the map source.

    Returns (depth, transform) where depth is a 2-D numpy array of the
    depth scalars and transform is a 4x4 numpy matrix, or (None, None)
    when the source has no current map for this view.
    """
    mapId = self.source.GetCurrentMapId(viewId)
    if mapId < 0:
        return None, None

    depthImage = vtk.vtkImageData()
    transform = vtk.vtkTransform()
    self.source.GetDataForMapId(viewId, mapId, depthImage, transform)

    # Reshape the flat scalar array to (rows, cols) = (height, width).
    dims = depthImage.GetDimensions()
    depth = vnp.getNumpyFromVtk(depthImage, 'ImageScalars').reshape(
        dims[1], dims[0])

    matrix = transform.GetMatrix()
    t = np.array([[matrix.GetElement(r, c) for c in xrange(4)]
                  for r in xrange(4)])
    return depth, t
def numpyToImageData(img, flip=True, vtktype=None):
    """Convert a 2-D (grayscale) or 3-D (multi-channel) numpy image into a
    vtkImageData, inferring the VTK scalar type from the array dtype unless
    *vtktype* is given explicitly."""
    if flip:
        # Match VTK's bottom-to-top row order.
        img = np.flipud(img)

    assert len(img.shape) in (2, 3)
    height, width = img.shape[:2]
    numChannels = img.shape[2] if len(img.shape) == 3 else 1

    image = vtk.vtkImageData()
    image.SetDimensions(width, height, 1)

    if vtktype is None:
        vtktype = numpy_support.get_vtk_array_type(img.dtype)
    image.AllocateScalars(vtktype, numChannels)

    scalars = getNumpyFromVtk(image, 'ImageScalars')
    if numChannels == 1:
        scalars[:] = img.reshape(width * height)[:]
    else:
        scalars[:] = img.reshape(width * height, numChannels)[:]
    return image
def computeDepthImageAndPointCloud(depthBuffer, colorBuffer, camera):
    '''
    Build a depth image and a colored point cloud from OpenGL buffers.

    Input args are an OpenGL depth buffer and color buffer as vtkImageData
    objects, and the vtkCamera instance that was used to render the scene.
    Returns a depth image and a point cloud as vtkImageData and vtkPolyData.
    '''
    depthImage = vtk.vtkImageData()
    cloudPoints = vtk.vtkPoints()
    cloudColors = vtk.vtkUnsignedCharArray()

    vtk.vtkDepthImageUtils.DepthBufferToDepthImage(depthBuffer, colorBuffer,
                                                   camera, depthImage,
                                                   cloudPoints, cloudColors)

    positions = vnp.numpy_support.vtk_to_numpy(cloudPoints.GetData())
    polyData = vnp.numpyToPolyData(positions, createVertexCells=True)
    cloudColors.SetName('rgb')
    polyData.GetPointData().AddArray(cloudColors)

    return depthImage, polyData
def getDepthMapData(self, viewId):
    """Return (depth, transform) for the view's current map, or (None, None)
    when no map is available. depth is a (height, width) numpy array and
    transform a 4x4 numpy matrix."""
    mapId = self.source.GetCurrentMapId(viewId)
    if mapId < 0:
        return None, None

    depthImage = vtk.vtkImageData()
    transform = vtk.vtkTransform()
    self.source.GetDataForMapId(viewId, mapId, depthImage, transform)

    dims = depthImage.GetDimensions()
    depth = vnp.getNumpyFromVtk(depthImage, 'ImageScalars')
    # Scalars are stored flat; rows correspond to dims[1], columns dims[0].
    depth = depth.reshape(dims[1], dims[0])

    m = transform.GetMatrix()
    rows = []
    for r in xrange(4):
        rows.append([m.GetElement(r, c) for c in xrange(4)])
    t = np.array(rows)
    return depth, t
def showNumpyImage(self, img, flip=True):
    """Display a (height, width, channels) numpy image in this view.

    The backing vtkImageData is only re-allocated when the size or channel
    count changes; otherwise the existing scalars are overwritten in place.
    """
    image = self.getImage()
    if not image:
        image = vtk.vtkImageData()
        self.setImage(image)

    if flip:
        # Flip to VTK's bottom-to-top row order.
        img = np.flipud(img)

    height, width, numChannels = img.shape
    dims = image.GetDimensions()
    needsRealloc = (dims[0] != width or dims[1] != height
                    or image.GetNumberOfScalarComponents() != numChannels)
    if needsRealloc:
        image.SetDimensions(width, height, 1)
        image.AllocateScalars(vtk.VTK_UNSIGNED_CHAR, numChannels)

    scalars = vnp.getNumpyFromVtk(image, 'ImageScalars')
    if numChannels == 1:
        scalars[:] = img.reshape(width * height)[:]
    else:
        scalars[:] = img.reshape(width * height, numChannels)[:]

    image.Modified()
    self.view.render()
#update camera transform cameraToCameraStart = poses.getCameraPoseAtUTime(utime) t = cameraToCameraStart common.setCameraTransform(camera, t) common.setCameraTransform(camera1, t) renWin.Render() renSource.Update() #update filters filter1.Modified() filter1.Update() windowToColorBuffer.Modified() windowToColorBuffer.Update() #extract depth image depthImage = vtk.vtkImageData() pts = vtk.vtkPoints() ptColors = vtk.vtkUnsignedCharArray() vtk.vtkDepthImageUtils.DepthBufferToDepthImage( filter1.GetOutput(), windowToColorBuffer.GetOutput(), camera, depthImage, pts, ptColors) scale.SetInputData(depthImage) scale.Update() #source = np.flip(np.reshape(numpy_support.vtk_to_numpy(renSource.GetOutput().GetPointData().GetScalars()),(480,640)),axis=0) #modify this for simulated depth source = np.flip(np.reshape( numpy_support.vtk_to_numpy( renSource.GetOutput().GetPointData().GetScalars()), (480, 640)),
def render_depth(renWin, renderer, camera, data_dir, data_dir_name, num_im,
                 out_dir, use_mesh, object_dir, mesh='meshed_scene.ply',
                 keyword=None):
    """
    Render ground-truth depth images for each frame of a log.

    For frames 1..num_im, reads the frame's utime file, positions the camera
    from the pose graph, renders the scene, converts the OpenGL depth buffer
    to a depth image, and writes it to out_dir as a 16-bit PNG. Stops at the
    first missing utime file.

    :param renWin: vtkRenderWindow to render into
    :param renderer: vtkRenderer holding the scene
    :param camera: vtkCamera repositioned per frame
    :param data_dir: directory containing the log data (images/, posegraph)
    :param data_dir_name: label embedded in the output file names
    :param num_im: number of frames to attempt
    :param out_dir: directory that receives the rendered depth PNGs
    :param use_mesh: if True, render a meshed version of the scene;
        otherwise load individual object meshes from the registration result
    :param object_dir: directory with object meshes (used when use_mesh is False)
    :param mesh: mesh file name inside data_dir; built on demand if absent
    :param keyword: optional filter passed to loadObjectMeshes
    """
    actor = vtk.vtkActor()
    filter1 = vtk.vtkWindowToImageFilter()
    imageWriter = vtk.vtkPNGWriter()
    scale = vtk.vtkImageShiftScale()

    if use_mesh:
        # Use meshed version of scene; mesh a point cloud first if the mesh
        # file does not exist yet.
        if not glob.glob(data_dir + "/" + mesh):
            out = None
            if glob.glob(data_dir + "/original_log.lcmlog.ply"):
                out = "original_log.lcmlog.ply"
            elif glob.glob(data_dir + "/trimmed_log.lcmlog.ply"):
                out = "trimmed_log.lcmlog.ply"
            elif glob.glob('*.ply'):
                out = glob.glob('*.ply')[0]
            else:
                # Nothing available to mesh; give up.
                return
            mesher = mesh_wrapper.Mesh(out_dir=data_dir)
            status = mesher.mesh_cloud(out)  # blocks until done
            print(status)
        mapper = vtk.vtkPolyDataMapper()
        fileReader = vtk.vtkPLYReader()
        fileReader.SetFileName(data_dir + "/" + mesh)
        mapper.SetInputConnection(fileReader.GetOutputPort())
        actor.SetMapper(mapper)
        renderer.AddActor(actor)
    else:
        # Import just the objects from the registration result.
        objects = common.Objects(data_dir, object_dir)
        objects.loadObjectMeshes("/registration_result.yaml", renderer,
                                 keyword=keyword)

    # Set up filters: one grabs the Z buffer, the other the color buffer.
    filter1.SetInput(renWin)
    filter1.SetMagnification(1)
    filter1.SetInputBufferTypeToZBuffer()
    windowToColorBuffer = vtk.vtkWindowToImageFilter()
    windowToColorBuffer.SetInput(renWin)
    windowToColorBuffer.SetInputBufferTypeToRGB()

    # Depth is scaled by 1000 and stored as unsigned short — presumably
    # meters -> millimeters; confirm against consumers of these PNGs.
    scale.SetOutputScalarTypeToUnsignedShort()
    scale.SetScale(1000)

    poses = common.CameraPoses(data_dir + "/posegraph.posegraph")
    for i in range(1, num_im + 1):
        try:
            # Fix: close the utime file instead of leaking one handle per
            # frame. A missing file raises IOError and ends the loop below.
            with open(data_dir + "/images/" + str(i).zfill(10) +
                      "_utime.txt", 'r') as utimeFile:
                utime = int(utimeFile.read())

            # Update camera transform for this frame.
            cameraToCameraStart = poses.getCameraPoseAtUTime(utime)
            t = cameraToCameraStart
            common.setCameraTransform(camera, t)
            renWin.Render()

            # Update filters so they re-grab the freshly rendered buffers.
            filter1.Modified()
            filter1.Update()
            windowToColorBuffer.Modified()
            windowToColorBuffer.Update()

            # Extract depth image.
            depthImage = vtk.vtkImageData()
            pts = vtk.vtkPoints()
            ptColors = vtk.vtkUnsignedCharArray()
            vtk.vtkDepthImageUtils.DepthBufferToDepthImage(
                filter1.GetOutput(), windowToColorBuffer.GetOutput(), camera,
                depthImage, pts, ptColors)
            scale.SetInputData(depthImage)
            scale.Update()

            # Write out depth image.
            imageWriter.SetFileName(out_dir + str(i).zfill(10) + "_" +
                                    data_dir_name +
                                    "_depth_ground_truth.png")
            imageWriter.SetInputConnection(scale.GetOutputPort())
            imageWriter.Write()
        except IOError:
            # First absent utime file marks the end of the log.
            break

    renderer.RemoveAllViewProps()
    renWin.Render()