def TestMarching(atlas):
    # Render the isosurface of a single atlas label (value 60) from a NIFTI volume.
    reader = vtk.vtkNIFTIImageReader()
    reader.SetFileName(atlas)
    reader.Update()
    image = reader.GetOutput()
    # threshold
    # Keep only voxels whose value is exactly 60; everything else becomes 0.
    threshold = vtk.vtkImageThreshold()
    threshold.SetInputData(image)
    threshold.ThresholdBetween(60,60)
    threshold.ReplaceOutOn()
    threshold.SetOutValue(0)
    threshold.Update()
    image1 = threshold.GetOutput()
    # Extract the label surface with marching cubes at the label value.
    marching = vtk.vtkMarchingCubes()
    marching.SetInputData(image1)
    marching.SetValue(0,60)
    marching.Update()
    area = marching.GetOutput()
    # Attach an RGB cell-scalar array.
    # NOTE(review): only ONE tuple is inserted while the surface has many
    # cells — confirm whether one color per cell was intended.
    colors = vtk.vtkUnsignedCharArray()
    colors.SetNumberOfComponents(3)
    colors.SetName("test")
    colors.InsertNextTupleValue([0,1,1])
    area.GetCellData().SetScalars(colors)
    # visualization
    print "visualizing..."
    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInputData(area)
    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    renderer = vtk.vtkRenderer()
    renderer.AddActor(actor)
    window = vtk.vtkRenderWindow()
    window.AddRenderer(renderer)
    interactor = vtk.vtkRenderWindowInteractor()
    window.SetInteractor(interactor)
    window.Render()
    interactor.Start()  # blocks until the window is closed
def load_data_prob(self, img_prob):
    """Load a probability NIFTI volume and rebuild the iso-surface display."""
    prob_reader = vtk.vtkNIFTIImageReader()
    prob_reader.SetFileName(img_prob)
    prob_reader.Update()
    # Keep the reader and its output alive on the instance for later use.
    self.rdr2 = prob_reader
    self.b0 = prob_reader.GetOutput()
    # Refresh the derived visualisation pipelines.
    self.iso_surfaces()
    self.opacity()
def change_dialog_3D(self, load_3d):
    """Swap the 3-D anatomy volume feeding the image and outline pipelines."""
    new_reader = vtk.vtkNIFTIImageReader()
    new_reader.SetFileName(load_3d)
    new_reader.Update()
    volume = new_reader.GetOutput()
    # Point both existing pipelines at the freshly loaded volume and redraw.
    self.img.SetInputData(volume)
    self.outline.SetInputData(volume)
    self.render_window.Render()
    self.rdr = new_reader
def change_dialog_prob(self, load_prob):
    """Swap the probability volume feeding the contour pipeline."""
    prob_reader = vtk.vtkNIFTIImageReader()
    prob_reader.SetFileName(load_prob)
    prob_reader.Update()
    volume = prob_reader.GetOutput()
    # Regenerate five evenly spaced contour levels over the new scalar range.
    lo, hi = volume.GetScalarRange()
    self.contours.GenerateValues(5, (lo, hi))
    self.contours.SetInputData(volume)
    self.render_window.Render()
    self.rdr2 = prob_reader
def load_data_3d(self, img_3d):
    """Load the main 3-D NIFTI volume and build the outline/widget scene.

    :param img_3d: path of the NIFTI file to display
    """
    rdr = vtk.vtkNIFTIImageReader()
    rdr.SetFileName(img_3d)
    rdr.Update()
    # BUG FIX: the original had a bare ``rdr.GetOutput()`` expression
    # statement here whose result was discarded — a no-op, removed.
    self.rdr = rdr
    print(rdr.GetOutput())  # debug dump of the loaded image
    # Build the bounding-box actor around the volume and add it to the scene.
    outline_actor = self.create_outline()
    self.renderer.AddActor(outline_actor)
    self.widget()
    self.renderer.ResetCamera()
    self.render_window.Render()
def TestReadWriteRead(infile, outfile):
    """Read, write, and re-read a file, return difference.

    Returns the sum of squares of the min/max of the voxel-wise difference
    between the original and round-tripped image (0.0 when lossless).
    """
    inpath = os.path.join(str(VTK_DATA_ROOT), "Data", infile)
    outpath = os.path.join(str(VTK_TEMP_DIR), outfile)
    # read a NIFTI file
    reader = vtk.vtkNIFTIImageReader()
    reader.SetFileName(inpath)
    reader.TimeAsVectorOn()
    reader.Update()
    writer = vtk.vtkNIFTIImageWriter()
    writer.SetInputConnection(reader.GetOutputPort())
    writer.SetFileName(outpath)
    # copy most information directly from the header
    writer.SetNIFTIHeader(reader.GetNIFTIHeader())
    # this information will override the reader's header
    writer.SetQFac(reader.GetQFac())
    writer.SetTimeDimension(reader.GetTimeDimension())
    writer.SetQFormMatrix(reader.GetQFormMatrix())
    writer.SetSFormMatrix(reader.GetSFormMatrix())
    writer.Write()
    # re-read the file that was just written
    reader2 = vtk.vtkNIFTIImageReader()
    reader2.SetFileName(outpath)
    reader2.TimeAsVectorOn()
    reader2.Update()
    # voxel-wise subtraction of the two images
    diff = vtk.vtkImageMathematics()
    diff.SetOperationToSubtract()
    diff.SetInputConnection(0,reader.GetOutputPort())
    diff.SetInputConnection(1,reader2.GetOutputPort())
    diff.Update()
    diffrange = diff.GetOutput().GetScalarRange()
    differr = diffrange[0]**2 + diffrange[1]**2
    return differr
def load_atlas(path, intensities, signs):
    '''
    Build a vtkVolume for an atlas where label i+1 is colored by sign and
    made transparent according to intensities[i].

    path: path to atlas file
    intensities: per-label opacity/intensity values (indexed from label 1)
    signs: per-label sign; negative -> blue, positive -> red, zero -> white
    '''
    nifti_reader = vtk.vtkNIFTIImageReader()
    nifti_reader.SetFileName(path)
    nifti_reader.Update()
    # intensities = intensities/math.sqrt(np.mean(intensities**2))
    # The following class is used to store transparency-values for later
    # retrieval. In our case, we want the value 0 to be completely opaque
    alphaChannelFunc = vtk.vtkPiecewiseFunction()
    alphaChannelFunc.AddPoint(0, 0.0)
    # Label values start at 1; label 0 is background and stays invisible.
    for i in range(len(intensities)):
        alphaChannelFunc.AddPoint(i+1, intensities[i])
    # This class stores color data and can create color tables from a few color
    # points. Negative-signed labels are shaded blue, positive red, zero white.
    colorFunc = vtk.vtkColorTransferFunction()
    colorFunc.AddRGBPoint(0, 0.0, 0.0, 0.0)
    for i in range(len(signs)):
        if signs[i] < 0:
            colorFunc.AddRGBPoint(i+1, 0.0, 0.0, intensities[i])
        elif signs[i] > 0:
            colorFunc.AddRGBPoint(i+1, intensities[i], 0.0, 0.0)
        else:
            colorFunc.AddRGBPoint(i+1, 1.0, 1.0, 1.0)
    # The previous two classes stored properties. Because we want to apply
    # these properties to the volume we want to render, we have to store them
    # in a class that stores volume properties.
    volumeProperty = vtk.vtkVolumeProperty()
    volumeProperty.SetColor(colorFunc)
    volumeProperty.SetScalarOpacity(alphaChannelFunc)
    volumeProperty.ShadeOn()
    # We can finally create our volume. We also have to specify the data for
    # it, as well as how the data will be rendered.
    volumeMapper = vtk.vtkSmartVolumeMapper()
    volumeMapper.SetInputDataObject(nifti_reader.GetOutput())
    # The class vtkVolume is used to pair the previously declared volume as
    # well as the properties to be used when rendering that volume.
    volume = vtk.vtkVolume()
    volume.SetMapper(volumeMapper)
    volume.SetProperty(volumeProperty)
    return volume
def read_vtk_image(inputImage):
    """Read a NIFTI image from disk and return it as vtkImageData.

    Exits the process with ``os.EX_IOERR`` when the file extension does not
    look like NIFTI (".nii" / ".nii.gz").

    :param inputImage: path of the image file to read
    :return: the reader's vtkImageData output
    """
    # Everything after the FIRST dot counts as the extension so ".nii.gz"
    # is recognised too.
    img_ext = str(inputImage).split(".", 1)[-1]
    # Guard clause: bail out early on anything that is not NIFTI.
    if "nii" not in img_ext:
        print(
            "ERROR: Input must be NIFTI image file. \n\tUnrecognized extension: {0}".format(
                img_ext
            )
        )
        return sys.exit(os.EX_IOERR)
    nifti_reader = vtk.vtkNIFTIImageReader()
    nifti_reader.SetFileName(inputImage)
    nifti_reader.Update()
    return nifti_reader.GetOutput()
def render_handler(self):
    """Volume-render ``segmentedlungs.nii`` with slider-driven transfer
    functions and show it in a new interactive render window.

    Reads the colour (r/g/b sliders), opacity and threshold widgets on
    ``self``; optionally enables anaglyph stereo when ``self.stereo_volume``
    is 1.
    """
    import vtk
    import sys
    filename = "segmentedlungs.nii"
    reader_src = vtk.vtkNIFTIImageReader()
    reader_src.SetFileName(filename)  # replace filename
    # 3 set up the volume mapper
    volmp = vtk.vtkGPUVolumeRayCastMapper()
    volmp.SetInputConnection(reader_src.GetOutputPort())
    # 4 transfer functions for color and opacity
    funAlpha = vtk.vtkPiecewiseFunction()  # opacity
    # Slider values arrive as integers; scale them into [0, 1] factors.
    r1 = int(self.r_slider.value()) / 100
    b1 = int(self.b_slider.value()) / 100
    g1 = int(self.g_slider.value()) / 100
    r2 = int(self.r_slider_2.value()) / 100
    b2 = int(self.b_slider_2.value()) / 100
    g2 = int(self.g_slider_2.value()) / 100
    op_int = int(self.opacity_int.value()) / 100
    op_ple = int(self.opacity_ple.value()) / 1000
    low = int(self.lower.value())
    high = int(self.higher.value())
    funAlpha.AddPoint(0, 0)
    funAlpha.AddPoint(-10, op_ple)  # pleura divide by 100
    funAlpha.AddPoint(10, op_ple)
    funAlpha.AddPoint(low, op_int)  # internals divide by 10
    funAlpha.AddPoint(high, op_int)
    funAlpha.AddPoint(high + 1, 0)
    funAlpha.AddPoint(1000, 0)
    funColor = vtk.vtkColorTransferFunction()
    funColor.AddRGBPoint(low, r1, g1, b1)
    funColor.AddRGBPoint(high, r1, g1, b1)
    funColor.AddRGBPoint(-10, r2, g2, b2)
    funColor.AddRGBPoint(10, r2, g2, b2)
    funColor.AddRGBPoint(high + 1, 0, 0.5, 0)
    funColor.AddRGBPoint(1000, 0.5, 0, 0)
    # BUG FIX: the original read ``funColor.ClampingOff`` without calling it,
    # a silent no-op; clamping is now actually disabled.
    funColor.ClampingOff()
    # 6 set up the volume properties with linear interpolation
    volumeProperty = vtk.vtkVolumeProperty()
    volumeProperty.SetColor(0, funColor)
    volumeProperty.SetScalarOpacity(0, funAlpha)
    volumeProperty.SetInterpolationTypeToLinear()
    # 7 set up the actor and connect it to the mapper
    volAct = vtk.vtkVolume()
    volAct.SetMapper(volmp)
    volAct.SetProperty(volumeProperty)
    renderer = vtk.vtkRenderer()
    camera = vtk.vtkCamera()
    camera.SetViewUp(0., 1., 0.)
    camera.SetFocalPoint(250, 250, 250)
    camera.SetPosition(-600, 150, 100)
    # 9 set the renderer's background to white
    renderer.SetBackground(1., 1., 1.)
    # 10 set the renderers camera as active
    renderer.SetActiveCamera(camera)
    # 11 add the volume actor to the renderer
    renderer.AddActor(volAct)
    # 12 create a render window
    ren_win = vtk.vtkRenderWindow()
    # 13 add renderer to the render window
    ren_win.AddRenderer(renderer)
    # 14 create an interactor
    iren = vtk.vtkRenderWindowInteractor()
    # 15 connect interactor to the render window
    iren.SetRenderWindow(ren_win)
    # 16 optionally enable anaglyph stereo, then start displaying
    if int(self.stereo_volume.value()) == 1:
        ren_win.GetStereoCapableWindow()
        ren_win.StereoCapableWindowOn()
        ren_win.SetStereoRender(1)
        ren_win.SetStereoTypeToAnaglyph()
    ren_win.Render()
    # 17 make the window interactive (start the interactor)
    iren.Start()
def MarchingCube(file_name):
    """Extract a value-50 isosurface from a NIFTI volume and display it
    together with a red landmark sphere.

    :param file_name: path of the NIFTI image to contour
    """
    # Hard-coded landmark coordinates (world space) for the marker spheres.
    coor1 = [128.463, 208.477, 51.7136]
    coor2 = [53.9797, 208.47, 53.3523]
    coor3 = [96.6807, 121.699, 168.477]
    coor4 = [4.55786, 84.7877, 51.6438]
    reader = vtk.vtkNIFTIImageReader()
    reader.SetFileName(file_name)
    reader.Update()
    image = reader.GetOutput()
    # Isosurface at scalar value 50.
    surface = vtk.vtkMarchingCubes()
    surface.SetInputData(image)
    surface.SetValue(0, 50)
    surface.Update()
    skin = surface.GetOutput()
    polyMapper = vtk.vtkPolyDataMapper()
    polyMapper.ScalarVisibilityOff()
    polyMapper.SetInputData(skin)
    actor = vtk.vtkActor()
    actor.SetMapper(polyMapper)
    actor.GetProperty().SetColor(0.8, 0.9, 0.9)

    def CreateBall(coor):
        # Build a red sphere actor of radius 6 positioned at ``coor``
        # and return the vtkActor.
        ball = vtk.vtkSphereSource()
        ball.SetCenter(0, 0, 0)
        ball.SetRadius(6.0)
        ball.SetThetaResolution(100)
        ball.Update()
        # Renamed from ``map`` to avoid shadowing the builtin.
        ball_mapper = vtk.vtkPolyDataMapper()
        ball_mapper.SetInputData(ball.GetOutput())
        actorx = vtk.vtkActor()
        actorx.SetMapper(ball_mapper)
        actorx.GetProperty().SetColor(1, 0, 0)
        actorx.SetPosition(coor[0], coor[1], coor[2])
        return actorx

    ball1 = CreateBall(coor1)
    ball2 = CreateBall(coor2)
    ball3 = CreateBall(coor3)
    ball4 = CreateBall(coor4)
    win = vtk.vtkRenderWindow()
    renderer = vtk.vtkRenderer()
    intact = vtk.vtkRenderWindowInteractor()
    style = vtk.vtkInteractorStyleTrackballCamera()
    win.AddRenderer(renderer)
    renderer.AddActor(actor)
    # Only the fourth landmark is currently shown.
    #renderer.AddActor(ball1)
    #renderer.AddActor(ball2)
    #renderer.AddActor(ball3)
    renderer.AddActor(ball4)
    intact.SetInteractorStyle(style)
    intact.SetRenderWindow(win)
    win.Render()
    intact.Start()
def LoadNifti(name):
    """Read the NIFTI file at ``name`` and return its vtkImageData."""
    nifti_reader = vtk.vtkNIFTIImageReader()
    nifti_reader.SetFileName(name)
    nifti_reader.Update()
    return nifti_reader.GetOutput()
def fused_start(self):
    """Render the segmented lungs and torso volumes fused in one scene.

    Each volume gets its own GPU ray-cast mapper and hard-coded transfer
    functions; the ``lung_slider`` / ``torso_slider`` widgets on ``self``
    toggle which volumes are added to the scene.
    """
    # 1 data paths (hard-coded)
    filename_lungs = "segmentedlungs.nii"
    filename_torso = "segmentedtorso.nii"
    # Read the lungs
    reader_lungs = vtk.vtkNIFTIImageReader()
    reader_lungs.SetFileName(filename_lungs)  # replace filename
    # Read the torso
    reader_torso = vtk.vtkNIFTIImageReader()
    reader_torso.SetFileName(filename_torso)
    volmpt = vtk.vtkGPUVolumeRayCastMapper()
    volmpt.SetInputConnection(reader_torso.GetOutputPort())
    volmpl = vtk.vtkGPUVolumeRayCastMapper()
    volmpl.SetInputConnection(reader_lungs.GetOutputPort())
    # 4 transfer functions for color and opacity
    # LUNGS
    funAlphal = vtk.vtkPiecewiseFunction()  # opacity
    r1 = 1
    b1 = 0
    g1 = 0
    r2 = 0
    b2 = 1
    g2 = 0
    op_int = 1
    op_ple = 0.001
    low = 50
    high = 200
    funAlphal.AddPoint(0, 0)
    funAlphal.AddPoint(1, op_ple)
    funAlphal.AddPoint(-10, op_ple)  # pleura divide by 100
    funAlphal.AddPoint(10, op_ple)
    funAlphal.AddPoint(low, op_int)  # internals divide by 10
    funAlphal.AddPoint(high, op_int)
    funAlphal.AddPoint(high + 1, 0)
    funAlphal.AddPoint(1000, 0)
    funColorl = vtk.vtkColorTransferFunction()
    funColorl.AddRGBPoint(low, r1, g1, b1)
    funColorl.AddRGBPoint(high, r1, g1, b1)
    funColorl.AddRGBPoint(-10, r2, g2, b2)
    funColorl.AddRGBPoint(10, r2, g2, b2)
    funColorl.AddRGBPoint(high + 1, 0, 0.5, 0)
    funColorl.AddRGBPoint(1000, 0.5, 0, 0)
    # BUG FIX: ``funColorl.ClampingOff`` was never called (missing
    # parentheses), so clamping silently stayed on; now actually disabled.
    funColorl.ClampingOff()
    # TORSO
    funAlphat = vtk.vtkPiecewiseFunction()
    minimum = 2
    maximum = 10
    funAlphat.AddPoint(0, 0)
    funAlphat.AddPoint(minimum, 0.01)
    funAlphat.AddPoint(maximum, 0.01)
    funAlphat.AddPoint(11, 0.01)
    funAlphat.AddPoint(50, 0.01)
    funAlphat.AddPoint(51, 0.01)
    funAlphat.AddPoint(500, 0.1)
    funAlphat.ClampingOff()
    funColort = vtk.vtkColorTransferFunction()
    funColort.AddRGBPoint(minimum, 0, 0, 0)
    funColort.AddRGBPoint(maximum, 1, 1, 1)
    funColort.AddRGBPoint(11, 0, 1, 0)
    funColort.AddRGBPoint(50, 0, 0, 1)
    funColort.AddRGBPoint(51, 0, 0, 0)
    funColort.AddRGBPoint(500, 0, 0, 0)
    funColort.ClampingOff()
    # 6 set up the volume properties with linear interpolation
    volumePropertyl = vtk.vtkVolumeProperty()
    volumePropertyl.SetColor(0, funColorl)
    volumePropertyl.SetScalarOpacity(0, funAlphal)
    volumePropertyt = vtk.vtkVolumeProperty()
    volumePropertyt.SetColor(0, funColort)
    volumePropertyt.SetScalarOpacity(0, funAlphat)
    volumePropertyl.SetInterpolationTypeToLinear()
    volumePropertyt.SetInterpolationTypeToLinear()
    # 7 set up the actors and connect them to the mappers
    volActl = vtk.vtkVolume()
    volActl.SetMapper(volmpl)
    volActl.SetProperty(volumePropertyl)
    volActt = vtk.vtkVolume()
    volActt.SetMapper(volmpt)
    volActt.SetProperty(volumePropertyt)
    # 8 set up the camera and the renderer
    renderer = vtk.vtkRenderer()
    camera = vtk.vtkCamera()
    camera.SetViewUp(0, 0, 5)
    camera.SetFocalPoint(250, 250, 250)
    camera.SetPosition(-800, 150, 100)
    # 9 set the color of the renderers background
    renderer.SetBackground(1., 1., 1.)
    # 10 set the renderers camera as active
    renderer.SetActiveCamera(camera)
    # 11 add the requested volume actors to the renderer
    if int(self.lung_slider.value()) == 1:
        renderer.AddActor(volActl)
    if int(self.torso_slider.value()) == 1:
        renderer.AddActor(volActt)
    # 12 create a render window
    ren_win = vtk.vtkRenderWindow()
    # 13 add renderer to the render window
    ren_win.AddRenderer(renderer)
    # 14 create an interactor
    iren = vtk.vtkRenderWindowInteractor()
    # 15 connect interactor to the render window
    iren.SetRenderWindow(ren_win)
    # 16 start displaying the render window
    ren_win.Render()
    # 17 make the window interactive (start the interactor)
    iren.Start()
def vtkMultipleMarchingCubes(atlas,color_map): reader = vtk.vtkNIFTIImageReader() reader.SetFileName(atlas) reader.Update() image = reader.GetOutput() # read color map color_list = ReadColorMap(color_map) # create vtkAppendPolyData Filter poly_append = vtk.vtkAppendPolyData() # run marchingcubes for each label #for color in color_list: for i in range(0,5): color = color_list[i] print "Color: ",color[0] # threshold threshold = vtk.vtkImageThreshold() threshold.SetInputData(image) threshold.ThresholdBetween(color[0],color[0]) threshold.ReplaceOutOn() threshold.SetOutValue(0) threshold.Update() image1 = threshold.GetOutput() marching = vtk.vtkMarchingCubes() marching.SetInputData(image1) marching.SetValue(0,color[0]) marching.Update() area = marching.GetOutput() # create color table colors = vtk.vtkUnsignedCharArray() colors.SetNumberOfComponents(3) colors.SetName(color[1]) colors.InsertNextTupleValue([color[0],1,1]) area.GetCellData().Update() area.GetCellData().SetScalars(colors) #area.GetPointData().SetScalars(colors) poly_append.AddInputData(area) poly_append.Update() # clean print "Cleaning... " poly_clean = vtk.vtkCleanPolyData() poly_clean.SetInputData(poly_append.GetOutput()) poly_clean.Update() # visualization print "visualizing..." mapper = vtk.vtkPolyDataMapper() mapper.SetInputData(poly_append.GetOutput()) #mapper.ScalarVisibilityOn() actor = vtk.vtkActor() actor.SetMapper(mapper) renderer = vtk.vtkRenderer() renderer.AddActor(actor) window = vtk.vtkRenderWindow() window.AddRenderer(renderer) interactor = vtk.vtkRenderWindowInteractor() window.SetInteractor(interactor) window.Render() interactor.Start() pass
def vtkExtractOuterSurface(filename):
    # Extract and display the largest connected outer surface of a NIFTI
    # volume. The outermost few voxel layers along every face are zeroed
    # first so marching cubes produces a closed surface at the image border.
    reader = vtk.vtkNIFTIImageReader()
    reader.SetFileName(filename)
    reader.Update()
    image1 = reader.GetOutput()
    dim = image1.GetDimensions()
    print dim
    # Zero a few voxel layers at the top/bottom (z) faces along the x/y edges.
    for x in [0, dim[0] - 1]:
        for y in [0, dim[1] - 1]:
            image1.SetScalarComponentFromFloat(x, y, dim[2] - 1, 0, 0.0)
            image1.SetScalarComponentFromFloat(x, y, dim[2] - 2, 0, 0.0)
            image1.SetScalarComponentFromFloat(x, y, dim[2] - 3, 0, 0.0)
            image1.SetScalarComponentFromFloat(x, y, dim[2] - 4, 0, 0.0)
            image1.SetScalarComponentFromFloat(x, y, 0, 0, 0.0)
            image1.SetScalarComponentFromFloat(x, y, 1, 0, 0.0)
            image1.SetScalarComponentFromFloat(x, y, 2, 0, 0.0)
            image1.SetScalarComponentFromFloat(x, y, 3, 0, 0.0)
    # Zero layers at the x faces along the y/z edges.
    for y in [0, dim[1] - 1]:
        for z in [0, dim[2] - 1]:
            image1.SetScalarComponentFromFloat(dim[0] - 1, y, z, 0, 0.0)
            image1.SetScalarComponentFromFloat(dim[0] - 2, y, z, 0, 0.0)
            image1.SetScalarComponentFromFloat(dim[0] - 3, y, z, 0, 0.0)
            image1.SetScalarComponentFromFloat(dim[0] - 4, y, z, 0, 0.0)
            image1.SetScalarComponentFromFloat(dim[0] - 5, y, z, 0, 0.0)
            image1.SetScalarComponentFromFloat(0, y, z, 0, 0.0)
            image1.SetScalarComponentFromFloat(1, y, z, 0, 0.0)
            image1.SetScalarComponentFromFloat(2, y, z, 0, 0.0)
            image1.SetScalarComponentFromFloat(3, y, z, 0, 0.0)
    # Zero layers at the y faces along the x/z edges.
    for x in [0, dim[0] - 1]:
        for z in [0, dim[2] - 1]:
            image1.SetScalarComponentFromFloat(x, 0, z, 0, 0.0)
            image1.SetScalarComponentFromFloat(x, 1, z, 0, 0.0)
            image1.SetScalarComponentFromFloat(x, 2, z, 0, 0.0)
            image1.SetScalarComponentFromFloat(x, 3, z, 0, 0.0)
            image1.SetScalarComponentFromFloat(x, 4, z, 0, 0.0)
            image1.SetScalarComponentFromFloat(x, 5, z, 0, 0.0)
            image1.SetScalarComponentFromFloat(x, dim[1] - 1, z, 0, 0.0)
            image1.SetScalarComponentFromFloat(x, dim[1] - 2, z, 0, 0.0)
            image1.SetScalarComponentFromFloat(x, dim[1] - 3, z, 0, 0.0)
            image1.SetScalarComponentFromFloat(x, dim[1] - 4, z, 0, 0.0)
            image1.SetScalarComponentFromFloat(x, dim[1] - 5, z, 0, 0.0)
    # threshold: binarise the volume — values in [15, 1000] become 200,
    # everything else 0.
    threshold = vtk.vtkImageThreshold()
    threshold.SetInputData(image1)
    threshold.ThresholdBetween(15, 1000)
    threshold.ReplaceInOn()
    threshold.SetInValue(200)
    threshold.ReplaceOutOn()
    threshold.SetOutValue(0)
    threshold.Update()
    image = threshold.GetOutput()
    # write to image
    # NOTE(review): hard-coded debug output path — consider parameterising.
    writer = vtk.vtkNIFTIImageWriter()
    writer.SetFileName("C:/Users/QIN Shuo/Desktop/test.nii")
    writer.SetInputData(image)
    writer.Update()
    # marching here (contour value 8 lies between the 0/200 binary levels)
    marching = vtk.vtkMarchingCubes()
    marching.SetInputData(image)
    marching.SetValue(0, 8)
    marching.Update()
    skin = marching.GetOutput()
    # Keep only the largest connected surface component.
    connector = vtk.vtkConnectivityFilter()
    connector.SetInputData(skin)
    connector.SetExtractionModeToLargestRegion()
    connector.Update()
    geo = vtk.vtkGeometryFilter()
    geo.SetInputData(connector.GetOutput())
    geo.Update()
    area = geo.GetOutput()
    ## fill holes
    # filler = vtk.vtkFillHolesFilter()
    # filler.SetInputData(area)
    # filler.SetHoleSize(1E6)
    # filler.Update()
    # area = filler.GetOutput()
    # filter = vtk.vtkPolyDataConnectivityFilter()
    # filter.SetInputData(area)
    # filter.SetExtractionModeToSpecifiedRegions()
    # filter.AddSpecifiedRegion(0)
    # filter.Update()
    # area = filter.GetOutput()
    # visualization
    print "visualizing..."
    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInputData(area)
    # mapper.ScalarVisibilityOn()
    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    # actor.GetProperty().SetOpacity(0.7)
    renderer = vtk.vtkRenderer()
    renderer.AddActor(actor)
    window = vtk.vtkRenderWindow()
    window.AddRenderer(renderer)
    interactor = vtk.vtkRenderWindowInteractor()
    window.SetInteractor(interactor)
    window.Render()
    interactor.Start()
def __init__(self, filename, properties, frame):
    # Build a marching-cubes surface view of a NIFTI volume inside a Qt frame.
    #
    # filename:   path of the NIFTI file to contour (isovalue 100)
    # properties: sequence [opacity, ambient, diffuse, specular, red, blue, green]
    # frame:      Qt widget hosting the QVTKRenderWindowInteractor
    # load the data (source)
    reader_src = vtk.vtkNIFTIImageReader()
    reader_src.SetFileName(filename)
    # filter: cast to unsigned short before contouring
    cast_filter = vtk.vtkImageCast()
    cast_filter.SetInputConnection(reader_src.GetOutputPort())
    cast_filter.SetOutputScalarTypeToUnsignedShort()
    # marching cubes (mapper)
    contour = vtk.vtkMarchingCubes()
    contour.SetInputConnection(cast_filter.GetOutputPort())
    contour.ComputeNormalsOn()
    contour.ComputeGradientsOn()
    contour.SetValue(0, 100)
    con_mapper = vtk.vtkPolyDataMapper()
    con_mapper.SetInputConnection(contour.GetOutputPort())
    prop = vtk.vtkProperty()
    opac = properties[0]
    amb = properties[1]
    diff = properties[2]
    spec = properties[3]
    specpwr = 1
    red = properties[4]
    blue = properties[5]
    green = properties[6]
    prop.SetOpacity(opac)
    prop.SetAmbient(amb)
    prop.SetDiffuse(diff)
    prop.SetSpecular(spec)
    prop.SetSpecularPower(specpwr)
    # NOTE(review): SetColor takes (r, g, b) but receives
    # (properties[4], properties[6], properties[5]) because of the
    # blue/green unpack order above — confirm this ordering is intended.
    prop.SetColor(red, green, blue)
    # actor
    actor = vtk.vtkActor()
    actor.SetMapper(con_mapper)
    actor.SetProperty(prop)
    # setup the camera and the renderer
    self.renderer = vtk.vtkRenderer()
    camera = self.renderer.MakeCamera()
    camera.SetViewUp(0., 0., -.1)
    camera.SetPosition(-400, 100, 100)
    self.renderer.SetBackground(1., 1., 1.)  # so to white
    self.renderer.SetActiveCamera(camera)
    self.renderer.AddActor(actor)
    # window interaction (camera movement etc)
    self.interactor = QVTKRenderWindowInteractor(frame)
    style = vtk.vtkInteractorStyleTrackballCamera()
    self.interactor.SetInteractorStyle(style)
    self.interactor.Initialize()
def main(): colors = vtk.vtkNamedColors() #fileName = get_program_parameters() colors.SetColor('SkinColor', [255, 125, 64, 255]) colors.SetColor('BkgColor', [51, 77, 102, 255]) # Create the renderer, the render window, and the interactor. The renderer # draws into the render window, the interactor enables mouse- and # keyboard-based interaction with the data within the render window. # aRenderer = vtk.vtkRenderer() renWin = vtk.vtkRenderWindow() renWin.AddRenderer(aRenderer) iren = vtk.vtkRenderWindowInteractor() iren.SetRenderWindow(renWin) reader = vtk.vtkNIFTIImageReader() reader.SetFileName('medical_files/pancreas_004.nii') reader.Update() # An isosurface, or contour value of 500 is known to correspond to the # skin of the patient. skinExtractor = vtk.vtkMarchingCubes() skinExtractor.SetInputConnection(reader.GetOutputPort()) skinExtractor.SetValue(0, 500) skinMapper = vtk.vtkPolyDataMapper() skinMapper.SetInputConnection(skinExtractor.GetOutputPort()) skinMapper.ScalarVisibilityOff() skin = vtk.vtkActor() skin.SetMapper(skinMapper) skin.GetProperty().SetDiffuseColor(colors.GetColor3d('SkinColor')) # An outline provides context around the data. # outlineData = vtk.vtkOutlineFilter() outlineData.SetInputConnection(reader.GetOutputPort()) mapOutline = vtk.vtkPolyDataMapper() mapOutline.SetInputConnection(outlineData.GetOutputPort()) outline = vtk.vtkActor() outline.SetMapper(mapOutline) outline.GetProperty().SetColor(colors.GetColor3d('Black')) # It is convenient to create an initial view of the data. The FocalPoint # and Position form a vector direction. Later on (ResetCamera() method) # this vector is used to position the camera to look at the data in # this direction. aCamera = vtk.vtkCamera() aCamera.SetViewUp(0, 0, -1) aCamera.SetPosition(0, -1, 0) aCamera.SetFocalPoint(0, 0, 0) aCamera.ComputeViewPlaneNormal() aCamera.Azimuth(30.0) aCamera.Elevation(30.0) # Actors are added to the renderer. An initial camera view is created. 
# The Dolly() method moves the camera towards the FocalPoint, # thereby enlarging the image. # aRenderer.AddActor(outline) aRenderer.AddActor(skin) aRenderer.SetActiveCamera(aCamera) aRenderer.ResetCamera() aCamera.Dolly(1.5) # Set a background color for the renderer and set the size of the # render window (expressed in pixels). aRenderer.SetBackground(colors.GetColor3d('BkgColor')) renWin.SetSize(640, 480) renWin.SetWindowName('MedicalDemo1') # Note that when camera movement occurs (as it does in the Dolly() # method), the clipping planes often need adjusting. Clipping planes # consist of two planes: near and far along the view direction. The # near plane clips out objects in front of the plane the far plane # clips out objects behind the plane. This way only what is drawn # between the planes is actually rendered. aRenderer.ResetCameraClippingRange() # Initialize the event loop and then start it. iren.Initialize() iren.Start()
def create(self, filename, orie='coronal', maxint=1000, op=0.99):
    # Build a 2-D reslice view of a NIFTI volume in the requested orientation.
    #
    # filename: NIFTI file to load; the T1w anatomy file gets a greyscale
    #           lookup table, anything else a two-entry black/self.colour table
    # orie:     'axial' / 'coronal' / 'sagittal' / 'oblique' view selector
    # maxint:   upper bound of the greyscale intensity window
    # op:       opacity of the resulting image actor
    # Returns (actor, volume center, reslice filter).
    # set up the source
    source = vtk.vtkNIFTIImageReader()
    source.SetFileName(filename)
    self.source = source
    # Calculate the center of the volume
    source.Update()
    (xMin, xMax, yMin, yMax, zMin, zMax) = source.GetExecutive().GetWholeExtent(
        source.GetOutputInformation(0))
    (xSpacing, ySpacing, zSpacing) = source.GetOutput().GetSpacing()
    (x0, y0, z0) = source.GetOutput().GetOrigin()
    self.center = [
        x0 + xSpacing * 0.5 * (xMin + xMax),
        y0 + ySpacing * 0.5 * (yMin + yMax),
        z0 + zSpacing * 0.5 * (zMin + zMax)
    ]
    # Matrices for axial, coronal, sagittal, oblique view orientations
    orie_mat = _get_orie_mat(self, orie=orie)
    # Extract a slice in the desired orientation
    self.reslice = vtk.vtkImageReslice()
    self.reslice.SetInputConnection(source.GetOutputPort())
    self.reslice.SetOutputDimensionality(2)
    self.reslice.SetResliceAxes(orie_mat)
    self.reslice.SetInterpolationModeToLinear()
    # Create a greyscale lookup table for the anatomical reference image
    if filename == 'T1w_acpc_dc_restore_brain.nii.gz':
        table = vtk.vtkLookupTable()
        table.SetRange(0, maxint)  # image intensity range
        table.SetValueRange(0.0, 1.0)  # from black to white
        table.SetSaturationRange(0.0, 0.0)  # no color saturation
        table.SetRampToLinear()
        table.Build()
        # Map the image through the lookup table
        color = vtk.vtkImageMapToColors()
        color.SetLookupTable(table)
        color.SetInputConnection(self.reslice.GetOutputPort())
        # Display the image
        actor = vtk.vtkImageActor()
        actor.GetMapper().SetInputConnection(color.GetOutputPort())
        actor.GetProperty().SetOpacity(op)
    else:
        # Binary overlay: background black, foreground in self.colour
        table = vtk.vtkLookupTable()
        table.SetNumberOfTableValues(2)
        table.Build()
        nc = vtk.vtkNamedColors()
        table.SetTableValue(0, nc.GetColor4d("Black"))
        table.SetTableValue(1, nc.GetColor4d(self.colour))
        color = vtk.vtkImageMapToColors()
        color.SetLookupTable(table)
        color.SetInputConnection(self.reslice.GetOutputPort())
        actor = vtk.vtkImageActor()
        actor.GetMapper().SetInputConnection(color.GetOutputPort())
        actor.GetProperty().SetOpacity(op)
    return actor, self.center, self.reslice
def main():
    # Contour a time-varying NIFTI volume (value 500) with an outline box and
    # animate it via a repeating timer. Pipeline objects are module globals so
    # the TimeEvent callback can update them between frames.
    global v16
    global skinExtractor
    global skinNormals
    global skinMapper
    global skin
    global outline
    global outlineData
    global mapOutline
    global aCamera
    global aRenderer
    global renWin
    v16 = vtk.vtkNIFTIImageReader()
    skinExtractor = vtk.vtkContourFilter()
    skinNormals = vtk.vtkPolyDataNormals()
    skinMapper = vtk.vtkPolyDataMapper()  # mapper
    skin = vtk.vtkActor()
    outlineData = vtk.vtkOutlineFilter()
    mapOutline = vtk.vtkPolyDataMapper()
    outline = vtk.vtkActor()
    aCamera = vtk.vtkCamera()
    renWin = vtk.vtkRenderWindow()
    aRenderer = vtk.vtkRenderer()  # renderer
    # source—filter——mapper——actor——render——renderwindow——interactor
    renWin.AddRenderer(aRenderer)
    v16.SetFileName(base + 'stick0.nii.gz')
    v16.TimeAsVectorOn()
    v16.Update()
    skinExtractor.SetInputConnection(v16.GetOutputPort())
    skinExtractor.SetValue(0, 500)
    skinNormals = vtk.vtkPolyDataNormals()
    skinNormals.SetInputConnection(skinExtractor.GetOutputPort())
    skinNormals.SetFeatureAngle(60.0)
    skinMapper.SetInputConnection(skinNormals.GetOutputPort())
    skinMapper.ScalarVisibilityOff()
    skin.SetMapper(skinMapper)
    outlineData.SetInputConnection(v16.GetOutputPort())
    mapOutline = vtk.vtkPolyDataMapper()
    mapOutline.SetInputConnection(outlineData.GetOutputPort())
    outline.SetMapper(mapOutline)
    outline.GetProperty().SetColor(0, 0, 0)
    aCamera.SetViewUp(0, 0, -1)
    aCamera.SetPosition(0, 1, 0)
    aCamera.SetFocalPoint(0, 0, 0)
    aCamera.ComputeViewPlaneNormal()
    # Actors are added to the renderer.An initial camera view is created.
    # The Dolly() method moves the camera towards the Focal Point,
    # thereby enlarging the image.
    aRenderer.AddActor(outline)
    aRenderer.AddActor(skin)
    aRenderer.SetActiveCamera(aCamera)
    aRenderer.ResetCamera()
    aCamera.Dolly(1.5)
    aRenderer.SetBackground(1, 1, 1)
    renWin.SetSize(640, 480)
    aRenderer.ResetCameraClippingRange()
    iren = vtk.vtkRenderWindowInteractor()  # window interaction
    iren.SetRenderWindow(renWin)
    iren.Initialize()
    # Drive the animation: TimeEvent.execute fires every 50 ms.
    cb = TimeEvent()
    iren.AddObserver('TimerEvent', cb.execute)
    iren.CreateRepeatingTimer(50)
    iren.Start()
def rendering(dataPath, dualSurface, img1_path, img2_path, img3_path, img4_path):
    """Subtract four segmented structures from the main volume and
    surface-render both the remainder and the combined segmentation.

    :param dataPath: main image (.nii file, .nhdr/.nrrd file, or DICOM dir);
                     NOTE: the file at this path is overwritten in place
                     with the structure-subtracted image
    :param dualSurface: when truthy, run the dual-surface rendering steps
    :param img1_path: facial-nerve mask (NIFTI)
    :param img2_path: sigmoid-sinus mask (NIFTI)
    :param img3_path: tensor-tympani mask (NIFTI)
    :param img4_path: inner-ear mask (NIFTI)
    """
    import SimpleITK as sitk
    # Load the four masks and force them onto the shared voxel spacing.
    img_facial = sitk.ReadImage(img1_path)
    img_facial.SetSpacing(Sspacing)
    img_sigmoid = sitk.ReadImage(img2_path)
    img_sigmoid.SetSpacing(Sspacing)
    img_tensor = sitk.ReadImage(img3_path)
    img_tensor.SetSpacing(Sspacing)
    img_inner = sitk.ReadImage(img4_path)
    img_inner.SetSpacing(Sspacing)
    segmented_path = "segmented_path.nrrd"
    # Sum all four masks into one combined segmentation image.
    img_segmented = sitk.AddImageFilter()
    img_1 = img_segmented.Execute(img_facial, img_sigmoid)
    img_segmented2 = sitk.AddImageFilter()
    img_2 = img_segmented2.Execute(img_tensor, img_inner)
    img_segmented3 = sitk.AddImageFilter()
    img_3 = img_segmented3.Execute(img_1, img_2)
    img_3.SetOrigin([0, 0, 0])
    sitk.WriteImage(img_3, segmented_path)
    # Remove the segmented structures from the main image and write it back.
    img = sitk.ReadImage(dataPath)
    img.SetSpacing(Sspacing)
    img_subtract = sitk.SubtractImageFilter()
    img = img_subtract.Execute(img, img_3)
    img.SetSpacing(Sspacing)
    img.SetOrigin([0, 0, 0])
    sitk.WriteImage(img, dataPath)
    # Read the data back with the reader that matches its format.
    if dataPath.endswith('.nii'):
        reader = vtk.vtkNIFTIImageReader()
        reader.SetFileName(dataPath)
        reader.Update()
    elif dataPath.endswith('.nhdr') or dataPath.endswith('.nrrd'):
        reader = vtk.vtkNrrdReader()
        reader.SetFileName(dataPath)
        reader.Update()
    else:
        reader = vtk.vtkDICOMImageReader()
        reader.SetDirectoryName(dataPath)
        reader.Update()
    if segmented_path.endswith('.nii'):
        reader2 = vtk.vtkNIFTIImageReader()
        reader2.SetFileName(segmented_path)
        reader2.Update()
    # BUG FIX: the original tested ``dataPath`` in this condition (copy/paste
    # slip), so the hard-coded .nrrd segmentation could fall through to the
    # DICOM branch whenever the main image was not .nrrd.
    elif segmented_path.endswith('.nhdr') or segmented_path.endswith('.nrrd'):
        reader2 = vtk.vtkNrrdReader()
        reader2.SetFileName(segmented_path)
        reader2.Update()
    else:
        reader2 = vtk.vtkDICOMImageReader()
        reader2.SetDirectoryName(segmented_path)
        reader2.Update()
    stl1 = "part1_L1537.stl"
    stl2 = "part2_L1537.stl"
    # Smooth the image
    filteredImage = gaussianFilter(reader)
    # Surface rendering
    if dualSurface:
        dualSurfaceRendering(filteredImage, stl1)
    # Smooth the image
    filteredImage2 = gaussianFilter(reader2)
    # Surface rendering
    if dualSurface:
        dualSurfaceRendering_two(filteredImage, filteredImage2, stl2)
'''
Samaneh Nobakht
This script Segments the brain and head based on hipocampus masks for
preparing binary masks for 3Dvisualisation in VR
'''
import vtk

# Load masks of both hippocampi, head with skull and brain
HipoLeft = vtk.vtkNIFTIImageReader()
HipoLeft.SetFileName("head_bet_left_Hipocampus_Resample.nii")
HipoLeft.Update()
HipoRight = vtk.vtkNIFTIImageReader()
HipoRight.SetFileName("head_bet_Right_Hipocampus_Resample.nii")
HipoRight.Update()
#Load original image
Head = vtk.vtkNIFTIImageReader()
Head.SetFileName("head.nii")
Head.Update()
#Load skull stripped mask
Brain = vtk.vtkNIFTIImageReader()
Brain.SetFileName("head_bet.nii")
Brain.Update()
# Work on a deep copy so the reader output stays untouched.
BrainMask = vtk.vtkImageData()
BrainMask.DeepCopy(Brain.GetOutput())
#Smooth the original image with a 3x3x3 median filter (salt noise removal)
smooth = vtk.vtkImageMedian3D()
smooth.SetInputConnection(Head.GetOutputPort())
smooth.SetKernelSize(3, 3, 3)
smooth.Update()
Salt = smooth.GetOutput()
HeadMask = vtk.vtkImageData()
HeadMask.DeepCopy(Salt)
def setDataset(self):
    """Load the parcelation and template NIfTI volumes and create the VTK
    pipeline objects (marching cubes, mappers, actors, renderer, widgets)
    used by this viewer.

    NOTE: this is Python 2 code (it uses a `print` statement).
    """
    self.ParcelationNumpy = nib.load(
        self.parcelation_filename).get_data().astype(np.uint8)
    self.TemplateNumpy = nib.load(
        self.template_filename).get_data().astype(np.uint8)
    img1 = nib.load(self.parcelation_filename)
    img2 = nib.load(self.template_filename)
    hdr1 = img1.header
    hdr2 = img2.header
    # Voxel sizes from the NIfTI headers (computed but not used below;
    # pixel sizes are hard-coded to 1 instead).
    a1 = hdr1['pixdim'][1:4]
    a2 = hdr2['pixdim'][1:4]
    self.ParcelationNumpy.shape  # NOTE(review): no-op expression statement
    self.PixX = 1
    self.PixY = 1
    self.PixZ = 1
    # NOTE(review): the vtkImageImport setup below looks dead/broken:
    # self.ParcelationReader is created as a vtkImageImport, then
    # immediately rebound to a numpy-array copy (so SetImportVoidPointer
    # is called on an ndarray, not on the importer), and both readers are
    # replaced by vtkNIFTIImageReader instances further down — confirm
    # whether this whole section can be removed.
    self.ParcelationReader = vtk.vtkImageImport()
    self.ParcelationReader = copy.deepcopy(self.ParcelationNumpy)
    self.StrParcelationNumpy = str(self.ParcelationNumpy)
    self.ParcelationReader.SetImportVoidPointer(
        self.StrParcelationNumpy, len(self.StrParcelationNumpy) * 32)
    self.ParcelationReader.SetDataScalarTypeToUnsignedChar()
    print np.shape(self.ParcelationNumpy)
    x, y, z = np.shape(self.ParcelationNumpy)
    self.ParcelationReader.SetDataExtent(0, x - 1, 0, y - 1, 0, z - 1)
    self.ParcelationReader.SetWholeExtent(0, x - 1, 0, y - 1, 0, z - 1)
    self.TemplateReader = vtk.vtkImageImport()
    self.StrTemplateNumpy = str(self.TemplateNumpy)
    self.TemplateReader.CopyImportVoidPointer(self.StrTemplateNumpy,
                                              len(self.StrTemplateNumpy))
    self.TemplateReader.SetDataScalarTypeToUnsignedChar()
    x, y, z = np.shape(self.TemplateNumpy)
    self.TemplateReader.SetDataExtent(0, x - 1, 0, y - 1, 0, z - 1)
    self.TemplateReader.SetWholeExtent(0, x - 1, 0, y - 1, 0, z - 1)
    # NOTE(review): both branches are identical, so this version check is
    # pointless as written.
    if vtk.VTK_MAJOR_VERSION <= 5:
        self.ParcelationReader = vtk.vtkNIFTIImageReader()
    else:
        self.ParcelationReader = vtk.vtkNIFTIImageReader()
    self.ParcelationReader.SetFileName(self.parcelation_filename)
    self.ParcelationNumpy = nib.load(
        self.parcelation_filename).get_data().astype(np.uint8)
    self.ParcelationReader.Update()
    self.TemplateReader = vtk.vtkNIFTIImageReader()
    self.TemplateReader.SetFileName(self.template_filename)
    self.TemplateNumpy = nib.load(
        self.template_filename).get_data().astype(np.uint8)
    self.TemplateReader.Update()
    # Discrete marching cubes + mappers/actors for template and parcelation.
    self.Templatedmc = vtk.vtkDiscreteMarchingCubes()
    self.dmc = vtk.vtkDiscreteMarchingCubes()
    self.TemplateMapper = vtk.vtkPolyDataMapper()
    self.mapper2 = vtk.vtkPolyDataMapper()
    self.outline = vtk.vtkOutlineFilter()
    self.TemplateActor = vtk.vtkActor()
    self.OutlineActor = vtk.vtkActor()
    self.renderer = vtk.vtkRenderer()
    self.renderer.SetBackground(1, 1, 1)  # white background
    self.renderWin = vtk.vtkRenderWindow()
    self.renderWin.AddRenderer(self.renderer)
    self.axes2 = vtk.vtkCubeAxesActor2D()
    self.axes3 = vtk.vtkCubeAxesActor2D()
    # Per-cell RGB colors used when painting regions.
    self.colorData = vtk.vtkUnsignedCharArray()
    self.colorData.SetName('colors')  # Any name will work here.
    self.colorData.SetNumberOfComponents(3)
    self.TextProperty = vtk.vtkTextProperty()
    self.TextProperty.SetColor(0, 0, 0)
    self.TextProperty.SetFontSize(100)
    # Orientation cube marker for the 3D view.
    self.axesActor = vtk.vtkAnnotatedCubeActor()
    self.axes = vtk.vtkOrientationMarkerWidget()
    # Embed the VTK render window in the Qt layout.
    self.renderInteractor = QVTKRenderWindowInteractor(self, rw=self.renderWin)
    self.BoxLayoutView.addWidget(self.renderInteractor)
    self.picker = vtk.vtkCellPicker()
    self.template_data = None
def __init__(self, img_path, mask_path):
    """Build the initial cortical 'peel' surface from a T1 image and a
    brain mask, then generate the first peeled layer.

    Parameters:
        img_path:  path to the reference T1 NIfTI image.
        mask_path: path to the binary brain-mask NIfTI image.

    Uses the module-level helpers downsample/smooth/fixMesh/cleanMesh/
    upsample and the class methods mapImageOnCurrentPeel,
    getCurrentPeelActor and peelDown.
    """
    self.peel = []        # list of vtkPolyData, one per peel level
    self.peelActors = []  # matching vtkActor per peel

    T1_reader = vtk.vtkNIFTIImageReader()
    T1_reader.SetFileName(img_path)
    T1_reader.Update()
    # self.refImage = vtk.vtkImageData()
    self.refImage = T1_reader.GetOutput()

    mask_reader = vtk.vtkNIFTIImageReader()
    mask_reader.SetFileName(mask_path)
    mask_reader.Update()

    # Extract the mask surface at isovalue 1.
    mc = vtk.vtkContourFilter()
    mc.SetInputConnection(mask_reader.GetOutputPort())
    mc.SetValue(0, 1)
    mc.Update()
    refSurface = vtk.vtkPolyData()
    refSurface = mc.GetOutput()

    tmpPeel = vtk.vtkPolyData()
    tmpPeel = downsample(refSurface)

    # Map the surface from voxel (ijk) space to scanner (xyz) space using
    # the mask's sform matrix.
    mask_sFormMatrix = vtk.vtkMatrix4x4()
    mask_sFormMatrix = mask_reader.GetSFormMatrix()
    mask_ijk2xyz = vtk.vtkTransform()
    mask_ijk2xyz.SetMatrix(mask_sFormMatrix)
    mask_ijk2xyz_filter = vtk.vtkTransformPolyDataFilter()
    mask_ijk2xyz_filter.SetInputData(tmpPeel)
    mask_ijk2xyz_filter.SetTransform(mask_ijk2xyz)
    mask_ijk2xyz_filter.Update()

    # Clean-up passes: smooth/fix/clean, refine, then repeat once.
    tmpPeel = smooth(mask_ijk2xyz_filter.GetOutput())
    tmpPeel = fixMesh(tmpPeel)
    tmpPeel = cleanMesh(tmpPeel)
    tmpPeel = upsample(tmpPeel)
    tmpPeel = smooth(tmpPeel)
    tmpPeel = fixMesh(tmpPeel)
    tmpPeel = cleanMesh(tmpPeel)

    # sFormMatrix = vtk.vtkMatrix4x4()
    qFormMatrix = T1_reader.GetQFormMatrix()
    # sFormMatrix = T1_reader.GetSFormMatrix()

    # Forward (image space -> xyz) transform filter.
    refImageSpace2_xyz_transform = vtk.vtkTransform()
    refImageSpace2_xyz_transform.SetMatrix(qFormMatrix)
    self.refImageSpace2_xyz = vtk.vtkTransformPolyDataFilter()
    self.refImageSpace2_xyz.SetTransform(refImageSpace2_xyz_transform)

    # Inverse (xyz -> image space) transform filter.
    # NOTE(review): qFormMatrix is inverted in place AFTER being passed to
    # the forward transform above; this assumes SetMatrix took a copy —
    # confirm against the VTK version in use.
    xyz2_refImageSpace_transform = vtk.vtkTransform()
    qFormMatrix.Invert()
    xyz2_refImageSpace_transform.SetMatrix(qFormMatrix)
    self.xyz2_refImageSpace = vtk.vtkTransformPolyDataFilter()
    self.xyz2_refImageSpace.SetTransform(xyz2_refImageSpace_transform)

    #self.currentPeel = vtk.vtkPolyData()
    self.currentPeel = tmpPeel
    self.currentPeelNo = 0
    self.mapImageOnCurrentPeel()

    # Keep a deep copy of the initial peel as level 0.
    newPeel = vtk.vtkPolyData()
    newPeel.DeepCopy(self.currentPeel)
    self.peel.append(newPeel)
    self.currentPeelActor = vtk.vtkActor()
    self.getCurrentPeelActor()
    self.peelActors.append(self.currentPeelActor)
    self.numberOfPeels = 2
    self.peelDown()
''' Samaneh Nobakht This scripts produces 6 separate meshes of Upper brain, lower brain, upper head, lower head and left and right hippocampi to export to VR''' import vtk #Define renderer renderer = vtk.vtkRenderer() #Read upper head mask reader1 = vtk.vtkNIFTIImageReader() reader1.SetFileName("Head1.nii") reader1.Update() #Create mesh of the upper head up to level of hippocampi isoSurf1 = vtk.vtkMarchingCubes() isoSurf1.SetInputData(reader1.GetOutput()) isoSurf1.SetValue(0, 1) mapper1 = vtk.vtkPolyDataMapper() mapper1.SetInputConnection(isoSurf1.GetOutputPort()) mapper1.ScalarVisibilityOff() #Create property for actor prop1 = vtk.vtkProperty() prop1.SetColor(1.0, 0.0, 0.0) prop1.SetOpacity(0.6) #Create actor and set mapper and property actor1 = vtk.vtkActor() actor1.SetMapper(mapper1) actor1.SetProperty(prop1) #Add actor to renderer renderer.AddActor(actor1) #................... #Load mask of lower head from hippocampus to chin reader2 = vtk.vtkNIFTIImageReader() reader2.SetFileName("Head2.nii") reader2.Update()
import vtk reader = vtk.vtkNIFTIImageReader() reader.SetFileName("pt101_T1_o_brain.nii") reader.Update() image = reader.GetOutput() def TightBounder(image): yvector = [] zvector = [] xvector = [] for z in range(0, image.GetDimensions()[2]): for y in range(0, image.GetDimensions()[1]): for x in range(0, image.GetDimensions()[0]): vox = image.GetScalarComponentAsFloat(x, y, z, 0) if vox == 1: yvector.append(y) zvector.append(z) xvector.append(x) xmin = min(xvector) xmax = max(xvector) zmin = min(zvector) zmax = max(zvector) ymin = min(yvector) ymax = max(yvector) Array = [xmin, xmax, ymin, ymax, zmin, zmax] return (Array)
def mesh_to_volume(poly_data, reference_path):
    """Voxelize *poly_data* onto the voxel grid of the NIfTI image at
    *reference_path* and return the result as a binary image.

    ASSUME INPUT IN RAS

    The reference image must be RAS-oriented and have a nonzero qform
    code; otherwise an Exception is raised.

    TODO: stop reading and writing so much stuff
    Write to buffer? Bytes? Investigate this
    """
    def check_header(nifti_image):
        # Reject references that are not RAS-oriented or lack a qform.
        orientation = ''.join(nib.aff2axcodes(nifti_image.affine))
        is_ras = orientation == 'RAS'
        if not is_ras:
            message = ('RAS orientation expected.'
                       f' Detected orientation: {orientation}')
            raise Exception(message)
        qform_code = nifti_image.header['qform_code']
        if qform_code == 0:
            raise Exception(f'qform code for {reference_path} is 0')

    nii = nib.load(str(reference_path))
    check_header(nii)

    # All-ones stencil image on the reference grid; only the qform is
    # kept so VTK and nibabel agree on the spatial transform.
    image_stencil_array = np.ones(nii.shape, dtype=np.uint8)
    image_stencil_nii = nib.Nifti1Image(image_stencil_array,
                                        nii.affine)  # nii.get_qform())
    image_stencil_nii.header['qform_code'] = 1
    image_stencil_nii.header['sform_code'] = 0

    # Round-trip through a temporary file so VTK can read the stencil.
    with NamedTemporaryFile(suffix='.nii') as f:
        stencil_path = f.name
        image_stencil_nii.to_filename(stencil_path)
        image_stencil_reader = vtk.vtkNIFTIImageReader()
        image_stencil_reader.SetFileName(stencil_path)
        image_stencil_reader.Update()

    image_stencil = image_stencil_reader.GetOutput()

    # World (xyz) -> voxel (ijk) transform for the reference image.
    xyz_to_ijk = image_stencil_reader.GetQFormMatrix()
    if xyz_to_ijk is None:
        warnings.warn('No qform found. Using sform')
        xyz_to_ijk = image_stencil_reader.GetSFormMatrix()
    xyz_to_ijk.Invert()

    transform = vtk.vtkTransform()
    transform.SetMatrix(xyz_to_ijk)

    # Bring the mesh into voxel space.
    transform_poly_data_filter = vtk.vtkTransformPolyDataFilter()
    transform_poly_data_filter.SetTransform(transform)
    transform_poly_data_filter.SetInputData(poly_data)
    transform_poly_data_filter.Update()
    pd_ijk = transform_poly_data_filter.GetOutput()

    # Rasterize the mesh into an image stencil on the reference grid.
    poly_data_to_image_stencil = vtk.vtkPolyDataToImageStencil()
    poly_data_to_image_stencil.SetInputData(pd_ijk)
    poly_data_to_image_stencil.SetOutputSpacing(image_stencil.GetSpacing())
    poly_data_to_image_stencil.SetOutputOrigin(image_stencil.GetOrigin())
    poly_data_to_image_stencil.SetOutputWholeExtent(image_stencil.GetExtent())
    poly_data_to_image_stencil.Update()

    # Apply the stencil to the all-ones image: inside stays 1, outside 0.
    stencil = vtk.vtkImageStencil()
    stencil.SetInputData(image_stencil)
    stencil.SetStencilData(poly_data_to_image_stencil.GetOutput())
    stencil.SetBackgroundValue(0)
    stencil.Update()

    image_output = stencil.GetOutput()

    # Back to a numpy array with the reference image's shape.
    data_object = dsa.WrapDataObject(image_output)
    array = data_object.PointData['NIFTI']
    array = array.reshape(nii.shape, order='F')  # as order='C' didn't work
    array = check_qfac(nii, array)

    num_voxels = array.sum()
    if num_voxels == 0:
        warnings.warn(f'Empty stencil mask for reference {reference_path}')

    output_image = nib_to_sitk(array, nii.affine)
    return output_image
def ReadNii(filename): reader = vtk.vtkNIFTIImageReader() reader.SetFileName(filename) reader.Update() return reader.GetOutput()
def TestDisplay(file1):
    """Display the output"""
    # Render the NIfTI volume in two side-by-side viewports: one slice
    # facing the camera along z (left) and one along x (right).
    # Returns the render window; interactive when "-I" is on sys.argv.
    inpath = os.path.join(str(VTK_DATA_ROOT), "Data", file1)

    reader = vtk.vtkNIFTIImageReader()
    reader.SetFileName(inpath)
    reader.Update()

    size = reader.GetOutput().GetDimensions()
    center = reader.GetOutput().GetCenter()
    spacing = reader.GetOutput().GetSpacing()
    center1 = (center[0], center[1], center[2])
    center2 = (center[0], center[1], center[2])
    # For odd dimensions, shift the focal point by half a voxel so the
    # slice plane lands on a sample position.
    if size[2] % 2 == 1:
        center1 = (center[0], center[1], center[2] + 0.5*spacing[2])
    if size[0] % 2 == 1:
        center2 = (center[0] + 0.5*spacing[0], center[1], center[2])
    vrange = reader.GetOutput().GetScalarRange()

    # Two slice mappers on the same reader output.
    map1 = vtk.vtkImageSliceMapper()
    map1.BorderOn()
    map1.SliceAtFocalPointOn()
    map1.SliceFacesCameraOn()
    map1.SetInputConnection(reader.GetOutputPort())
    map2 = vtk.vtkImageSliceMapper()
    map2.BorderOn()
    map2.SliceAtFocalPointOn()
    map2.SliceFacesCameraOn()
    map2.SetInputConnection(reader.GetOutputPort())

    # Window/level spanning the full scalar range.
    slice1 = vtk.vtkImageSlice()
    slice1.SetMapper(map1)
    slice1.GetProperty().SetColorWindow(vrange[1]-vrange[0])
    slice1.GetProperty().SetColorLevel(0.5*(vrange[0]+vrange[1]))
    slice2 = vtk.vtkImageSlice()
    slice2.SetMapper(map2)
    slice2.GetProperty().SetColorWindow(vrange[1]-vrange[0])
    slice2.GetProperty().SetColorLevel(0.5*(vrange[0]+vrange[1]))

    # Split the window horizontally in proportion to the slice widths.
    ratio = size[0]*1.0/(size[0]+size[2])
    ren1 = vtk.vtkRenderer()
    ren1.SetViewport(0,0,ratio,1.0)
    ren2 = vtk.vtkRenderer()
    ren2.SetViewport(ratio,0.0,1.0,1.0)
    ren1.AddViewProp(slice1)
    ren2.AddViewProp(slice2)

    # Parallel-projection cameras looking along -z and +x respectively.
    cam1 = ren1.GetActiveCamera()
    cam1.ParallelProjectionOn()
    cam1.SetParallelScale(0.5*spacing[1]*size[1])
    cam1.SetFocalPoint(center1[0], center1[1], center1[2])
    cam1.SetPosition(center1[0], center1[1], center1[2] - 100.0)

    cam2 = ren2.GetActiveCamera()
    cam2.ParallelProjectionOn()
    cam2.SetParallelScale(0.5*spacing[1]*size[1])
    cam2.SetFocalPoint(center2[0], center2[1], center2[2])
    cam2.SetPosition(center2[0] + 100.0, center2[1], center2[2])

    # Optional interactive slicing mode when "-I" is passed.
    if "-I" in sys.argv:
        style = vtk.vtkInteractorStyleImage()
        style.SetInteractionModeToImageSlicing()
        iren = vtk.vtkRenderWindowInteractor()
        iren.SetInteractorStyle(style)

    renwin = vtk.vtkRenderWindow()
    renwin.SetSize(size[0] + size[2], size[1])
    renwin.AddRenderer(ren1)
    renwin.AddRenderer(ren2)
    renwin.Render()
    if "-I" in sys.argv:
        renwin.SetInteractor(iren)
        iren.Initialize()
        iren.Start()
    return renwin
def ani_start(self):
    """Surface-render ``segmentedlungs.nii.gz`` and start a rotating
    animation driven by a repeating interactor timer.

    Requires the module-level ``TimerCallback`` class for the timer event.
    Opens its own render window and blocks in the interactor loop.
    """
    filename = "segmentedlungs.nii.gz"

    # Source: NIfTI reader; re-origin the data to its center so the
    # animation rotates around the middle of the volume.
    reader_src = vtk.vtkNIFTIImageReader()
    reader_src.SetFileName(filename)
    reader_src.Update()
    data = reader_src.GetOutput()
    center = data.GetCenter()
    # BUG FIX: the original passed center[0] for all three axes.
    data.SetOrigin(-center[0], -center[1], -center[2])

    # Filter: cast to unsigned short for marching cubes.
    cast_filter = vtk.vtkImageCast()
    cast_filter.SetInputConnection(reader_src.GetOutputPort())
    cast_filter.SetOutputScalarTypeToUnsignedShort()

    # Isosurface extraction; normals/gradients off (post-processing below).
    contour = vtk.vtkMarchingCubes()
    contour.SetInputConnection(cast_filter.GetOutputPort())
    contour.ComputeNormalsOff()
    contour.ComputeGradientsOff()
    # NOTE(review): two contours are requested but both use value 100 —
    # probably a distinct second value was intended; confirm.
    contour.SetNumberOfContours(2)
    contour.SetValue(0, 100)
    contour.SetValue(1, 100)

    # Decimation + smoothing + normals.
    # NOTE(review): this chain is built but the mapper below still reads
    # straight from `contour`, so it has no effect on what is rendered.
    # (Removed a dead vtkWindowedSincPolyDataFilter that was created and
    # immediately overwritten.)
    deci = vtk.vtkDecimatePro()
    deci.SetInputConnection(contour.GetOutputPort())
    deci.SetTargetReduction(0.9)
    deci.PreserveTopologyOff()

    smoother = vtk.vtkSmoothPolyDataFilter()
    smoother.SetInputConnection(deci.GetOutputPort())
    smoother.SetNumberOfIterations(500)

    normals = vtk.vtkPolyDataNormals()
    normals.SetInputConnection(smoother.GetOutputPort())
    normals.FlipNormalsOn()

    con_mapper = vtk.vtkPolyDataMapper()
    con_mapper.SetInputConnection(contour.GetOutputPort())
    con_mapper.ScalarVisibilityOff()

    # Actor with fixed surface properties (mostly translucent dark red).
    prop = vtk.vtkProperty()
    opac = 0.2
    amb = 0.2
    diff = 0.2
    spec = 0.2
    specpwr = 1
    red = 0.6
    green = 0.2
    blue = 0.2
    prop.SetOpacity(opac)
    prop.SetAmbient(amb)
    prop.SetDiffuse(diff)
    prop.SetSpecular(spec)
    prop.SetSpecularPower(specpwr)
    # BUG FIX: arguments were passed as (red, blue, green).
    prop.SetColor(red, green, blue)

    actor = vtk.vtkActor()
    actor.SetMapper(con_mapper)
    actor.SetProperty(prop)

    # Camera, renderer (white background), window and interactor.
    renderer = vtk.vtkRenderer()
    camera = vtk.vtkCamera()
    camera.SetViewUp(0., 1., 0.)
    camera.SetPosition(-500, 100, 100)
    camera.SetFocalPoint(0, 0, 0)
    renderer.SetBackground(1, 1, 1)
    renderer.SetActiveCamera(camera)
    renderer.AddActor(actor)

    ren_win = vtk.vtkRenderWindow()
    ren_win.AddRenderer(renderer)
    iren = vtk.vtkRenderWindowInteractor()
    iren.SetRenderWindow(ren_win)
    ren_win.Render()

    # Animation: a repeating 10 ms timer rotates the actor via TimerCallback.
    iren.Initialize()
    timer_call = TimerCallback(actor)
    iren.AddObserver('TimerEvent', timer_call.execute)
    iren.CreateRepeatingTimer(10)
    iren.Start()
def LoadNifti( name ): reader = vtk.vtkNIFTIImageReader() reader.SetFileName(name); reader.Update() return reader.GetOutput()
import sys import os import json import pandas as pd if not os.path.exists("surfaces"): os.makedirs("surfaces") #lut = pd.read_csv('FreeSurferColorLUT.csv') with open("labels.json") as f: labels = json.load(f) img_path = 'aparc+aseg.nii.gz' # import the binary nifti image print("loading %s" % img_path) reader = vtk.vtkNIFTIImageReader() reader.SetFileName(img_path) reader.Update() print("list unique values (super slow!)") out = reader.GetOutput() vtk_data=out.GetPointData().GetScalars() unique = set() for i in range(0, vtk_data.GetSize()): v = vtk_data.GetValue(i) unique.add(v) index=[] for label in labels: label_id=int(label["label"])
def create_stl(self, image=None, gif=None, file=None):
    '''Create an STL surface mesh for one or more GIF parcellation regions.

    gif: three possible values
        int:        extract the single region with that label
        len([])==1: lower threshold of a binary mask (upper fixed at 208)
        len([])>1:  union of the listed region labels

    image: SimpleITK label image.
    file:  output STL path; a sibling ``<name>.nii.gz`` mask is also
           written next to it.  NOTE: parsed with a backslash separator,
           i.e. Windows-style paths.

    Returns the transformed vtkPolyData, or None when the mesh is empty.
    '''
    # directory / basename-without-extension from *file*
    dir_end = file.rfind('\\')
    out_dir = file[:dir_end]  # renamed from `dir` (shadowed the builtin)
    filename = file[dir_end + 1:]
    name_end = filename.rfind('.')
    name = filename[:name_end]

    # create mask
    mask_image = None
    if isinstance(gif, int):
        # create mask of single region
        mask_image = (image == gif)
    elif len(gif) == 1:
        # take region as lowest threshold
        binaryThresholdFilter = sitk.BinaryThresholdImageFilter()
        binaryThresholdFilter.SetLowerThreshold(gif[0])
        binaryThresholdFilter.SetUpperThreshold(208)
        mask_image = binaryThresholdFilter.Execute(image)
    elif len(gif) > 1:
        # union of masks of each region
        maskS = (image == gif[0])
        for i in range(1, len(gif)):
            maskS = maskS | (image == gif[i])
        # BUG FIX: the union was computed but never assigned, leaving
        # mask_image as None and crashing the writer below.
        mask_image = maskS

    # write NII
    # https://simpleitk.readthedocs.io/en/master/Documentation/docs/source/IO.html
    writer = sitk.ImageFileWriter()
    writer.SetImageIO("NiftiImageIO")
    writer.SetFileName(os.path.join(out_dir, name + '.nii.gz'))
    writer.Execute(mask_image)

    # read NII back with VTK
    reader = vtk.vtkNIFTIImageReader()
    reader.SetFileName(os.path.join(out_dir, name + '.nii.gz'))
    reader.Update()

    # transformation matrix from the NIfTI header
    header = reader.GetNIFTIHeader()
    imgV = reader.GetOutput()
    mat = self.compute_transformation_matrix(header=header, image=imgV)

    # mask to vtk
    dim = reader.GetOutput().GetDimensions()
    centre = reader.GetOutput().GetOrigin()
    spacing = reader.GetOutput().GetSpacing()

    # gaussian smoothing of the binary mask before meshing
    gaussian = vtk.vtkImageGaussianSmooth()
    gaussian.SetInputConnection(reader.GetOutputPort())
    gaussian.SetDimensionality(3)
    gaussian.SetRadiusFactor(0.49)
    gaussian.SetStandardDeviation(0.1)
    gaussian.ReleaseDataFlagOn()
    gaussian.UpdateInformation()
    gaussian.Update()

    # marching cubes
    dmc = vtk.vtkDiscreteMarchingCubes()
    dmc.SetInputConnection(gaussian.GetOutputPort())
    dmc.ComputeNormalsOn()
    dmc.SetValue(0, 1)
    dmc.Update()
    if dmc.GetOutput().GetNumberOfPoints() == 0:
        print('marching cubes of GIF={} is {}'.format(
            gif, dmc.GetOutput().GetNumberOfPoints()))
        return None

    # smooth marching cubes output
    smoother = vtk.vtkWindowedSincPolyDataFilter()
    smoothingIterations = 30  # 15 10
    passBand = 0.001  # 2
    featureAngle = 60.0  # 120.0 360.0
    smoother.SetInputConnection(dmc.GetOutputPort())
    smoother.SetNumberOfIterations(smoothingIterations)
    smoother.BoundarySmoothingOff()
    smoother.FeatureEdgeSmoothingOff()  # on
    smoother.SetFeatureAngle(featureAngle)
    smoother.SetPassBand(passBand)
    smoother.NonManifoldSmoothingOn()
    smoother.BoundarySmoothingOn()
    smoother.NormalizeCoordinatesOn()
    smoother.Update()

    # translate with the header-derived matrix
    transform = vtk.vtkPerspectiveTransform()
    transform.SetMatrix(mat)
    # transform.Concatenate(matA)
    transformFilter = vtk.vtkTransformPolyDataFilter()
    transformFilter.SetTransform(transform)
    transformFilter.SetInputConnection(smoother.GetOutputPort())
    transformFilter.Update()

    # transform if M was saved
    polydata = transformFilter.GetOutput()
    if polydata.GetNumberOfPoints() == 0:
        print('Number of mesh points', polydata.GetNumberOfPoints())
        return None

    # create normals if not available
    point_normals = polydata.GetPointData().GetNormals()
    if point_normals is None:  # idiom: `is None` instead of `== None`
        normalGen = vtk.vtkPolyDataNormals()
        normalGen.SetInputData(polydata)
        normalGen.AutoOrientNormalsOn()
        normalGen.Update()
        point_normals = normalGen.GetOutput().GetPointData().GetNormals()
        polydata.GetPointData().SetNormals(point_normals)
        polydata.GetPointData().GetNormals().Modified()
        polydata.GetPointData().Modified()

    # save STL
    stlWriter = vtk.vtkSTLWriter()
    stlWriter.SetFileName(file)
    stlWriter.SetInputData(polydata)
    stlWriter.Write()
    return polydata
def ImageFilter(input_filename, output_filename, range, overwrite=False):
    """Threshold a NIfTI label image, keeping labels inside *range* and
    zeroing everything else, then write the result with the input's
    NIfTI metadata preserved.

    Parameters:
        input_filename:  source .nii / .nii.gz file.
        output_filename: destination .nii / .nii.gz file.
        range:           (lower, upper) inclusive label bounds.
                         NOTE: shadows the builtin `range`; the name is
                         kept for interface compatibility.
        overwrite:       overwrite the output without prompting.
    """
    # Python 2/3 compatible input
    from six.moves import input

    # Check if output exists and should overwrite
    if os.path.isfile(output_filename) and not overwrite:
        result = input('File \"{}\" already exists. Overwrite? [y/n]: '.format(
            output_filename))
        if result.lower() not in ['y', 'yes']:
            print('Not overwriting. Exiting...')
            os.sys.exit()

    # Check valid range
    if (range[0] > range[1] or range[0] < 0):
        os.sys.exit('[ERROR] Invalid range: {:d} {:d}'.format(
            range[0], range[1]))

    # Read input
    if not os.path.isfile(input_filename):
        os.sys.exit('[ERROR] Cannot find file \"{}\"'.format(input_filename))
    if input_filename.lower().endswith('.nii'):
        reader = vtk.vtkNIFTIImageReader()
    elif input_filename.lower().endswith('.nii.gz'):
        reader = vtk.vtkNIFTIImageReader()
    else:
        os.sys.exit('[ERROR] Cannot find reader for file \"{}\"'.format(
            input_filename))

    print('Reading input image ' + input_filename)
    reader.SetFileName(input_filename)
    reader.Update()

    scalarType = reader.GetOutput().GetScalarType()
    print('Input image scalar type: {:s}'.format(
        reader.GetOutput().GetScalarTypeAsString()))
    print('Input image labels:')
    histogram(reader.GetOutput())

    # Keep labels inside the range, set everything else to 0.
    thres = vtk.vtkImageThreshold()
    thres.SetInputConnection(reader.GetOutputPort())
    thres.SetOutputScalarType(scalarType)
    # BUG FIX: the bounds were hard-coded to (1, 10) even though the
    # validated `range` argument was supplied by the caller.
    thres.ThresholdBetween(range[0], range[1])
    thres.ReplaceOutOn()
    thres.SetOutValue(0)
    thres.Update()

    print('Output image labels:')
    histogram(thres.GetOutput())

    # Create writer
    if output_filename.lower().endswith('.nii'):
        writer = vtk.vtkNIFTIImageWriter()
    elif output_filename.lower().endswith('.nii.gz'):
        writer = vtk.vtkNIFTIImageWriter()
    else:
        os.sys.exit('[ERROR] Cannot find writer for file \"{}\"'.format(
            output_filename))

    # Preserve time/rescale/orientation metadata from the input image.
    writer.SetInputConnection(thres.GetOutputPort())
    writer.SetFileName(output_filename)
    writer.SetTimeDimension(reader.GetTimeDimension())
    writer.SetTimeSpacing(reader.GetTimeSpacing())
    writer.SetRescaleSlope(reader.GetRescaleSlope())
    writer.SetRescaleIntercept(reader.GetRescaleIntercept())
    writer.SetQFac(reader.GetQFac())
    writer.SetQFormMatrix(reader.GetQFormMatrix())
    writer.SetNIFTIHeader(reader.GetNIFTIHeader())
    print('Saving image ' + output_filename)
    writer.Update()
import vtk file = './data/ct_stick.nii.gz' NiftiReader = vtk.vtkNIFTIImageReader() NiftiReader.SetFileName(file) NiftiReader.TimeAsVectorOn() NiftiReader.Update() ren = vtk.vtkRenderer() ren.SetBackground(0, 0, 0) renWin = vtk.vtkRenderWindow() renWin.SetSize(800, 600) renWin.AddRenderer(ren) iren = vtk.vtkRenderWindowInteractor() iren.SetRenderWindow(renWin) iren.SetInteractorStyle(vtk.vtkInteractorStyleTrackballCamera()) surface = vtk.vtkContourFilter() surface.SetInputConnection(NiftiReader.GetOutputPort()) surface.SetValue(0, 500) # Colors lut = vtk.vtkLookupTable() lut.SetNumberOfColors(3) lut.SetTableValue(0, 1, 0, 0, 0.5) # Red lut.SetTableValue(1, 0, 1, 0, 0.5) # Green lut.SetTableValue(2, 1, 1, 1, 0.5) # Blue ''' lut.SetTableValue(3, 0.8900, 0.8100, 0.3400, 0.5) # Banana
def _VolumeRender(self, m_contrastRange=None):
    """Render the job's input data as a volume (nii / vtk / DICOM) and
    write the result image.

    Parameters:
        m_contrastRange: optional (lower, upper) contrast window; when
            None, the lower bound defaults to 0 and the upper is unset.

    Returns the result of MainProcess.ImageWriter for this job.
    Raises TypeError for unsupported input formats.
    """
    m_imagePath = str(self._inDataDirectory)
    # TODO: nii.gz, DICOM, ECAT -- finish nii.gz first
    m_acceptedFormat = ['nii', 'vtk', 'DICOM']

    # Handle contrast range
    if m_contrastRange is None:  # idiom fix: `is None` instead of `== None`
        m_contrastUpper = None
        m_contrastLower = 0
    else:
        m_contrastUpper = m_contrastRange[1]
        m_contrastLower = m_contrastRange[0]

    # Determine the input type from the file suffix.
    m_imagePathSplitted = m_imagePath.split('.')
    m_suffix = m_imagePathSplitted[-1]

    # Create the renderer for this visualization job first.
    renderer = config.rendererDict[str(self._visualizationJobID)]
    renderer.SetBackground(0, 0, 0)

    # since nifti might be compressed, look at the suffix before `.gz`
    if m_suffix == 'gz':
        m_suffix = m_imagePathSplitted[-2]
    # idiom fix: membership test instead of `.count(...) == 0`
    if m_suffix not in m_acceptedFormat:
        raise TypeError("Wrong input format, currently except %s" % m_acceptedFormat)

    # TODO: Write the following part to a reader function
    if m_suffix == 'nii':
        # nifti: GPU ray-cast volume rendering
        m_reader = vtk.vtkNIFTIImageReader()
        m_reader.SetFileName(self._inDataDirectory)
        # -- Use function from Main Process.
        mp = MainProcess.MainProcess()
        m_volume = mp.VolumeRenderingGPURayCast(m_reader,
                                                upperThreshold=m_contrastUpper,
                                                lowerThreshold=m_contrastLower)
        renderer.AddVolume(m_volume)
    elif m_suffix == 'vtk':
        # vtk polydata: DTI loader
        m_reader = vtk.vtkPolyDataReader()
        m_reader.SetFileName(m_imagePath)
        mp = MainProcess.MainProcess()
        m_actor = mp.VolumeRenderingDTILoader(m_reader)
        renderer.AddActor(m_actor)
    elif m_suffix == 'DICOM':
        # DICOM directory; a ".DICOM" suffix on inDataDirectory marks the
        # directory path. TODO: allow user-defined threshold, byte order,
        # spacing and origin.
        m_reader = vtk.vtkDICOMImageReader()
        m_reader.SetDataByteOrderToLittleEndian()
        m_reader.SetDirectoryName(m_imagePath.replace(".DICOM", ""))
        m_reader.SetDataSpacing(3.2, 3.2, 1.5)
        m_reader.SetDataOrigin(0, 0, 0)
        mp = MainProcess.MainProcess()
        m_volume = mp.VolumeRenderingGPUDICOMLoader(m_reader)
        renderer.AddVolume(m_volume)

    renWin = config.renWinDict[str(self._visualizationJobID)]
    renWin.AddRenderer(renderer)
    result = mp.ImageWriter(renderer,
                            dimension=config.dimensionDict[self._visualizationJobID],
                            outCompressionType=self._outCompressionType)
    return result
def save_nii2avi(niipath='test_0.nii.gz', Save_file_name="moive.mp4", Time_Loop=20, Avi_rate=5, Angle=20): # read a NIFTI file reader = vtk.vtkNIFTIImageReader() reader.SetFileName(niipath) reader.TimeAsVectorOn() reader.Update() def getActor(b1, b2, color): threshold = vtk.vtkImageThreshold() threshold.SetInputConnection(reader.GetOutputPort()) threshold.ThresholdBetween(b1, b2) threshold.ReplaceInOn() threshold.SetInValue(0) # set all values below 400 to 0 threshold.ReplaceOutOn() threshold.SetOutValue(1) # set all values above 400 to 1 threshold.Update() dmc = vtk.vtkDiscreteMarchingCubes() dmc.SetInputConnection(threshold.GetOutputPort()) dmc.GenerateValues(1, 1, 1) dmc.Update() mapper = vtk.vtkPolyDataMapper() mapper.SetInputConnection(dmc.GetOutputPort()) mapper.ScalarVisibilityOff() actor = vtk.vtkActor() actor.SetMapper(mapper) actor.GetProperty().SetColor(color) return actor actor_1 = getActor(0, 1, vtk_clr.banana) actor_2 = getActor(1, 2, vtk_clr.blue_light) actor_3 = getActor(2, 3, vtk_clr.green_dark) #Renderer renderer = vtk.vtkRenderer() renderer.SetBackground(0, 0, 0) #RenderWindow renwin = vtk.vtkRenderWindow() renwin.AddRenderer(renderer) #assemble all part assembly = vtk.vtkAssembly() assembly.AddPart(actor_1) assembly.AddPart(actor_2) assembly.AddPart(actor_3) assembly.SetOrigin(0, 0, 0) #Add outline assenble actor renderer.AddActor(assembly) renwin.SetSize(600, 600) # interactor = vtk.vtkRenderWindowInteractor() # interactor.SetRenderWindow(renwin) # interactor.Initialize() renwin.Render() #convert console to movie imageFilter = vtk.vtkWindowToImageFilter() imageFilter.SetInput(renwin) moviewriter = vtk.vtkOggTheoraWriter() moviewriter.SetInputConnection(imageFilter.GetOutputPort()) moviewriter.SetFileName(Save_file_name) moviewriter.Start() moviewriter.SetRate(Avi_rate) for i in range(Time_Loop): renderer.GetActiveCamera().Azimuth(Angle) imageFilter.Modified() moviewriter.Write() moviewriter.End()
## calculate geometric heterogeneity # read label map as vtk files # gt_vtklabel, gt_tfm = miapy_surfdist.read_and_transform_image(gtlabelpath) # b_vtklabel, b_tfm = miapy_surfdist.read_and_transform_image(bratumialabelpath) n_vtklabel, n_tfm = miapy_surfdist.read_and_transform_image(niftylabelpath) # tmp_gtvtkreader = vtk.vtkNIFTIImageReader() # tmp_gtvtkreader.SetFileName(gtlabelpath) # tmp_gtvtkreader.Update() # # tmp_bvtkreader = vtk.vtkNIFTIImageReader() # tmp_bvtkreader.SetFileName(bratumialabelpath) # tmp_bvtkreader.Update() tmp_nvtkreader = vtk.vtkNIFTIImageReader() tmp_nvtkreader.SetFileName(niftylabelpath) tmp_nvtkreader.Update() # # threshold image to combine NCR/NET and CET labels to later get an "outer" surface isocontour # thresholder = vtk.vtkImageThreshold() # thresholder.ReplaceInOn() # thresholder.SetInputData(gt_vtklabel) # thresholder.ThresholdBetween(1, 1) # Label for contrast enhancing tumor # thresholder.SetInValue(4) # Set to 4 (same as CET label) # thresholder.SetOutputScalarTypeToUnsignedShort() # thresholder.Update() # # # # threshold image to combine NCR/NET and CET labels to later get an "outer" surface isocontour # thresholder_b = vtk.vtkImageThreshold()
def surface_render_handler(self):
    """Surface-render ``segmentedlungs.nii.gz`` with actor properties
    taken from the UI sliders, optionally in stereo and/or with the
    rotation animation from ``ani_start``.

    Blocks in the interactor loop until the window is closed.
    """
    filename = "segmentedlungs.nii.gz"

    # Source: NIfTI reader.
    reader_src = vtk.vtkNIFTIImageReader()
    reader_src.SetFileName(filename)

    # Filter: cast to unsigned short for marching cubes.
    cast_filter = vtk.vtkImageCast()
    cast_filter.SetInputConnection(reader_src.GetOutputPort())
    cast_filter.SetOutputScalarTypeToUnsignedShort()

    # Isosurface at value 100; normals/gradients off (post-processed below).
    contour = vtk.vtkMarchingCubes()
    contour.SetInputConnection(cast_filter.GetOutputPort())
    contour.ComputeNormalsOff()
    contour.ComputeGradientsOff()
    contour.SetValue(0, 100)

    # Decimation + smoothing + normals.
    # NOTE(review): as in ani_start, this chain is built but the mapper
    # below reads straight from `contour`, so it does not affect rendering.
    # (Removed a dead vtkWindowedSincPolyDataFilter that was created and
    # immediately overwritten.)
    deci = vtk.vtkDecimatePro()
    deci.SetInputConnection(contour.GetOutputPort())
    deci.SetTargetReduction(0.9)
    deci.PreserveTopologyOff()

    smoother = vtk.vtkSmoothPolyDataFilter()
    smoother.SetInputConnection(deci.GetOutputPort())
    smoother.SetNumberOfIterations(500)

    normals = vtk.vtkPolyDataNormals()
    normals.SetInputConnection(smoother.GetOutputPort())
    normals.FlipNormalsOn()

    con_mapper = vtk.vtkPolyDataMapper()
    con_mapper.SetInputConnection(contour.GetOutputPort())
    con_mapper.ScalarVisibilityOff()

    # Actor properties from the UI sliders (0-100 mapped to 0.0-1.0).
    prop = vtk.vtkProperty()
    opac = int(self.OpacSlider.value()) / 100
    amb = int(self.AmbientSlider.value()) / 100
    diff = int(self.DiffSlider.value()) / 100
    spec = int(self.SpecSlider.value()) / 100
    specpwr = 1
    red = int(self.RedSlider.value()) / 100
    green = int(self.GreenSlider.value()) / 100
    blue = int(self.BlueSlider.value()) / 100
    prop.SetOpacity(opac)
    prop.SetAmbient(amb)
    prop.SetDiffuse(diff)
    prop.SetSpecular(spec)
    prop.SetSpecularPower(specpwr)
    prop.SetColor(red, green, blue)

    actor = vtk.vtkActor()
    actor.SetMapper(con_mapper)
    actor.SetProperty(prop)

    # Camera and renderer. The background is white (1, 1, 1); the
    # original comment incorrectly said black.
    renderer = vtk.vtkRenderer()
    camera = vtk.vtkCamera()
    camera.SetViewUp(0., 1., 0.)
    camera.SetPosition(-500, 100, 100)
    camera.SetFocalPoint(200, 200, 200)
    renderer.SetBackground(1, 1, 1)
    renderer.SetActiveCamera(camera)
    renderer.AddActor(actor)

    # Window and interactor.
    ren_win = vtk.vtkRenderWindow()
    ren_win.AddRenderer(renderer)
    iren = vtk.vtkRenderWindowInteractor()
    iren.SetRenderWindow(ren_win)

    # Optional animation (ani_start opens its own window and blocks).
    if int(self.ani_slider.value()) == 1:
        self.ani_start()

    if int(self.stereo_slider.value()) == 1:
        # Crystal-eyes stereo rendering.
        print("stereo enbabled")
        ren_win.GetStereoCapableWindow()
        ren_win.StereoCapableWindowOn()
        ren_win.SetStereoRender(1)
        ren_win.SetStereoTypeToCrystalEyes()
        ren_win.Render()
        iren.Start()
    else:
        ren_win.Render()
        iren.Start()
def read_volume(file_name): reader = vtk.vtkNIFTIImageReader() reader.SetFileNameSliceOffset(1) reader.SetDataByteOrderToBigEndian() reader.SetFileName(file_name) return reader