def _scalar_data_changed(self, data):
    """Trait handler: push a new scalar numpy array into the tvtk image data.

    Handles both the old and new VTK pipelines, configures the image
    extents, and finally fires ``data_changed`` so downstream mayavi
    objects refresh.

    Parameters
    ----------
    data : numpy.ndarray or None
        2D or 3D scalar array; ``None`` clears the current scalars.
    """
    img_data = self.image_data
    if data is None:
        # Clearing the data: drop the scalars and still notify listeners.
        img_data.point_data.scalars = None
        self.data_changed = True
        return
    dims = list(data.shape)
    if len(dims) == 2:
        # Promote a 2D array to a single-slice 3D volume.
        dims.append(1)

    # set the dimension indices
    # (self.dimensions_order maps array axes onto VTK x/y/z)
    dim0, dim1, dim2 = self.dimensions_order

    img_data.origin = tuple(self.origin)
    img_data.dimensions = tuple(dims)
    img_data.extent = 0, dims[dim0] - 1, 0, dims[dim1] - 1, 0, dims[
        dim2] - 1
    if VTK_MAJOR_VERSION <= 7:
        if is_old_pipeline():
            # Old pipeline: the update extent lives on the dataset itself.
            img_data.update_extent = 0, dims[dim0] - 1, 0, dims[
                dim1] - 1, 0, dims[dim2] - 1
        else:
            # New pipeline: the update extent is set on the
            # change-information filter instead of the dataset.
            update_extent = [
                0, dims[dim0] - 1, 0, dims[dim1] - 1, 0, dims[dim2] - 1
            ]
            self.change_information_filter.set_update_extent(update_extent)
    if self.transpose_input_array:
        img_data.point_data.scalars = numpy.ravel(numpy.transpose(data))
    else:
        img_data.point_data.scalars = numpy.ravel(data)
    img_data.point_data.scalars.name = self.scalar_name
    # This is very important and if not done can lead to a segfault!
    # VTK does not reliably detect the scalar data type on its own.
    typecode = data.dtype
    if is_old_pipeline():
        img_data.scalar_type = array_handler.get_vtk_array_type(typecode)
        img_data.update()  # This sets up the extents correctly.
    else:
        filter_out_info = self.change_information_filter.get_output_information(
            0)
        img_data.set_point_data_active_scalar_info(
            filter_out_info, array_handler.get_vtk_array_type(typecode), -1)
        img_data.modified()
    img_data.update_traits()
    self.change_information_filter.update()

    # Now flush the mayavi pipeline.
    self.data_changed = True
def _scalar_data_changed(self, data):
    """Load *data* as the image scalars and flush the mayavi pipeline.

    A value of ``None`` clears the scalars; a 2D array is treated as a
    one-slice 3D volume.
    """
    image = self.image_data
    if data is None:
        image.point_data.scalars = None
        self.data_changed = True
        return

    dim_list = list(data.shape)
    if len(dim_list) == 2:
        dim_list.append(1)

    image.origin = tuple(self.origin)
    image.dimensions = tuple(dim_list)
    bounds = (0, dim_list[0] - 1, 0, dim_list[1] - 1, 0, dim_list[2] - 1)
    image.extent = bounds
    image.update_extent = bounds

    if self.transpose_input_array:
        flat = numpy.ravel(numpy.transpose(data))
    else:
        flat = numpy.ravel(data)
    image.point_data.scalars = flat
    image.point_data.scalars.name = self.scalar_name

    # Setting the scalar type explicitly is essential -- skipping it can
    # lead to a segfault inside VTK.
    typecode = data.dtype
    image.scalar_type = array_handler.get_vtk_array_type(typecode)
    image.update()  # this sets up the extents correctly
    image.update_traits()
    self.change_information_filter.update()

    # Flush the mayavi pipeline.
    self.data_changed = True
def _scalar_data_changed(self, data):
    """Push *data* into the tvtk image dataset and notify downstream objects.

    ``None`` removes the current scalars; 2D input is promoted to a
    single-slice volume before the extents are computed.
    """
    grid = self.image_data
    if data is None:
        # No data: clear scalars but still signal the change.
        grid.point_data.scalars = None
        self.data_changed = True
        return

    shp = list(data.shape)
    if len(shp) == 2:
        shp = shp + [1]  # promote 2D -> one-slice 3D

    grid.origin = tuple(self.origin)
    grid.dimensions = tuple(shp)
    grid.extent = (0, shp[0] - 1, 0, shp[1] - 1, 0, shp[2] - 1)
    grid.update_extent = (0, shp[0] - 1, 0, shp[1] - 1, 0, shp[2] - 1)

    source = numpy.transpose(data) if self.transpose_input_array else data
    grid.point_data.scalars = numpy.ravel(source)
    grid.point_data.scalars.name = self.scalar_name

    # Very important: without an explicit scalar type VTK can segfault.
    grid.scalar_type = array_handler.get_vtk_array_type(data.dtype)
    grid.update()  # sets up the extents correctly
    grid.update_traits()
    self.change_information_filter.update()

    # Flush the mayavi pipeline.
    self.data_changed = True
def _scalar_data_changed(self, data):
    """Trait handler: install a new scalar array on the tvtk image data.

    Works with both the old and new VTK pipelines and finally fires
    ``data_changed`` so the mayavi pipeline refreshes.

    Parameters
    ----------
    data : numpy.ndarray or None
        2D or 3D scalar array; ``None`` clears the current scalars.
    """
    img_data = self.image_data
    if data is None:
        # Clearing: drop the scalars but still notify listeners.
        img_data.point_data.scalars = None
        self.data_changed = True
        return
    dims = list(data.shape)
    if len(dims) == 2:
        # Promote a 2D array to a single-slice 3D volume.
        dims.append(1)

    # set the dimension indices
    # (self.dimensions_order maps array axes onto VTK x/y/z)
    dim0, dim1, dim2 = self.dimensions_order

    img_data.origin = tuple(self.origin)
    img_data.dimensions = tuple(dims)
    img_data.extent = 0, dims[dim0]-1, 0, dims[dim1]-1, 0, dims[dim2]-1
    if VTK_MAJOR_VERSION <= 7:
        if is_old_pipeline():
            # Old pipeline: update extent lives on the dataset itself.
            img_data.update_extent = 0, dims[dim0]-1, 0, dims[dim1]-1, 0, dims[dim2]-1
        else:
            # New pipeline: the update extent goes through the
            # change-information filter.
            update_extent = [0, dims[dim0]-1, 0, dims[dim1]-1, 0, dims[dim2]-1]
            self.change_information_filter.set_update_extent(update_extent)
    if self.transpose_input_array:
        img_data.point_data.scalars = np.ravel(np.transpose(data))
    else:
        img_data.point_data.scalars = np.ravel(data)
    img_data.point_data.scalars.name = self.scalar_name
    # This is very important and if not done can lead to a segfault!
    # VTK does not reliably detect the scalar data type on its own.
    typecode = data.dtype
    if is_old_pipeline():
        img_data.scalar_type = get_vtk_array_type(typecode)
        img_data.update()  # This sets up the extents correctly.
    else:
        filter_out_info = self.change_information_filter.get_output_information(0)
        img_data.set_point_data_active_scalar_info(filter_out_info,
                                                   get_vtk_array_type(typecode),
                                                   -1)
        img_data.modified()
    img_data.update_traits()
    self.change_information_filter.update()

    # Now flush the mayavi pipeline.
    self.data_changed = True
def numpy2vtk(arr, fname):
    """Write a 3D numpy scalar array to a VTK XML image file (``.vti``).

    The data is placed on a regular grid spanning the fixed volume
    [-5, 5]^3, with the spacing derived from the array's shape.

    Parameters
    ----------
    arr : numpy.ndarray
        3D scalar array to export.
    fname : str
        Output file name without extension; ``'.vti'`` is appended.
    """
    dims = array(arr.shape)
    # Data is mapped onto a fixed [-5, 5]^3 volume; the spacing follows
    # from the number of samples along each axis.
    vol = array((-5., 5, -5, 5, -5, 5))
    origin = vol[::2]
    spacing = (vol[1::2] - origin) / (dims - 1)

    # Make the tvtk dataset.
    spoints = tvtk.StructuredPoints(origin=origin, spacing=spacing,
                                    dimensions=dims)

    # The transpose makes the C-ordered numpy data suitable for tvtk and
    # the copy makes it contiguous (not strictly necessary -- a
    # non-contiguous array would be copied internally anyway).
    s = arr.transpose().copy()
    spoints.point_data.scalars = ravel(s)
    spoints.point_data.scalars.name = 'scalars'

    # This is needed in slightly older versions of VTK (like the 5.0.2
    # release) to prevent a segfault.  VTK does not detect the correct
    # data type.
    spoints.scalar_type = get_vtk_array_type(s.dtype)

    # Save the dataset to a VTK XML file.
    w = tvtk.XMLImageDataWriter(input=spoints, file_name=fname + '.vti')
    w.write()
def StructuredScalar(InputArray, OutFileName, lim1, lim2):
    """Write a cubic 3D scalar array to a VTK XML image file (``.vti``).

    The data is placed on a regular grid spanning [lim1, lim2]^3 with the
    spacing derived from the array's size.

    Parameters
    ----------
    InputArray : numpy.ndarray
        Cubic 3D scalar array (all axes assumed equal to its first axis).
    OutFileName : str
        Output file name without extension; ``'.vti'`` is appended.
    lim1, lim2 : float
        Lower and upper bound of the volume along every axis.
    """
    nGr = np.shape(InputArray)[0]

    # Make the data: grid geometry for the [lim1, lim2]^3 volume.
    dims = np.array((nGr, nGr, nGr))
    vol = np.array((lim1, lim2, lim1, lim2, lim1, lim2))
    origin = vol[::2]
    spacing = (vol[1::2] - origin) / (dims - 1)

    # Make the tvtk dataset.
    spoints = tvtk.StructuredPoints(origin=origin, spacing=spacing,
                                    dimensions=dims)

    # Transpose to match VTK's ordering; copy makes the result contiguous.
    s = InputArray.transpose().copy()
    spoints.point_data.scalars = np.ravel(s)
    spoints.point_data.scalars.name = 'scalars'

    # Set the scalar type explicitly -- older VTK releases segfault
    # otherwise because they do not detect the data type.
    spoints.scalar_type = get_vtk_array_type(s.dtype)

    fileOut = OutFileName + '.vti'
    print(fileOut)
    w = tvtk.XMLImageDataWriter(input=spoints, file_name=fileOut)
    w.write()
# Make the tvtk dataset. spoints = tvtk.StructuredPoints(origin=origin, spacing=spacing, dimensions=dims) # The copy makes the data contiguous and the transpose makes it # suitable for display via tvtk. Note that it is not necessary to # make the data contiguous since in that case the array is copied # internally. s = scalars.transpose().copy() spoints.point_data.scalars = ravel(s) spoints.point_data.scalars.name = 'scalars' # This is needed in slightly older versions of VTK (like the 5.0.2 # release) to prevent a segfault. VTK does not detect the correct # data type. spoints.scalar_type = get_vtk_array_type(s.dtype) # Uncomment the next two lines to save the dataset to a VTK XML file. #w = tvtk.XMLImageDataWriter(input=spoints, file_name='spoints3d.vti') #w.write() # Now view the data. @mayavi2.standalone def view(): from mayavi.sources.vtk_data_source import VTKDataSource from mayavi.modules.outline import Outline from mayavi.modules.image_plane_widget import ImagePlaneWidget mayavi.new_scene() src = VTKDataSource(data = spoints) mayavi.add_source(src)
spoints = tvtk.StructuredPoints(origin=origin, spacing=spacing, dimensions=dims) # The copy makes the data contiguous and the transpose makes it # suitable for display via tvtk. Note that it is not necessary to # make the data contiguous since in that case the array is copied # internally. s = scalars.transpose().copy() spoints.point_data.scalars = ravel(s) spoints.point_data.scalars.name = 'scalars' # This is needed in slightly older versions of VTK (like the 5.0.2 # release) to prevent a segfault. VTK does not detect the correct # data type. if is_old_pipeline(): spoints.scalar_type = get_vtk_array_type(s.dtype) # Uncomment the next two lines to save the dataset to a VTK XML file. #w = tvtk.XMLImageDataWriter(input=spoints, file_name='spoints3d.vti') #w.write() # Now view the data. @mayavi2.standalone def view(): from mayavi.sources.vtk_data_source import VTKDataSource from mayavi.modules.outline import Outline from mayavi.modules.image_plane_widget import ImagePlaneWidget mayavi.new_scene() src = VTKDataSource(data=spoints)