Example #1
0
    def _scalar_data_changed(self, data):
        """Refresh the tvtk image data whenever the scalar array changes."""
        image = self.image_data
        if data is None:
            # Clearing the scalars still flushes the mayavi pipeline.
            image.point_data.scalars = None
            self.data_changed = True
            return

        shape = list(data.shape)
        if len(shape) == 2:
            # Promote a 2D array to a single-slice 3D volume.
            shape.append(1)

        bounds = (0, shape[0] - 1, 0, shape[1] - 1, 0, shape[2] - 1)
        image.origin = tuple(self.origin)
        image.dimensions = tuple(shape)
        image.extent = bounds
        image.update_extent = bounds

        source = numpy.transpose(data) if self.transpose_input_array else data
        image.point_data.scalars = numpy.ravel(source)
        image.point_data.scalars.name = self.scalar_name

        # This is very important and if not done can lead to a segfault!
        image.scalar_type = array_handler.get_vtk_array_type(data.dtype)
        image.update()  # This sets up the extents correctly.
        image.update_traits()
        self.change_information_filter.update()

        # Now flush the mayavi pipeline.
        self.data_changed = True
Example #2
0
    def _scalar_data_changed(self, data):
        """Push the new scalar array into ``self.image_data``."""
        dataset = self.image_data
        if data is None:
            dataset.point_data.scalars = None
            self.data_changed = True
            return

        dims = list(data.shape)
        if len(dims) == 2:
            # A 2D array is treated as a flat (depth 1) volume.
            dims = dims + [1]

        dataset.origin = tuple(self.origin)
        dataset.dimensions = tuple(dims)
        extent = (0, dims[0] - 1, 0, dims[1] - 1, 0, dims[2] - 1)
        dataset.extent = extent
        dataset.update_extent = extent

        if self.transpose_input_array:
            scalars = numpy.ravel(numpy.transpose(data))
        else:
            scalars = numpy.ravel(data)
        dataset.point_data.scalars = scalars
        dataset.point_data.scalars.name = self.scalar_name

        # This is very important and if not done can lead to a segfault!
        dataset.scalar_type = array_handler.get_vtk_array_type(data.dtype)
        dataset.update()  # This sets up the extents correctly.
        dataset.update_traits()

        # Now flush the mayavi pipeline.
        self.data_changed = True
Example #3
0
    def _change_primary_scalars(self, arr, name):
        """Install *arr* as the primary RGBA scalars of ``self.data``.

        Parameters
        ----------

        arr: ndarray, shape (Nx, Ny, Nz, 4)
           If this is going in as primary scalars, it is definitely an RGBA
           vector array provided by a BlendedImages. Therefore it needs to
           be reshaped to C-order (Nz, Ny, Nx, 4).

        name: str
           array label
        """
        pd = self.data.point_data
        # A size change invalidates the existing point-data arrays, so
        # remove them before installing the new scalars.
        if pd.scalars is not None \
               and pd.scalars.size != arr.size:
            self.safe_remove_arrays()
        rgba = quick_convert_rgba_to_vtk(arr)
        xyz_shape = rgba.shape[:3]
        dataset = self.data

        # Set the ImageData metadata.  VTK dimensions are (x, y, z),
        # i.e. the reverse of the C-ordered array shape.
        dataset.origin = self.blender.img_origin
        dataset.spacing = self.blender.img_spacing
        dataset.dimensions = xyz_shape[::-1]
        dataset.extent = 0, xyz_shape[2]-1, 0, xyz_shape[1]-1, 0, xyz_shape[0]-1
        dataset.update_extent = dataset.extent

        dataset.number_of_scalar_components = 4
        dataset.scalar_type = get_vtk_array_type(arr.dtype)

        # Set the scalars and their name (the helper handles the reshape).
        self.set_new_array(rgba, name, update=False)

        dataset.update()
        dataset.update_traits()
        self._check_aa()
        self._update_data()
        self._push_changes()
        self.point_scalars_name = name
Example #4
0
    def _scalar_data_changed(self, data):
        """Copy *data* into the tvtk image data, with RGBA-byte support.

        A uint8 array whose last axis has length 4 is treated as packed
        RGBA color data: the trailing axis becomes the scalar components
        rather than a spatial dimension.
        """
        img_data = self.image_data
        if data is None:
            # Clearing the scalars still flushes the mayavi pipeline.
            img_data.point_data.scalars = None
            self.data_changed = True
            return
        # 'B' is the numpy typecode for uint8.
        is_rgba_bytes = (data.dtype.char=='B' and data.shape[-1]==4)
        # Spatial dims exclude the trailing color axis for RGBA input.
        dims = list(data.shape[:-1]) if is_rgba_bytes else list(data.shape)
        if len(dims) == 2:
            # Promote a 2D image to a single-slice 3D volume.
            dims.append(1)
      
        img_data.origin = tuple(self.origin)

        # Flat shape of the point scalars: one row per point, plus an
        # extra length-4 component axis for RGBA input.
        flat_shape = ( np.prod(dims), )
        if is_rgba_bytes:
            flat_shape += (4,)
        if self.transpose_input_array:
            if is_rgba_bytes:
                # keep the color components in the last dimension
                d = data.transpose(2,1,0,3).copy()
                d.shape = flat_shape
                img_data.point_data.scalars = d
            else:
                img_data.point_data.scalars = np.ravel(np.transpose(data))
            img_data.dimensions = tuple(dims)
            img_data.extent = 0, dims[0]-1, 0, dims[1]-1, 0, dims[2]-1
            img_data.update_extent = 0, dims[0]-1, 0, dims[1]-1, 0, dims[2]-1
        else:
            # No transpose: the array is already C-ordered, so the VTK
            # (x, y, z) dimensions are the reverse of the array shape.
            img_data.point_data.scalars = data.reshape(flat_shape)
            img_data.dimensions = tuple(dims[::-1])
            img_data.extent = 0, dims[2]-1, 0, dims[1]-1, 0, dims[0]-1
            img_data.update_extent = 0, dims[2]-1, 0, dims[1]-1, 0, dims[0]-1

        img_data.number_of_scalar_components = 4 if is_rgba_bytes else 1
        img_data.point_data.scalars.name = self.scalar_name
        # This is very important and if not done can lead to a segfault!
        typecode = data.dtype
        img_data.scalar_type = get_vtk_array_type(typecode)
        img_data.update() # This sets up the extents correctly.
        img_data.update_traits()
        self.change_information_filter.update()

        # Now flush the mayavi pipeline.
        self.data_changed = True
# Build the tvtk dataset that will hold the scalar field.
spoints = tvtk.StructuredPoints(origin=origin, spacing=spacing,
                                dimensions=dims)

# Transpose so the array is laid out the way tvtk expects for display,
# then flatten it.  The copy makes the data contiguous; strictly
# speaking that is optional, since a non-contiguous array would simply
# be copied internally.
flat = ravel(scalars.transpose().copy())
spoints.point_data.scalars = flat
spoints.point_data.scalars.name = 'scalars'

# Slightly older VTK releases (like the 5.0.2 release) do not detect
# the correct data type on their own; setting it explicitly prevents a
# segfault.
spoints.scalar_type = get_vtk_array_type(flat.dtype)

# Uncomment the next two lines to save the dataset to a VTK XML file.
#w = tvtk.XMLImageDataWriter(input=spoints, file_name='spoints3d.vti')
#w.write()


# Now view the data.
@mayavi2.standalone
def view():
    from enthought.mayavi.sources.vtk_data_source import VTKDataSource
    from enthought.mayavi.modules.outline import Outline
    from enthought.mayavi.modules.image_plane_widget import ImagePlaneWidget

    mayavi2.new_scene()
    src = VTKDataSource(data=spoints)
Example #6
0
# Construct the tvtk dataset.
spoints = tvtk.StructuredPoints(origin=origin, spacing=spacing,
                                dimensions=dims)

# tvtk wants the transposed array for display; copying makes it
# contiguous, although a non-contiguous array would just be copied
# internally anyway.
transposed = scalars.transpose().copy()
spoints.point_data.scalars = ravel(transposed)
spoints.point_data.scalars.name = 'scalars'

# Needed in slightly older versions of VTK (like the 5.0.2 release) to
# prevent a segfault: VTK does not detect the correct data type.
spoints.scalar_type = get_vtk_array_type(transposed.dtype)

# Uncomment the next two lines to save the dataset to a VTK XML file.
#w = tvtk.XMLImageDataWriter(input=spoints, file_name='spoints3d.vti')
#w.write()

# Now view the data.
@mayavi2.standalone
def view():
    from enthought.mayavi.sources.vtk_data_source import VTKDataSource
    from enthought.mayavi.modules.outline import Outline
    from enthought.mayavi.modules.image_plane_widget import ImagePlaneWidget

    mayavi.new_scene()
    src = VTKDataSource(data = spoints)
    mayavi.add_source(src)