class ModuleManager(Base):
    """ The module manager node (represented as 'Colors and Legends').
    """

    # The source object this is connected to.
    source = Instance(Base)

    # The modules contained by this manager.
    children = List(Module, record=True)

    # The data type to use for the LUTs.  Changing this setting will
    # change the data range and name of the lookup table/legend bar.
    # If set to 'auto', it automatically looks for cell and point data
    # with point data being preferred over cell data and chooses the
    # one available.  If set to 'point data' it uses the input point
    # data for the LUT and if set to 'cell data' it uses the input
    # cell data.
    lut_data_mode = Trait('auto',
                          TraitPrefixList(LUT_DATA_MODE_TYPES),
                          desc='specify the data type used by the lookup tables',
                          )

    # The scalar lookup table manager.
    scalar_lut_manager = Instance(LUTManager, args=(), record=True)

    # The vector lookup table manager.
    vector_lut_manager = Instance(LUTManager, args=(), record=True)

    # The name of the ModuleManager.
    name = Str('Colors and legends')

    # The icon
    icon = Str('modulemanager.ico')

    # The human-readable type for this object
    type = Str(' colors and legends')

    # Information about what this object can consume.
    input_info = PipelineInfo(datasets=['any'])

    # Information about what this object can produce.
    output_info = PipelineInfo(datasets=['any'])

    ######################################################################
    # `object` interface
    ######################################################################
    def __get_pure_state__(self):
        """Return the picklable state of this object."""
        d = super(ModuleManager, self).__get_pure_state__()
        # Source is setup dynamically, don't pickle it.
        d.pop('source', None)
        return d

    def __set_pure_state__(self, state):
        """Restore this object from previously pickled `state`."""
        # Do everything but our kids.
        set_state(self, state, ignore=['children'])
        # Setup children.
        handle_children_state(self.children, state.children)
        # Now setup the children.
        set_state(self, state, first=['children'], ignore=['*'])
        self.update()

    ######################################################################
    # `ModuleManager` interface
    ######################################################################
    def update(self):
        """Update any internal data.

        This is invoked when the source changes or when there are
        pipeline/data changes upstream.
        """
        self._setup_scalar_data()
        self._setup_vector_data()

    ######################################################################
    # `Base` interface
    ######################################################################
    def start(self):
        """This is invoked when this object is added to the mayavi
        pipeline.
        """
        # Do nothing if we are already running.
        if self.running:
            return

        # Setup event handlers.
        self._setup_event_handlers()

        # Start all our children.
        for obj in self.children:
            obj.start()
        # The LUT managers must be started as well so the legends show.
        for obj in (self.scalar_lut_manager, self.vector_lut_manager):
            obj.start()

        # Call parent method to set the running state.
        super(ModuleManager, self).start()

    def stop(self):
        """Invoked when this object is removed from the mayavi
        pipeline.
        """
        if not self.running:
            return

        # Teardown event handlers.
        self._teardown_event_handlers()

        # Stop all our children.
        for obj in self.children:
            obj.stop()
        for obj in (self.scalar_lut_manager, self.vector_lut_manager):
            obj.stop()

        # Call parent method to set the running state.
        super(ModuleManager, self).stop()

    def add_child(self, child):
        """This method intelligently adds a child to this object in
        the MayaVi pipeline.  Modules become our children; anything
        else (e.g. filters) is delegated to our source.
        """
        if isinstance(child, Module):
            self.children.append(child)
        else:
            # Ask our source to deal with it.
            self.source.add_child(child)

    def remove_child(self, child):
        """Remove specified child from our children.
        """
        self.children.remove(child)

    ######################################################################
    # `TreeNodeObject` interface
    ######################################################################
    def tno_can_add(self, node, add_object):
        """ Returns whether a given object is droppable on the node.
        """
        try:
            if issubclass(add_object, Module):
                return True
        except TypeError:
            # `add_object` is an instance, not a class.
            if isinstance(add_object, Module):
                return True
        return False

    def tno_drop_object(self, node, dropped_object):
        """ Returns a droppable version of a specified object.
        """
        if isinstance(dropped_object, Module):
            return dropped_object

    ######################################################################
    # Non-public interface
    ######################################################################
    def _children_changed(self, old, new):
        # The whole `children` list was replaced.
        self._handle_children(old, new)

    def _children_items_changed(self, list_event):
        # Individual items were added/removed from `children`.
        self._handle_children(list_event.removed, list_event.added)

    def _handle_children(self, removed, added):
        """Stop removed children and wire up/start added ones."""
        # Stop all the old children.
        for obj in removed:
            obj.stop()
        # Setup and start the new ones.
        for obj in added:
            obj.set(module_manager=self, scene=self.scene,
                    parent=self)
            if self.running:
                # It makes sense to start children only if we are running.
                # If not, the children will be started when we start.
                try:
                    obj.start()
                except:
                    # Log the failure but keep going so one bad module
                    # does not prevent the rest from starting.
                    exception()

    def _source_changed(self):
        # Mirror the new source's output information and recompute the
        # LUT ranges/names.
        self.output_info.copy_traits(self.source.output_info)
        self.update()

    def _setup_event_handlers(self):
        # Re-run `update` whenever the upstream pipeline or data change.
        src = self.source
        src.on_trait_event(self.update, 'pipeline_changed')
        src.on_trait_event(self.update, 'data_changed')

    def _teardown_event_handlers(self):
        # Remove the handlers installed in `_setup_event_handlers`.
        src = self.source
        src.on_trait_event(self.update, 'pipeline_changed',
                           remove=True)
        src.on_trait_event(self.update, 'data_changed',
                           remove=True)

    def _scene_changed(self, value):
        # Propagate the new scene to all children and both LUT managers.
        for obj in self.children:
            obj.scene = value
        for obj in (self.scalar_lut_manager, self.vector_lut_manager):
            obj.scene = value

    def _lut_data_mode_changed(self, value):
        self.update()

    def _setup_scalar_data(self):
        """Computes the scalar range and an appropriate name for the
        lookup table."""
        input = self.source.outputs[0]
        ps = input.point_data.scalars
        cs = input.cell_data.scalars

        # Compute candidate range/name from both point and cell
        # scalars; 'No scalars' is the fallback legend name.
        data_attr = DataAttributes(name='No scalars')
        point_data_attr = DataAttributes(name='No scalars')
        point_data_attr.compute_scalar(ps, 'point')
        cell_data_attr = DataAttributes(name='No scalars')
        cell_data_attr.compute_scalar(cs, 'cell')

        if self.lut_data_mode == 'auto':
            # Point data is preferred over cell data when both exist.
            if len(point_data_attr.range) > 0:
                data_attr.copy_traits(point_data_attr)
            elif len(cell_data_attr.range) > 0:
                data_attr.copy_traits(cell_data_attr)
        elif self.lut_data_mode == 'point data':
            data_attr.copy_traits(point_data_attr)
        elif self.lut_data_mode == 'cell data':
            data_attr.copy_traits(cell_data_attr)

        data_attr.config_lut(self.scalar_lut_manager)

    def _setup_vector_data(self):
        """Computes the vector-magnitude range and an appropriate name
        for the vector lookup table (mirrors `_setup_scalar_data`)."""
        input = self.source.outputs[0]
        pv = input.point_data.vectors
        cv = input.cell_data.vectors

        data_attr = DataAttributes(name='No vectors')
        point_data_attr = DataAttributes(name='No vectors')
        point_data_attr.compute_vector(pv, 'point')
        cell_data_attr = DataAttributes(name='No vectors')
        cell_data_attr.compute_vector(cv, 'cell')

        if self.lut_data_mode == 'auto':
            # Point data is preferred over cell data when both exist.
            if len(point_data_attr.range) > 0:
                data_attr.copy_traits(point_data_attr)
            elif len(cell_data_attr.range) > 0:
                data_attr.copy_traits(cell_data_attr)
        elif self.lut_data_mode == 'point data':
            data_attr.copy_traits(point_data_attr)
        elif self.lut_data_mode == 'cell data':
            data_attr.copy_traits(cell_data_attr)

        data_attr.config_lut(self.vector_lut_manager)

    def _visible_changed(self, value):
        # Toggle visibility of all children and both legends together.
        for c in self.children:
            c.visible = value
        self.scalar_lut_manager.visible = value
        self.vector_lut_manager.visible = value
        super(ModuleManager, self)._visible_changed(value)

    def _menu_helper_default(self):
        # Imported lazily -- presumably to avoid a circular import at
        # module load time (TODO confirm).
        from enthought.mayavi.core.traits_menu import ModuleMenuHelper
        return ModuleMenuHelper(object=self)
src = Sp4ArrayCoordSource(file_name=fname, engine=engine) src.scalar_name=fname src.name=os.path.split(fname)[-1] else: from Sp4ArraySource import Sp4ArraySource src = Sp4ArraySource(file_name=fname, engine=engine) return src sp4_reader_info = SourceMetadata( id = "Sp4Array File Reader", factory = 'sp4_reader.sp4_reader', tooltip = "Load a Sp4 file", desc = "Load a Sp4 file", help = "Load a Sp4 file", menu_name = "&Sp4 file", extensions = ['sp4','SP4'], wildcard = 'Sp4 files (*.Sp4)|*.Sp4', output_info = PipelineInfo(datasets=['image_data','structured_grid'], attribute_types=['any'], attributes=['any']) ) # Inject this information in the mayavi registry registry.sources.append(sp4_reader_info) if __name__=='__main__': source=Sp4ArrayFileSource(file_name='/Users/rharder/PhasingProjects/diffmap-moyuAu/Au708-81-Imask400rs/BD-data.sp4') source.update() print source._scalar_data source.configure_traits()
class TensorGlyph(Module):
    """Displays the tensor data of the input as glyphs, using a tensor
    glyph component rendered through an actor.
    """

    # The version of this class. Used for persistence.
    __version__ = 0

    # The glyph component we use to do the actual glyphing.
    glyph = Instance(glyph.Glyph, allow_none=False, record=True)

    # The actor.
    actor = Instance(Actor, allow_none=False, record=True)

    # Information about what this object can consume: any dataset, but
    # it must carry tensor attributes.
    input_info = PipelineInfo(datasets=['any'],
                              attribute_types=['any'],
                              attributes=['tensors'])

    # Create the UI for the traits.
    view = View(Group(Item(name='actor', style='custom'),
                      show_labels=False,
                      label='Actor'),
                Group(Item(name='glyph', style='custom', resizable=True),
                      label='Tensor Glyph',
                      selected=True,
                      show_labels=False))

    ######################################################################
    # `Module` interface
    ######################################################################
    def setup_pipeline(self):
        """Override this method so that it *creates* the tvtk
        pipeline.

        This method is invoked when the object is initialized via
        `__init__`.  Note that at the time this method is called, the
        tvtk data pipeline will *not* yet be setup.  So upstream data
        will not be available.  The idea is that you simply create the
        basic objects and setup those parts of the pipeline not
        dependent on upstream sources and filters.  You should also
        set the `actors` attribute up at this point.
        """
        # Setup the glyphs.
        self.glyph = glyph.Glyph(glyph_type='tensor')
        # Default to the fifth entry of the available glyph sources.
        # NOTE(review): the semantics of index 4 depend on the
        # glyph_list ordering defined elsewhere -- confirm before
        # relying on which shape this selects.
        self.glyph.glyph_source.glyph_source = self.glyph.glyph_source.glyph_list[4]
        self.actor = Actor()

    def update_pipeline(self):
        """Override this method so that it *updates* the tvtk pipeline
        when data upstream is known to have changed.

        This method is invoked (automatically) when any of the inputs
        sends a `pipeline_changed` event.
        """
        mm = self.module_manager
        if mm is None:
            # Not connected to the pipeline yet; nothing to update.
            return

        self.glyph.inputs = [mm.source]

        # Signal downstream listeners that the pipeline changed.
        self.pipeline_changed = True

    def update_data(self):
        """Override this method so that it flushes the vtk pipeline if
        that is necessary.

        This method is invoked (automatically) when any of the inputs
        sends a `data_changed` event.
        """
        # Just set data_changed, the component should do the rest.
        self.data_changed = True

    ######################################################################
    # Non-public methods.
    ######################################################################
    def _glyph_changed(self, old, new):
        """Re-wire the actor when the glyph component is replaced."""
        # Set the glyph's module attribute -- this is important!
        new.module = self

        # Setup actors inputs.
        actor = self.actor
        if actor is not None:
            actor.inputs = [new]

        self._change_components(old, new)

    def _actor_changed(self, old, new):
        """Re-wire the glyph output when the actor is replaced."""
        new.scene = self.scene
        # The actor renders the glyph's output (not this module
        # directly).
        g = self.glyph
        if g is not None:
            new.inputs = [g]

        self._change_components(old, new)
class Threshold(Filter):
    """Thresholds the input data (on either cells or points) so that
    only values between `lower_threshold` and `upper_threshold` pass
    through.
    """

    # The version of this class. Used for persistence.
    __version__ = 0

    # The threshold filter used.  This Property resolves to either the
    # cell-based or the point-based VTK filter depending on
    # `filter_type` (see `_get_threshold_filter`).
    threshold_filter = Property(Instance(tvtk.Object,
                                         allow_none=False),
                                record=True)

    # The filter type to use, specifies if the cells or the points are
    # cells filtered via a threshold.
    filter_type = Enum('cells', 'points',
                       desc='if thresholding is done on cells or points')

    # Lower threshold (this is a dynamic trait that is changed when
    # input data changes).
    lower_threshold = Range(value=-1.0e20,
                            low='_data_min',
                            high='_data_max',
                            enter_set=True,
                            auto_set=False,
                            desc='the lower threshold of the filter')

    # Upper threshold (this is a dynamic trait that is changed when
    # input data changes).
    upper_threshold = Range(value=1.0e20,
                            low='_data_min',
                            high='_data_max',
                            enter_set=True,
                            auto_set=False,
                            desc='the upper threshold of the filter')

    # Automatically reset the lower threshold when the upstream data
    # changes.
    auto_reset_lower = Bool(True, desc='if the lower threshold is '
                            'automatically reset when upstream '
                            'data changes')

    # Automatically reset the upper threshold when the upstream data
    # changes.
    auto_reset_upper = Bool(True, desc='if the upper threshold is '
                            'automatically reset when upstream '
                            'data changes')

    # Information about what this object can consume.
    input_info = PipelineInfo(datasets=['any'],
                              attribute_types=['any'],
                              attributes=['any'])

    # Information about what this object can produce.
    output_info = PipelineInfo(datasets=['poly_data', 'unstructured_grid'],
                               attribute_types=['any'],
                               attributes=['any'])

    # Our view.
    view = View(Group(Group(Item(name='filter_type'),
                            Item(name='lower_threshold'),
                            Item(name='auto_reset_lower'),
                            Item(name='upper_threshold'),
                            Item(name='auto_reset_upper')),
                      Item(name='_'),
                      Group(Item(name='threshold_filter',
                                 show_label=False,
                                 visible_when='object.filter_type == "cells"',
                                 style='custom',
                                 resizable=True)),
                      ),
                resizable=True)

    ########################################
    # Private traits.

    # These traits are used to set the limits for the thresholding.
    # They store the minimum and maximum values of the input data.
    _data_min = Float(-1e20)
    _data_max = Float(1e20)

    # The threshold filter for cell based filtering
    _threshold = Instance(tvtk.Threshold, args=(), allow_none=False)

    # The threshold filter for points based filtering.
    _threshold_points = Instance(tvtk.ThresholdPoints, args=(),
                                 allow_none=False)

    # True until the thresholds have been initialized from the first
    # input that provides scalar data.
    _first = Bool(True)

    ######################################################################
    # `object` interface.
    ######################################################################
    def __get_pure_state__(self):
        """Return the picklable state; the dynamically computed range
        traits are dropped since they are rebuilt from the input."""
        d = super(Threshold, self).__get_pure_state__()
        # These traits are dynamically created.
        for name in ('_first', '_data_min', '_data_max'):
            d.pop(name, None)
        return d

    ######################################################################
    # `Filter` interface.
    ######################################################################
    def setup_pipeline(self):
        """Hook a handler so edits made directly to the cell filter in
        the UI flush the pipeline (see `_threshold_filter_edited`)."""
        attrs = ['all_scalars', 'attribute_mode',
                 'component_mode', 'selected_component']
        self._threshold.on_trait_change(self._threshold_filter_edited,
                                        attrs)

    def update_pipeline(self):
        """Override this method so that it *updates* the tvtk pipeline
        when data upstream is known to have changed.

        This method is invoked (automatically) when the input fires a
        `pipeline_changed` event.
        """
        if len(self.inputs) == 0:
            return

        # By default we set the input to the first output of the first
        # input.
        fil = self.threshold_filter
        fil.input = self.inputs[0].outputs[0]
        self._update_ranges()
        self._set_outputs([self.threshold_filter.output])

    def update_data(self):
        """Override this method to do what is necessary when upstream
        data changes.

        This method is invoked (automatically) when any of the inputs
        sends a `data_changed` event.
        """
        if len(self.inputs) == 0:
            return

        self._update_ranges()

        # Propagate the data_changed event.
        self.data_changed = True

    ######################################################################
    # Non-public interface
    ######################################################################
    def _lower_threshold_changed(self, new_value):
        fil = self.threshold_filter
        fil.threshold_between(new_value, self.upper_threshold)
        fil.update()
        self.data_changed = True

    def _upper_threshold_changed(self, new_value):
        fil = self.threshold_filter
        fil.threshold_between(self.lower_threshold, new_value)
        fil.update()
        self.data_changed = True

    def _update_ranges(self):
        """Updates the ranges of the input.
        """
        data_range = self._get_data_range()
        if len(data_range) > 0:
            dr = data_range
            if self._first:
                self._data_min, self._data_max = dr
                # Set the lower threshold quietly, then the upper one
                # with notification so the filter is updated only once.
                self.set(lower_threshold=dr[0],
                         trait_change_notify=False)
                self.upper_threshold = dr[1]
                self._first = False
            else:
                if self.auto_reset_lower:
                    self._data_min = dr[0]
                    # If the upper threshold is also about to be reset,
                    # suppress this notification to avoid a redundant
                    # filter update.
                    notify = (not self.auto_reset_upper)
                    self.set(lower_threshold=dr[0],
                             trait_change_notify=notify)
                if self.auto_reset_upper:
                    self._data_max = dr[1]
                    self.upper_threshold = dr[1]

    def _get_data_range(self):
        """Returns the range of the input scalar data as a two-element
        list (patching NaN endpoints with nan-aware extrema), or an
        empty list when the input carries no scalars."""
        input = self.inputs[0].outputs[0]
        data_range = []
        ps = input.point_data.scalars
        cs = input.cell_data.scalars

        # FIXME: need to be able to handle cell and point data
        # together.
        if ps is not None:
            data_range = list(ps.range)
            if np.isnan(data_range[0]):
                data_range[0] = float(np.nanmin(ps.to_array()))
            if np.isnan(data_range[1]):
                data_range[1] = float(np.nanmax(ps.to_array()))
        elif cs is not None:
            # BUGFIX: convert to a list -- `cs.range` is an immutable
            # tuple, so the NaN patch-up below used to raise a
            # TypeError when cell scalars contained NaNs.
            data_range = list(cs.range)
            if np.isnan(data_range[0]):
                data_range[0] = float(np.nanmin(cs.to_array()))
            if np.isnan(data_range[1]):
                data_range[1] = float(np.nanmax(cs.to_array()))
        return data_range

    def _auto_reset_lower_changed(self, value):
        if len(self.inputs) == 0:
            return
        if value:
            dr = self._get_data_range()
            # Guard against inputs without scalar data (empty range).
            if len(dr) > 0:
                self._data_min = dr[0]
                self.lower_threshold = dr[0]

    def _auto_reset_upper_changed(self, value):
        if len(self.inputs) == 0:
            return
        if value:
            dr = self._get_data_range()
            # Guard against inputs without scalar data (empty range).
            if len(dr) > 0:
                self._data_max = dr[1]
                self.upper_threshold = dr[1]

    def _get_threshold_filter(self):
        # Property getter: pick the VTK filter matching `filter_type`.
        if self.filter_type == 'cells':
            return self._threshold
        else:
            return self._threshold_points

    def _filter_type_changed(self, value):
        # Fire the property-changed event so listeners re-read
        # `threshold_filter` and `_threshold_filter_changed` runs.
        if value == 'cells':
            old = self._threshold_points
            new = self._threshold
        else:
            old = self._threshold
            new = self._threshold_points
        self.trait_property_changed('threshold_filter', old, new)

    def _threshold_filter_changed(self, old, new):
        # Re-wire the newly selected filter with the current input and
        # thresholds, then publish its output.
        if len(self.inputs) == 0:
            return
        fil = new
        fil.input = self.inputs[0].outputs[0]
        fil.threshold_between(self.lower_threshold,
                              self.upper_threshold)
        fil.update()
        self._set_outputs([fil.output])

    def _threshold_filter_edited(self):
        # The user edited the filter directly in the UI; flush it.
        self.threshold_filter.update()
        self.data_changed = True
class BuiltinSurface(Source):
    """A source providing one of several builtin tvtk poly data
    sources (arrow, cone, sphere, plane, ...), selectable via the
    `source` trait.
    """

    # The version of this class. Used for persistence.
    __version__ = 0

    # Flag to set the poly data type.
    source = Enum('arrow', 'cone', 'cube', 'cylinder', 'disk', 'earth',
                  'line', 'outline', 'plane', 'point', 'polygon',
                  'sphere', 'superquadric', 'textured sphere', 'glyph2d',
                  desc='which poly data source to be used')

    # Define the trait 'data_source' whose value must be an instance of
    # type PolyData
    data_source = Instance(tvtk.PolyDataAlgorithm,
                           allow_none=False,
                           record=True)

    # Information about what this object can produce.
    output_info = PipelineInfo(datasets=['poly_data'],
                               attribute_types=['any'],
                               attributes=['any'])

    # Create the UI for the traits.
    view = View(Group(Item(name='source'),
                      Item(name='data_source',
                           style='custom',
                           resizable=True),
                      label='Surface Source',
                      show_labels=False),
                resizable=True)

    ########################################
    # Private traits.

    # A dictionary that maps the source names to instances of the
    # poly data sources.
    _source_dict = Dict(Str,
                        Instance(tvtk.PolyDataAlgorithm,
                                 allow_none=False))

    ######################################################################
    # `object` interface
    ######################################################################
    def __init__(self, **traits):
        # Call parent class' init.
        super(BuiltinSurface, self).__init__(**traits)

        # Initialize the source to the default mode's instance from
        # the dictionary if needed.
        if 'source' not in traits:
            self._source_changed(self.source)

    def __set_pure_state__(self, state):
        """Restore pickled state; `source` is set first so the right
        data-source instance is selected before the rest applies."""
        self.source = state.source
        super(BuiltinSurface, self).__set_pure_state__(state)

    ######################################################################
    # Non-public methods.
    ######################################################################
    def _source_changed(self, value):
        """This method is invoked (automatically) when the `source`
        trait is changed.
        """
        self.data_source = self._source_dict[self.source]

    def _data_source_changed(self, old, new):
        """This method is invoked (automatically) when the poly data
        source is changed.
        """
        self.outputs = [self.data_source.output]
        # Re-render when the new source's parameters are edited;
        # detach the handler from the old source first.
        if old is not None:
            old.on_trait_change(self.render, remove=True)
        new.on_trait_change(self.render)

    def __source_dict_default(self):
        """Default value for source dict."""
        sd = {'arrow': tvtk.ArrowSource(),
              'cone': tvtk.ConeSource(),
              'cube': tvtk.CubeSource(),
              'cylinder': tvtk.CylinderSource(),
              'disk': tvtk.DiskSource(),
              'earth': tvtk.EarthSource(),
              'line': tvtk.LineSource(),
              'outline': tvtk.OutlineSource(),
              'plane': tvtk.PlaneSource(),
              'point': tvtk.PointSource(),
              'polygon': tvtk.RegularPolygonSource(),
              'sphere': tvtk.SphereSource(),
              'superquadric': tvtk.SuperquadricSource(),
              'textured sphere': tvtk.TexturedSphereSource(),
              'glyph2d': tvtk.GlyphSource2D()}
        return sd
class TriangleReader(FileDataSource):
    """ Reader for the Triangle file formats:
        2D <http://www.cs.cmu.edu/~quake/triangle.html>
        Supports opening .edge files to construct a surface mesh
        comprised of lines and .ele files to construct a solid mesh
        comprised of triangles.
        3D <http://tetgen.berlios.de/fformats.html>
        Supports opening .face files to construct a surface mesh
        comprised of triangles and .ele files to construct a solid
        mesh comprised of tetrahedra.
        Outputs an unstructured grid dataset.
    """

    # The version of this class. Used for persistence.
    __version__ = 0

    # Information about what this object can produce.
    output_info = PipelineInfo(datasets=['unstructured_grid'],
                               attribute_types=['any'],
                               attributes=['scalars'])

    # The active point scalar name.
    point_scalars_name = DEnum(values_name='_point_scalars_list',
                               desc='scalar point data attribute to use')

    # The active cell scalar name.
    cell_scalars_name = DEnum(values_name='_cell_scalars_list',
                              desc='scalar cell data attribute to use')

    ########################################
    # Private traits.

    # These private traits store the list of available data
    # attributes.  The non-private traits use these lists internally.
    _cell_scalars_list = List(String)
    _point_scalars_list = List(String)

    # The VTK dataset to manage.
    _grid = Instance(tvtk.UnstructuredGrid, args=(), allow_none=False)

    # The basename of the file which has been loaded.
    _basename = String

    # Indicates whether nodes are numbered from 0 or 1 (the file
    # format allows both).
    _numbered_from = Int

    # This filter allows us to change the attributes of the data
    # object and will ensure that the pipeline is properly taken care
    # of.
    _assign_attribute = Instance(tvtk.AssignAttribute, args=(),
                                 allow_none=False)

    ########################################
    # The view.
    view = View(Item(name='point_scalars_name'),
                Item(name='cell_scalars_name'))

    ########################################
    # `FileDataSource` interface.
    def initialize(self, base_file_name):
        """Load the mesh rooted at `base_file_name`: always reads the
        companion .node file, then the .edge/.face or .ele file given
        by the extension, and publishes the assembled grid."""
        split = path.splitext(base_file_name)
        self._basename = split[0]
        extension = split[1]

        self._assign_attribute.input = self._grid

        self._read_node_file()
        if (extension == '.face' or extension == '.edge'):
            self._read_face_edge_file(extension)
        else:
            self._read_ele_file()

        self.outputs = [self._assign_attribute.output]
        self.name = 'Triangle file (%s%s)' % (path.basename(self._basename),
                                              extension)

    ########################################
    # File reading methods.
    def _read_node_file(self):
        """ Loads data from {basename}.node, and inserts points and
            point scalars into the unstructured grid.
        """
        file_name = '%s.node' % self._basename
        # Load all data.
        all_data = self._get_data(file_name)

        # Grab values from the first line of data file:
        # <# of points> <dimension> <# of attributes> <boundary flag>
        points, dimensions, attributes, boundary_marker = map(
            int, all_data[0:4])

        # Reshape remainder of array into one row per point.
        data_array = all_data[4:].reshape(
            points, 1 + dimensions + attributes + boundary_marker)

        # The first column of the first row tells us whether node
        # numbering starts at 0 or 1.
        self._numbered_from = int(data_array[0][0])

        points_array = array(data_array[:, 1:(1 + dimensions)], 'double')
        if (dimensions == 2):
            # Add a 0 to each point if it is 2D.
            points_array = array(map(lambda a: append(a, 0), points_array))

        self._grid.points = points_array

        for i in range(attributes):
            attribute_array = data_array[:, (i + dimensions + 1):
                                         (i + dimensions + 2)]
            self._add_attribute_array(attribute_array, i, 'point')

        if (boundary_marker):
            boundary_marker_array = data_array[:,
                                               (dimensions + attributes + 1):
                                               (dimensions + attributes + 2)]
            self._add_boundary_marker_array(boundary_marker_array, 'point')

    def _read_face_edge_file(self, extension):
        """ Loads data from 2D {basename}.edge or 3D {basename}.face,
            and inserts triangle/line cells and cell scalars into the
            unstructured grid.
        """
        file_name = '%s%s' % (self._basename, extension)
        if (extension == '.edge'):
            # 2D. Expect two endpoints which form a line.
            npoints = 2
            cell_type = tvtk.Line().cell_type
        else:
            # 3D. Expect three points which form a triangle.
            npoints = 3
            cell_type = tvtk.Triangle().cell_type

        # Load all data.
        all_data = self._get_data(file_name)

        # Grab values from the first line of data file.
        faces_edges, boundary_marker = map(int, all_data[0:2])

        # Reshape remainder of array into one row per cell.
        data_array = all_data[2:].reshape(faces_edges,
                                          npoints + 1 + boundary_marker)

        # Normalize the node indices to be 0-based.
        nodes_array = data_array[:, 1:npoints + 1] - self._numbered_from
        self._grid.set_cells(cell_type, nodes_array)

        if (boundary_marker):
            boundary_marker_array = data_array[:, npoints + 1:npoints + 2]
            self._add_boundary_marker_array(boundary_marker_array, 'cell')

    def _read_ele_file(self):
        """ Loads data from {basename}.ele, and inserts
            triangle/tetrahedron cells and cell scalars into the
            unstructured grid.
        """
        file_name = '%s.ele' % self._basename

        # Load all data.
        all_data = self._get_data(file_name)

        # Grab values from the first line of data file.
        tet_tri, nodes_per_tet_tri, attributes = map(int, all_data[0:3])

        # Reshape remainder of array into one row per cell.
        data_array = all_data[3:].reshape(tet_tri,
                                          1 + nodes_per_tet_tri + attributes)
        # Normalize the node indices to be 0-based.
        nodes_array = data_array[:, 1:(nodes_per_tet_tri + 1)] \
            - self._numbered_from

        # Three nodes per cell means triangles; otherwise tetrahedra.
        if (nodes_per_tet_tri == 3):
            cell_type = tvtk.Triangle().cell_type
        else:
            cell_type = tvtk.Tetra().cell_type

        self._grid.set_cells(cell_type, nodes_array)

        for i in range(attributes):
            attribute_array = data_array[:, (i + nodes_per_tet_tri + 1):
                                         (i + nodes_per_tet_tri + 2)]
            self._add_attribute_array(attribute_array, i, 'cell')

    def _get_data(self, file_name):
        """ Returns a 1D array containing all the data from the given
            file.
        """
        # BUGFIX: close the file handle deterministically (it was
        # previously leaked; `file` also shadowed the builtin).
        data_file = open(file_name)
        try:
            file_string = data_file.read()
        finally:
            data_file.close()

        # Strip comments.
        pattern = compile('#.*?$', MULTILINE)
        file_string = pattern.sub('', file_string)

        # Load all data into array.
        return fromstring(file_string, dtype=float, sep=" ")

    ########################################
    # Unstructured grid construction
    # methods.
    def _add_attribute_array(self, attribute_array, i, type):
        """ Adds the given attribute array to either point_data or
            cell_data of the unstructured grid.
        """
        attribute_array_name = 'Attribute %i' % i
        if (type == 'cell'):
            # .ele file attributes are of type Int
            tvtk_attribute_array = tvtk.IntArray(name=attribute_array_name)
            attribute_array = map(int, attribute_array)
        else:
            # .node file attributes are of type Float
            tvtk_attribute_array = tvtk.FloatArray(name=attribute_array_name)
        tvtk_attribute_array.from_array(attribute_array)
        getattr(self._grid,
                '%s_data' % type).add_array(tvtk_attribute_array)
        getattr(self,
                '_%s_scalars_list' % type).append(attribute_array_name)

        if (i == 0):
            # Make the first attribute the active scalar by default.
            self._set_data_name(type, 'Attribute 0')

    def _add_boundary_marker_array(self, boundary_marker_array, type):
        """ Adds the given boundary marker array to either point_data
            or cell_data of the unstructured grid.
        """
        boundary_marker_array_name = 'Boundary Marker'
        tvtk_boundary_marker_array = tvtk.IntArray(
            name=boundary_marker_array_name)
        tvtk_boundary_marker_array.from_array(boundary_marker_array)
        getattr(self._grid,
                '%s_data' % type).add_array(tvtk_boundary_marker_array)
        getattr(self,
                '_%s_scalars_list' % type).append(boundary_marker_array_name)
        self._set_data_name(type, 'Boundary Marker')

    ########################################
    # Methods taken and modified from
    # SetActiveAttribute filter.
    def _point_scalars_name_changed(self, value):
        self._set_data_name('point', value)

    def _cell_scalars_name_changed(self, value):
        self._set_data_name('cell', value)

    def _set_data_name(self, attr_type, value):
        """ Sets the selected point or cell scalar to be active, and
            deactivates the scalar of the other type.
        """
        if value is None:
            return

        if (attr_type == 'point'):
            data = self._grid.point_data
            other_data = self._grid.cell_data
        else:
            data = self._grid.cell_data
            other_data = self._grid.point_data

        method = getattr(data, 'set_active_scalars')
        method(value)

        # Deactivate other attribute.
        method = getattr(other_data, 'set_active_scalars')
        method(None)

        self._assign_attribute.assign(value, 'SCALARS',
                                      attr_type.upper() + '_DATA')
        self._assign_attribute.update()

        # Fire an event, so the changes propagate.
        self.data_changed = True
class ExtractVectorNorm(FilterBase):
    """Computes the Euclidean norm of the input vector data (with
    optional scaling between [0, 1]).  Useful when the input carries
    vectors but no scalar data for their magnitude.
    """

    # The version of this class. Used for persistence.
    __version__ = 0

    # The actual TVTK filter that this class manages.
    filter = Instance(tvtk.VectorNorm, args=(), allow_none=False,
                      record=True)

    input_info = PipelineInfo(datasets=['any'],
                              attribute_types=['any'],
                              attributes=['vectors'])

    output_info = PipelineInfo(datasets=['any'],
                               attribute_types=['any'],
                               attributes=['any'])

    ######################################################################
    # `Filter` interface.
    ######################################################################
    def update_pipeline(self):
        """Wire the first upstream output into the VTK filter, name the
        resulting magnitude array, and publish the filter's output.

        Invoked automatically on `pipeline_changed` events.
        """
        if not self.inputs:
            # No input connected yet; nothing to do.
            return

        vtk_filter = self.filter
        vtk_filter.input = self.inputs[0].outputs[0]
        vtk_filter.update()
        self._set_array_name(vtk_filter)
        self._set_outputs([vtk_filter.output])

    def update_data(self):
        """Flush the VTK filter when upstream data changes and forward
        the `data_changed` event downstream.

        Invoked automatically on `data_changed` events.
        """
        if not self.inputs:
            # No input connected yet; nothing to do.
            return

        vtk_filter = self.filter
        vtk_filter.update()
        self._set_array_name(vtk_filter)
        # Propagate the data_changed event.
        self.data_changed = True

    ######################################################################
    # Non-public interface.
    ######################################################################
    def _set_array_name(self, vtk_filter):
        """Give the generated scalar array a descriptive name derived
        from the vector array it was computed from, unless it already
        has one."""
        if not self.inputs:
            return

        result = vtk_filter.output
        point_data = result.point_data
        cell_data = result.cell_data
        point_scalars = point_data.scalars
        cell_scalars = cell_data.scalars

        if (point_scalars is not None) and (not point_scalars.name):
            point_scalars.name = point_data.vectors.name + ' magnitude'
        elif (cell_scalars is not None) and (not cell_scalars.name):
            cell_scalars.name = cell_data.vectors.name + ' magnitude'
class Source(PipelineBase):
    """ Base class for the sources objects in the pipeline. """

    # The version of this class.  Used for persistence.
    __version__ = 0

    # The children of this source in the tree view.  These objects all
    # get the output of this source.
    children = List(Base, record=True)

    # The icon
    icon = 'source.ico'

    # The human-readable type for this object
    type = Str(' data source')

    # Information about what this object can consume.
    input_info = PipelineInfo(datasets=['none'])

    # Information about what this object can produce.
    output_info = PipelineInfo(datasets=['any'])

    # The adder node dialog class
    _adder_node_class = ModuleFilterAdderNode

    ######################################################################
    # `object` interface
    ######################################################################
    def __set_pure_state__(self, state):
        """Restore pickled state.  Children are handled in two passes
        so that the child objects exist before their own state is
        applied.
        """
        # Do everything but our kids.
        set_state(self, state, ignore=['children'])
        # Setup children.
        handle_children_state(self.children, state.children)
        # Now setup the children.
        set_state(self, state, first=['children'], ignore=['*'])

    ######################################################################
    # `Source` interface
    ######################################################################
    def add_module(self, module):
        """ Adds a module smartly. If no ModuleManager instances are
        children, it first creates a new ModuleManager and then adds
        the module to it. If not it adds the module to the first
        available ModuleManager instance."""
        mm = None
        # NOTE(review): this loop does not break, so the *last*
        # ModuleManager child is used, not the "first" as the
        # docstring says -- confirm which behavior is intended.
        for child in self.children:
            if isinstance(child, ModuleManager):
                mm = child
        if mm is None:
            # No manager yet: create one and attach it to us.
            mm = ModuleManager(source=self, scene=self.scene)
            if self.running:
                mm.start()
            self.children.append(mm)
            if self.recorder is not None:
                # Register the new manager for script recording.
                index = len(self.children) - 1
                self.recorder.register(mm, parent=self,
                                       trait_name_on_parent='children[%d]'%index)
        mm.children.append(module)

    @recordable
    def save_output(self, fname):
        """Save our output (by default the first of our outputs) to the
        specified filename as a VTK file.  Both old style and new style
        XML files are supported.
        """
        if len(self.outputs) > 0:
            write_data(self.outputs[0], fname)
        else:
            error('Object has no outputs to save!')

    ######################################################################
    # `Base` interface
    ######################################################################
    def start(self):
        """This is invoked when this object is added to the mayavi
        pipeline.
        """
        # Do nothing if we are already running.
        if self.running:
            return

        # Start all our children.
        for obj in self.children:
            try:
                obj.start()
            except:
                # Log the error but keep starting the remaining
                # children.
                exception()

        # Call parent method to set the running state.
        super(Source, self).start()

    def stop(self):
        """Invoked when this object is removed from the mayavi
        pipeline.
        """
        if not self.running:
            return

        # Stop all our children.
        for obj in self.children:
            obj.stop()

        # Call parent method to set the running state.
        super(Source, self).stop()

    def add_child(self, child):
        """This method intelligently adds a child to this object in
        the MayaVi pipeline.
        """
        if is_filter(child):
            # It is a Filter, so append to children.
            self.children.append(child)
        elif isinstance(child, Source):
            # A non-filter source object.  This should be added to the
            # scene.
            self.parent.add_child(child)
        elif isinstance(child, Module):
            # Modules should be added carefully via add_module.
            self.add_module(child)
        elif isinstance(child, ModuleManager):
            self.children.append(child)
        else:
            # Fall back to appending directly.
            self.children.append(child)

    def remove_child(self, child):
        """Remove specified child from our children.
        """
        self.children.remove(child)

    ######################################################################
    # `TreeNodeObject` interface
    ######################################################################
    def tno_can_add(self, node, add_object):
        """ Returns whether a given object is droppable on the node.
        """
        from enthought.mayavi.core.filter import Filter
        # `add_object` may be either a class or an instance;
        # issubclass raises TypeError for instances, hence the
        # isinstance fallback.
        try:
            if issubclass(add_object, Filter) or \
                   issubclass(add_object, ModuleManager):
                return True
        except TypeError:
            if isinstance(add_object, Filter) or \
                   isinstance(add_object, ModuleManager):
                return True
        return False

    def tno_drop_object(self, node, dropped_object):
        """ Returns a droppable version of a specified object.
        """
        # Returns None implicitly for objects we do not accept.
        if is_filter(dropped_object) or \
               isinstance(dropped_object, ModuleManager):
            return dropped_object

    ######################################################################
    # Non-public interface
    ######################################################################
    def _children_changed(self, old, new):
        # The whole children list was replaced.
        self._handle_children(old, new)

    def _children_items_changed(self, list_event):
        # Items were added/removed from the children list in place.
        self._handle_children(list_event.removed, list_event.added)

    def _handle_children(self, removed, added):
        """Common handler: stop removed children and wire up newly
        added ones (scene, parent, source/input links, start).
        """
        # Stop all the removed children.
        for obj in removed:
            obj.stop()
        # Process the new objects.
        for obj in added:
            obj.set(scene=self.scene, parent=self)
            if isinstance(obj, ModuleManager):
                obj.source = self
            elif is_filter(obj):
                obj.inputs.append(self)
            if self.running:
                try:
                    obj.start()
                except:
                    exception()

    def _scene_changed(self, old, new):
        super(Source, self)._scene_changed(old, new)
        # Propagate the new scene to all children.
        for obj in self.children:
            obj.scene = new

    def _visible_changed(self, value):
        # Propagate visibility to the children first.
        for c in self.children:
            c.visible = value
        super(Source, self)._visible_changed(value)

    def _menu_helper_default(self):
        # Lazy import avoids a circular dependency at module load.
        from enthought.mayavi.core.traits_menu import FilterMenuHelper
        return FilterMenuHelper(object=self)

    def _extra_menu_items(self):
        """Return a save output menu action."""
        save_output = Action(name='Save output to file',
                             action='object._save_output_action',
                             enabled_when='len(object.outputs) > 0')
        return [save_output]

    def _save_output_action(self):
        """Pops up a dialog box for the action to ask for a file."""
        # FIXME: in a refactor this should all go in a separate view
        # related object.
        from enthought.pyface.api import FileDialog, OK
        wildcard = 'All files (*.*)|*.*|'\
                   'VTK XML files (*.xml)|*.xml|'\
                   'Image Data (*.vti)|*.vti|'\
                   'Poly Data (*.vtp)|*.vtp|'\
                   'Rectilinear Grid (*.vtr)|*.vtr|'\
                   'Structured Grid (*.vts)|*.vts|'\
                   'Unstructured Grid (*.vtu)|*.vtu|'\
                   'Old-style VTK files (*.vtk)|*.vtk'
        dialog = FileDialog(title='Save output to file',
                            action='save as', wildcard=wildcard
                            )
        if dialog.open() == OK:
            self.save_output(dialog.path)
class Streamline(Module):
    """Traces streamlines through the input vector field and draws
    them as lines, ribbons or tubes (see `streamline_type`).
    """

    # The version of this class.  Used for persistence.
    __version__ = 0

    # The streamline generator.
    stream_tracer = Instance(tvtk.StreamTracer, allow_none=False,
                             record=True)

    # The seed for the streamlines.
    seed = Instance(SourceWidget, allow_none=False, record=True)

    # The update mode of the seed -- this is delegated to the
    # SourceWidget.
    update_mode = Delegate('seed', modify=True)

    # Determines if the streamlines are shown as lines or ribbons or
    # tubes.
    streamline_type = Trait('line',
                            TraitPrefixList(['line', 'ribbon', 'tube']),
                            desc='draw streamlines as lines/ribbons/tubes')

    # The ribbon filter.
    ribbon_filter = Instance(tvtk.RibbonFilter, allow_none=False,
                             record=True)

    # The tube filter.
    tube_filter = Instance(tvtk.TubeFilter, allow_none=False,
                           record=True)

    # The actor component that represents the visualization.
    actor = Instance(Actor, allow_none=False, record=True)

    # Information about what this object can consume.
    input_info = PipelineInfo(datasets=['any'],
                              attribute_types=['any'],
                              attributes=['vectors'])

    ########################################
    # Private traits.

    # True until the first pipeline update -- used so that the
    # ribbon/tube sizes are auto-computed only once.
    _first = Bool(True)

    ########################################
    # View related code.

    # A button to update the streamlines.
    update_streamlines = Button('Update Streamlines')

    _tube_group = Group(Item(name='capping'),
                        Item(name='sides_share_vertices'),
                        Item(name='vary_radius'),
                        Item(name='number_of_sides'),
                        Item(name='radius'),
                        Item(name='radius_factor'),
                        Item(name='offset'),
                        Item(name='on_ratio'))

    _ribbon_group = Group(Item(name='vary_width'),
                          Item(name='width'),
                          Item(name='width_factor'),
                          Item(name='angle'))

    view = View(Group(Group(Item(name='update_mode'),
                            ),
                      Group(Item(name='update_streamlines'),
                            show_labels=False,
                            ),
                      Group(Item(name='streamline_type'),
                            Item(name='ribbon_filter', style='custom',
                                 visible_when='object.streamline_type == "ribbon"',
                                 editor=InstanceEditor(view=View(_ribbon_group))),
                            Item(name='tube_filter', style='custom',
                                 visible_when='object.streamline_type == "tube"',
                                 editor=InstanceEditor(view=View(_tube_group))),
                            show_labels=False,
                            label='Streamline'),
                      label='Streamline'),
                Group(Item(name='seed', style='custom', resizable=True),
                      label='Seed',
                      show_labels=False),
                Group(Item(name='stream_tracer', style='custom',
                           resizable=True),
                      label='StreamTracer',
                      show_labels=False),
                Group(Item(name='actor', style='custom'),
                      label='Actor',
                      show_labels=False),
                resizable=True)

    ######################################################################
    # `Module` interface
    ######################################################################
    def setup_pipeline(self):
        """Override this method so that it *creates* the tvtk
        pipeline.

        This method is invoked when the object is initialized via
        `__init__`.  Note that at the time this method is called, the
        tvtk data pipeline will *not* yet be setup.  So upstream data
        will not be available.  The idea is that you simply create the
        basic objects and setup those parts of the pipeline not
        dependent on upstream sources and filters.  You should also
        set the `actors` attribute up at this point.
        """
        # Create and setup the default objects.
        self.seed = SourceWidget()
        self.stream_tracer = tvtk.StreamTracer(maximum_propagation=50,
                                               integration_direction='forward',
                                               compute_vorticity=True,
                                               integrator_type='runge_kutta4',
                                               )
        self.ribbon_filter = tvtk.RibbonFilter()
        self.tube_filter = tvtk.TubeFilter()

        self.actor = Actor()
        # Setup the actor suitably for this module.
        self.actor.property.line_width = 2.0

    def update_pipeline(self):
        """Override this method so that it *updates* the tvtk pipeline
        when data upstream is known to have changed.

        This method is invoked (automatically) when any of the inputs
        sends a `pipeline_changed` event.
        """
        mm = self.module_manager
        if mm is None:
            return

        src = mm.source
        self.stream_tracer.input = src.outputs[0]
        self.seed.inputs = [src]

        # Setup the radius/width of the tube/ribbon filters based on
        # given input.
        if self._first:
            # Use a small fraction of the bounding-box diagonal as the
            # default ribbon width / tube radius.
            b = src.outputs[0].bounds
            l = [(b[1] - b[0]), (b[3] - b[2]), (b[5] - b[4])]
            length = sqrt(l[0] * l[0] + l[1] * l[1] + l[2] * l[2])
            self.ribbon_filter.width = length * 0.0075
            self.tube_filter.radius = length * 0.0075
            self._first = False

        # Route the output through the right filter for the current
        # streamline type.
        self._streamline_type_changed(self.streamline_type)
        # Set the LUT for the mapper.
        self.actor.set_lut(mm.scalar_lut_manager.lut)

        self.pipeline_changed = True

    def update_data(self):
        """Override this method so that it flushes the vtk pipeline if
        that is necessary.

        This method is invoked (automatically) when any of the inputs
        sends a `data_changed` event.
        """
        # Just set data_changed, the components should do the rest if
        # they are connected.
        self.data_changed = True

    ######################################################################
    # Non-public methods.
    ######################################################################
    def _streamline_type_changed(self, value):
        # Publish the tracer output either directly (lines) or through
        # the ribbon/tube filter.
        if self.module_manager is None:
            return
        st = self.stream_tracer
        rf = self.ribbon_filter
        tf = self.tube_filter
        if value == 'line':
            self.outputs = [st.output]
        elif value == 'ribbon':
            rf.input = st.output
            self.outputs = [rf.output]
        elif value == 'tube':
            tf.input = st.output
            self.outputs = [tf.output]
        self.render()

    def _update_streamlines_fired(self):
        # Handler for the 'Update Streamlines' button.
        self.seed.update_poly_data()
        self.render()

    def _stream_tracer_changed(self, old, new):
        # Re-wire render notifications and pipeline connections when
        # the tracer object itself is replaced.
        if old is not None:
            old.on_trait_change(self.render, remove=True)
        seed = self.seed
        if seed is not None:
            new.source = seed.poly_data
        new.on_trait_change(self.render)
        mm = self.module_manager
        if mm is not None:
            new.input = mm.source.outputs[0]

        # A default output so there are no pipeline errors.  The
        # update_pipeline call corrects this if needed.
        self.outputs = [new.output]

        self.update_pipeline()

    def _seed_changed(self, old, new):
        # Point the tracer at the new seed geometry.
        st = self.stream_tracer
        if st is not None:
            st.source = new.poly_data
        self._change_components(old, new)

    def _ribbon_filter_changed(self, old, new):
        if old is not None:
            old.on_trait_change(self.render, remove=True)
        new.on_trait_change(self.render)
        self._streamline_type_changed(self.streamline_type)

    def _tube_filter_changed(self, old, new):
        if old is not None:
            old.on_trait_change(self.render, remove=True)
        new.on_trait_change(self.render)
        self._streamline_type_changed(self.streamline_type)

    def _actor_changed(self, old, new):
        new.scene = self.scene
        new.inputs = [self]
        self._change_components(old, new)
class VTKFileReader(VTKXMLFileReader): """A VTK file reader. This does not handle the new XML file format but only the older format. The reader supports all the different types of data sets. This reader also supports a time series. """ # The version of this class. Used for persistence. __version__ = 0 # The VTK data file reader. reader = Instance(tvtk.DataSetReader, args=(), kw={ 'read_all_scalars': True, 'read_all_vectors': True, 'read_all_tensors': True, 'read_all_fields': True }) # Information about what this object can produce. output_info = PipelineInfo(datasets=['any'], attribute_types=['any'], attributes=['any']) ###################################################################### # Non-public interface ###################################################################### def _file_path_changed(self, fpath): value = fpath.get() if len(value) == 0: self.name = 'No VTK file' return else: self.reader.file_name = value self.update() # Setup the outputs by resetting self.outputs. Changing # the outputs automatically fires a pipeline_changed # event. try: n = self.reader.number_of_outputs except AttributeError: # for VTK >= 4.5 n = self.reader.number_of_output_ports outputs = [] for i in range(n): outputs.append(self.reader.get_output(i)) self.outputs = outputs # FIXME: Only the first output goes through the assign # attribute filter. aa = self._assign_attribute aa.input = outputs[0] outputs[0] = aa.output self.update_data() self.outputs = outputs # FIXME: The output info is only based on the first output. self.output_info.datasets = [get_tvtk_dataset_name(outputs[0])] # Change our name on the tree view self.name = self._get_name() def _get_name(self): """ Gets the name to display on the tree view. """ fname = basename(self.file_path.get()) ret = "VTK file (%s)" % fname if len(self.file_list) > 1: ret += " (timeseries)" if '[Hidden]' in self.name: ret += ' [Hidden]' return ret
class VRMLImporter(Source):
    """A source that renders the contents of a VRML file.  The
    importer adds its actors directly to the scene (rather than
    producing pipeline outputs), so this source declares no outputs.
    """

    # The version of this class.  Used for persistence.
    __version__ = 0

    # The file name.
    file_name = Str('', enter_set=True, auto_set=False,
                    desc='the VRML file name')

    # The VRML importer.
    reader = Instance(tvtk.VRMLImporter, args=(), allow_none=False,
                      record=True)

    # Information about what this object can produce.
    output_info = PipelineInfo(datasets=['none'])

    ################
    # Private traits.

    # Our file path used for persistence
    _file_path = Instance(FilePath, args=())

    # Our View.
    view = View(Item(name='file_name', editor=FileEditor()))

    ######################################################################
    # `object` interface
    ######################################################################
    def __get_pure_state__(self):
        d = super(VRMLImporter, self).__get_pure_state__()
        # These traits are dynamically created.
        for name in ('reader', 'file_name'):
            d.pop(name)
        return d

    def __set_pure_state__(self, state):
        # The reader has its own file_name which needs to be fixed.
        fname = state._file_path.abs_pth
        # Now call the parent class to setup everything.
        self.initialize(fname)
        # Setup the rest of the state.
        set_state(self, state, ignore=['_file_path'])

    def initialize(self, file_name):
        # Setting the trait triggers _file_name_changed, which does
        # the actual reading.
        self.file_name = file_name

    ######################################################################
    # `PipelineBase` interface.
    ######################################################################
    def add_actors(self):
        """Adds `self.actors` to the scene.
        """
        if not self._actors_added:
            self.reader.render_window = self.scene.render_window
            self._update_reader()
            self._actors_added = True
            if not self.visible:
                # Honor a pre-existing hidden state.
                self._visible_changed(self.visible)
            self.scene.render()

    def remove_actors(self):
        """Removes `self.actors` from the scene.
        """
        if self._actors_added:
            self.scene.remove_actors(self.actors)
            self._actors_added = False
            self.scene.render()

    ######################################################################
    # Non-public interface
    ######################################################################
    def _file_name_changed(self, value):
        # Re-read the file and refresh the tree-view name.
        reader = self.reader
        reader.file_name = value
        self._file_path.set(value)
        self._update_reader()
        self.render()
        name = "VRML file (%s)" % basename(self.file_name)
        if '[Hidden]' in self.name:
            name += ' [Hidden]'
        self.name = name

    def _update_reader(self):
        """Run the importer and record which actors it added by
        diffing the scene's actors before and after the read.
        """
        reader = self.reader
        if self.scene is None or reader.file_name is None \
               or len(reader.file_name) == 0:
            return
        actors1 = [x for x in self.scene.renderer.actors]
        reader.read()
        self.scene.render()
        actors2 = [x for x in self.scene.renderer.actors]
        self.actors = [x for x in actors2 if x not in actors1]
        # If these are the first actors on scene reset the view.
        if len(actors1) == 0:
            self.scene.reset_zoom()

    def _scene_changed(self, old, new):
        # Move our actors over to the new scene.
        if self._actors_added:
            old.remove_actors(self.actors)
            reader = self.reader
            reader.render_window = new.render_window
            self._update_reader()

    def _actors_changed(self, old, new):
        if self._actors_added:
            self.scene.remove_actors(old)
            # The actors are added automatically when the importer
            # does a read.
            self.scene.render()

    def _actors_items_changed(self, list_event):
        if self._actors_added:
            self.scene.remove_actors(list_event.removed)
            # The actors are added automatically when the importer
            # does a read.
            self.scene.render()

    def _visible_changed(self, value):
        if value:
            if not self._actors_added:
                self.scene.add_actors(self.actors)
                self._actors_added = True

        super(VRMLImporter, self)._visible_changed(value)
class ExtractGrid(FilterBase):
    """This filter enables one to select a portion of, or subsample an
    input dataset which may be a StructuredPoints, StructuredGrid or
    Rectilinear.
    """

    # The version of this class.  Used for persistence.
    __version__ = 0

    # Minimum x value.
    x_min = Range(value=0, low='_x_low', high='_x_high',
                  enter_set=True, auto_set=False,
                  desc='minimum x value of the domain')

    # Maximum x value.
    x_max = Range(value=10000, low='_x_low', high='_x_high',
                  enter_set=True, auto_set=False,
                  desc='maximum x value of the domain')

    # Minimum y value.
    y_min = Range(value=0, low='_y_low', high='_y_high',
                  enter_set=True, auto_set=False,
                  desc='minimum y value of the domain')

    # Maximum y value.
    y_max = Range(value=10000, low='_y_low', high='_y_high',
                  enter_set=True, auto_set=False,
                  desc='maximum y value of the domain')

    # Minimum z value.
    z_min = Range(value=0, low='_z_low', high='_z_high',
                  enter_set=True, auto_set=False,
                  desc='minimum z value of the domain')

    # Maximum z value.
    z_max = Range(value=10000, low='_z_low', high='_z_high',
                  enter_set=True, auto_set=False,
                  desc='maximum z value of the domain')

    # Sample rate in x.
    x_ratio = Range(value=1, low='_min_sample', high='_x_s_high',
                    enter_set=True, auto_set=False,
                    desc='sample rate along x')

    # Sample rate in y.
    y_ratio = Range(value=1, low='_min_sample', high='_y_s_high',
                    enter_set=True, auto_set=False,
                    desc='sample rate along y')

    # Sample rate in z.
    z_ratio = Range(value=1, low='_min_sample', high='_z_s_high',
                    enter_set=True, auto_set=False,
                    desc='sample rate along z')

    # The actual TVTK filter that this class manages.  The concrete
    # filter class is chosen in update_pipeline based on the input
    # dataset type.
    filter = Instance(tvtk.Object, tvtk.ExtractVOI(), allow_none=False)

    # Information about what this object can consume.
    input_info = PipelineInfo(datasets=['image_data',
                                        'rectilinear_grid',
                                        'structured_grid'],
                              attribute_types=['any'],
                              attributes=['any'])

    # Information about what this object can produce.
    output_info = PipelineInfo(datasets=['image_data',
                                         'rectilinear_grid',
                                         'structured_grid'],
                               attribute_types=['any'],
                               attributes=['any'])

    ########################################
    # Private traits.

    # Determines the lower/upper limit of the axes for the sliders.
    _min_sample = Int(1)
    _x_low = Int(0)
    _x_high = Int(10000)
    _x_s_high = Int(100)
    _y_low = Int(0)
    _y_high = Int(10000)
    _y_s_high = Int(100)
    _z_low = Int(0)
    _z_high = Int(10000)
    _z_s_high = Int(100)

    ########################################
    # View related traits.

    # The View for this object.
    view = View(Group(Item(label='Select Volume Of Interest'),
                      Item(name='x_min'),
                      Item(name='x_max'),
                      Item(name='y_min'),
                      Item(name='y_max'),
                      Item(name='z_min'),
                      Item(name='z_max'),
                      Item('_'),
                      Item(label='Select Sample Ratio'),
                      Item(name='x_ratio'),
                      Item(name='y_ratio'),
                      Item(name='z_ratio'),
                      label='VOI'),
                Group(Item(name='filter', style='custom',
                           resizable=True),
                      show_labels=False,
                      label='Filter'),
                resizable=True,
                )

    ######################################################################
    # `object` interface
    ######################################################################
    def __get_pure_state__(self):
        # Strip all the dynamically computed VOI/sample-rate traits
        # from the pickled state; they are recomputed from the input.
        d = super(ExtractGrid, self).__get_pure_state__()
        for axis in ('x', 'y', 'z'):
            for name in ('_min', '_max'):
                d.pop(axis + name, None)
            d.pop('_' + axis + '_low', None)
            d.pop('_' + axis + '_high', None)
            d.pop('_' + axis + '_s_high', None)
            d.pop(axis + '_ratio', None)
        return d

    ######################################################################
    # `Filter` interface
    ######################################################################
    def update_pipeline(self):
        """Pick the right TVTK extraction filter for the input dataset
        type, connect it, and refresh the slider limits/VOI/sampling.
        """
        inputs = self.inputs
        if len(inputs) == 0:
            return

        input = inputs[0].outputs[0]
        mapping = {'vtkStructuredGrid': tvtk.ExtractGrid,
                   'vtkRectilinearGrid': tvtk.ExtractRectilinearGrid,
                   'vtkImageData': tvtk.ExtractVOI}

        for key, klass in mapping.iteritems():
            if input.is_a(key):
                self.filter = klass()
                break
        else:
            error('This filter does not support %s objects'%\
                  (input.__class__.__name__))
            return

        fil = self.filter
        fil.input = input
        fil.update_whole_extent()
        fil.update()
        self._set_outputs([fil.output])
        self._update_limits()
        self._update_voi()
        self._update_sample_rate()

    def update_data(self):
        """This method is invoked (automatically) when any of the
        inputs sends a `data_changed` event.
        """
        self._update_limits()
        fil = self.filter
        fil.update_whole_extent()
        fil.update()
        # Propagate the data_changed event.
        self.data_changed = True

    ######################################################################
    # Non-public methods.
    ######################################################################
    def _update_limits(self):
        # Derive slider limits from the input's whole extent.
        extents = self.filter.input.whole_extent
        self._x_low, self._x_high = extents[:2]
        self._y_low, self._y_high = extents[2:4]
        self._z_low, self._z_high = extents[4:]
        self._x_s_high = max(1, self._x_high)
        self._y_s_high = max(1, self._y_high)
        self._z_s_high = max(1, self._z_high)

    # The min/max handlers below keep min <= max: moving one past the
    # other drags the other along (which re-triggers an update).
    def _x_min_changed(self, val):
        if val > self.x_max:
            self.x_max = val
        else:
            self._update_voi()

    def _x_max_changed(self, val):
        if val < self.x_min:
            self.x_min = val
        else:
            self._update_voi()

    def _y_min_changed(self, val):
        if val > self.y_max:
            self.y_max = val
        else:
            self._update_voi()

    def _y_max_changed(self, val):
        if val < self.y_min:
            self.y_min = val
        else:
            self._update_voi()

    def _z_min_changed(self, val):
        if val > self.z_max:
            self.z_max = val
        else:
            self._update_voi()

    def _z_max_changed(self, val):
        if val < self.z_min:
            self.z_min = val
        else:
            self._update_voi()

    def _x_ratio_changed(self):
        self._update_sample_rate()

    def _y_ratio_changed(self):
        self._update_sample_rate()

    def _z_ratio_changed(self):
        self._update_sample_rate()

    def _update_voi(self):
        # Push the volume of interest down to the TVTK filter.
        f = self.filter
        f.voi = (self.x_min, self.x_max,
                 self.y_min, self.y_max,
                 self.z_min, self.z_max)
        f.update_whole_extent()
        f.update()
        self.data_changed = True

    def _update_sample_rate(self):
        # Push the sample rates down to the TVTK filter.
        f = self.filter
        f.sample_rate = (self.x_ratio, self.y_ratio, self.z_ratio)
        f.update_whole_extent()
        f.update()
        self.data_changed = True

    def _filter_changed(self, old, new):
        # Keep render notifications attached to the current filter.
        if old is not None:
            old.on_trait_change(self.render, remove=True)
        new.on_trait_change(self.render)
class IsoSurface(Module):
    """Draws iso-surfaces of the input scalar data, with optional
    normal computation for a smoother appearance.
    """

    # The version of this class.  Used for persistence.
    __version__ = 0

    # The contour component.
    contour = Instance(Contour, record=True)

    # Specify if normals are to be computed to make a smoother surface.
    compute_normals = Bool(True, desc='if normals are to be computed '\
                           'to make the iso-surface smoother')

    # The component that computes the normals.
    normals = Instance(PolyDataNormals, record=True)

    # The actor component that represents the iso-surface.
    actor = Instance(Actor, record=True)

    # Information about what this object can consume.
    input_info = PipelineInfo(datasets=['any'],
                              attribute_types=['any'],
                              attributes=['scalars'])

    ########################################
    # The view of this object.
    # Commented out, since we are now using the iso_surface_view.py
    # version.
    #view = View([Group(
    #                Item( name = 'contour',
    #                      style = 'custom' ),
    #                show_labels = False,
    #                show_border = True,
    #                label = 'Contours' ),
    #             Group(
    #                Item( name = 'compute_normals' ),
    #                '_',
    #                Item( name = 'normals',
    #                      style = 'custom',
    #                      show_label = False,
    #                      enabled_when = 'compute_normals' ),
    #                show_border = True,
    #                label = 'Normals' ),
    #             Group(
    #                Item( name = 'actor',
    #                      style = 'custom' ),
    #                show_labels = False )
    #            ]
    #            )

    ######################################################################
    # `Module` interface
    ######################################################################
    def setup_pipeline(self):
        """Override this method so that it *creates* the tvtk
        pipeline.

        This method is invoked when the object is initialized via
        `__init__`.  Note that at the time this method is called, the
        tvtk data pipeline will *not* yet be setup.  So upstream data
        will not be available.  The idea is that you simply create the
        basic objects and setup those parts of the pipeline not
        dependent on upstream sources and filters.  You should also
        set the `actors` attribute up at this point.
        """
        # Create the components
        self.contour = Contour(show_filled_contours=False)
        self.normals = PolyDataNormals()
        self.actor = Actor()
        # Setup the actor suitably for this module.
        self.actor.mapper.scalar_visibility = 1

    def update_pipeline(self):
        """Override this method so that it *updates* the tvtk pipeline
        when data upstream is known to have changed.

        This method is invoked (automatically) when any of the inputs
        sends a `pipeline_changed` event.
        """
        mm = self.module_manager
        if mm is None:
            return

        # Data is available, so set the input for the grid plane.
        self.contour.inputs = [mm.source]

        # Force the normals setting to be noted.
        self._compute_normals_changed(self.compute_normals)

        # Set the LUT for the mapper.
        self.actor.set_lut(mm.scalar_lut_manager.lut)

        self.pipeline_changed = True

    def update_data(self):
        """Override this method so that it flushes the vtk pipeline if
        that is necessary.

        This method is invoked (automatically) when any of the inputs
        sends a `data_changed` event.
        """
        # Just set data_changed, the component should do the rest.
        self.data_changed = True

    ######################################################################
    # Non-public interface.
    ######################################################################
    def _compute_normals_changed(self, value):
        # Insert or remove the normals component from the actor's
        # input chain depending on the setting.
        if self.module_manager is None:
            return
        actor = self.actor
        if value:
            if actor:
                actor.inputs = [self.normals]
        else:
            if actor:
                actor.inputs = [self.contour]
        self.render()

    def _contour_changed(self, old, new):
        # Keep the normals component fed by the current contour.
        normals = self.normals
        if normals is not None:
            normals.inputs = [new]
        self._change_components(old, new)

    def _normals_changed(self, old, new):
        contour = self.contour
        if contour is not None:
            new.inputs = [contour]
        self._change_components(old, new)

    def _actor_changed(self, old, new):
        # Here we set the inputs in any case to avoid VTK pipeline
        # errors.  The pipeline is corrected when update_pipeline is
        # called anyway.
        contour = self.contour
        if contour is not None:
            new.inputs = [contour]
        self._change_components(old, new)
# NOTE(review): the code below up to `return src` looks like the
# *body* of a reader factory function (it references an otherwise
# undefined `fname` and ends with a `return`) -- the enclosing
# `def spe_reader(fname):` line appears to be missing from this
# excerpt; confirm against the original example file.
from enthought.tvtk.api import tvtk
from enthought.mayavi.sources.vtk_data_source import VTKDataSource
# Do your own reader stuff here, I'm just reading a VTK file with a
# different extension here.
import SpeFile as spe
r = spe.SpeFile(fname)
from enthought.mayavi.sources.api import ArraySource
src = ArraySource()
src.scalar_data = r.GetNumArray()
src.scalar_name = fname
import os.path
src.name = os.path.split(fname)[-1]
return src


# Metadata describing the SPE reader so mayavi can offer it when
# opening files.
spe_reader_info = SourceMetadata(id="Winview File Reader",
                                 factory='spe_reader.spe_reader',
                                 tooltip="Load a SPE file",
                                 desc="Load a SPE file",
                                 help="Load a SPE file",
                                 menu_name="&SPE file",
                                 extensions=['spe', 'SPE'],
                                 wildcard='SPE files (*.SPE)|*.SPE',
                                 output_info=PipelineInfo(
                                     datasets=['image_data'],
                                     attribute_types=['any'],
                                     attributes=['any']))

# Inject this information in the mayavi registry
registry.sources.append(spe_reader_info)
class ImageDataProbe(Filter):
    """ A filter that can be used to probe any dataset using a
    Structured Points dataset.  The filter also allows one to convert
    the scalar data to an unsigned short array so that the scalars can
    be used for volume visualization.
    """

    # The image data onto which the data is probed.
    probe_data = Instance(tvtk.ImageData, args=())

    # The probe filter.
    filter = Instance(tvtk.ProbeFilter, args=())

    # If True, the input scalars are rescaled into an unsigned short
    # array (suitable for volume rendering).
    rescale_scalars = Bool(False, desc='if the input scalars are '\
                           'rescaled to an unsigned short '\
                           'array')

    # Specifies if we can change the spacing/dimensions -- not allowed
    # for imagedata/structured points data.
    allow_changes = Bool(True)

    # Spacing of points in the image data.
    spacing = Array(value=(0.0, 0.0, 0.0),
                    shape=(3, ),
                    cols=1,
                    dtype=float,
                    enter_set=True,
                    auto_set=False,
                    labels=['sx', 'sy', 'sz'],
                    desc='the spacing of points')

    # Dimensions of the image data.
    dimensions = Array(value=(0, 0, 0),
                       shape=(3, ),
                       cols=1,
                       dtype=int,
                       enter_set=True,
                       auto_set=False,
                       labels=['nx', 'ny', 'nz'],
                       desc='the dimensions of the image data')

    # Reset settings to defaults.
    reset_defaults = Button(desc='if probe data is reset to defaults')

    # Name of rescaled scalar to generate.
    rescaled_scalar_name = Str('probe_us_array')

    # Information about what this object can consume.
    input_info = PipelineInfo(datasets=['image_data'],
                              attribute_types=['any'],
                              attributes=['any'])

    # Information about what this object can produce.
    output_info = PipelineInfo(datasets=['image_data'],
                               attribute_types=['any'],
                               attributes=['any'])

    ########################################
    # Private traits.

    # A trait to prevent static handlers from firing unnecessarily.
    _event_handled = Bool(False)

    ########################################
    # View related traits.
    view = View(Group(Item(name='dimensions',
                           enabled_when='allow_changes'),
                      Item(name='spacing',
                           enabled_when='allow_changes'),
                      Item(name='rescale_scalars'),
                      Item(name='reset_defaults',
                           show_label=False),
                      ),
                resizable=True)

    ######################################################################
    # `Filter` interface.
    ######################################################################
    def setup_pipeline(self):
        """Creates the pipeline."""
        self.filter.input = self.probe_data

    def update_pipeline(self):
        """Connect and update the pipeline."""
        inputs = self.inputs
        if len(inputs) == 0:
            return
        fil = self.filter
        # The upstream data is the probe *source*; probe_data is the
        # structured-points geometry being probed onto.
        fil.source = inputs[0].outputs[0]
        reset = False
        # Zero dimensions means the probe geometry was never set up.
        if self.dimensions.sum() == 0:
            reset = True
        self._setup_probe_data(reset)
        fil.update()
        self._rescale_scalars_changed(self.rescale_scalars)
        self._set_outputs([fil.output])

    ######################################################################
    # Non-public interface.
    ######################################################################
    def _setup_probe_data(self, reset=False):
        """Configure the probe's ImageData geometry.  For image-data
        input the geometry is copied verbatim (and locked); otherwise,
        when `reset` is set, a default geometry is derived from the
        input's bounds and point count.
        """
        pd = self.probe_data
        input = self.inputs[0].outputs[0]
        if input.is_a('vtkImageData'):
            self.allow_changes = False
            self.set(spacing=input.spacing,
                     dimensions=input.dimensions)
            pd.set(origin=input.origin,
                   dimensions=input.dimensions,
                   spacing=input.spacing)
            pd.update()
        elif reset:
            self.allow_changes = True
            b = numpy.array(input.bounds)
            pd.origin = b[::2]
            l = b[1::2] - b[::2]
            tot_len = sum(l)
            # Distribute roughly the same number of points as the
            # input over the bounding box (cube-root heuristic,
            # rounded).
            npnt = pow(input.number_of_points, 1. / 3.) + 0.5
            fac = 3.0 * npnt / tot_len
            dims = (l * fac).astype(int) + 1
            extent = (0, dims[0] - 1, 0, dims[1] - 1, 0, dims[2] - 1)
            pd.set(extent=extent,
                   update_extent=extent,
                   whole_extent=extent,
                   dimensions=dims)
            max_dim = dims.max()
            # Guard against degenerate (flat) axes when computing the
            # spacing.
            dims = (dims - 1).clip(min=1, max=max_dim + 1)
            l = l.clip(min=1e-3, max=l.max() + 1.0)
            pd.spacing = l / dims
            # Suppress our own static handlers while syncing traits.
            self._event_handled = True
            self.set(spacing=pd.spacing,
                     dimensions=pd.dimensions)
            self._event_handled = False
            pd.update()

    def _rescale_scalars_changed(self, value):
        """Add (or deactivate) an unsigned-short copy of the probed
        scalars, rescaled to the full 16-bit range.
        """
        out = self.filter.output
        pd = out.point_data
        sc = pd.scalars
        if sc is None:
            # no input scalars
            return

        if not value:
            # Switch back to the original scalars if our rescaled
            # array is the active one.
            orig_sc = self.inputs[0].outputs[0].point_data.scalars
            if sc.is_a('vtkUnsignedShortArray') and \
               sc.name == self.rescaled_scalar_name:
                pd.set_active_scalars(orig_sc.name)
                pd.update()
                self.pipeline_changed = True
                self.render()
            return

        s_min, s_max = sc.range
        # checking to see if input array is constant.
        avg = (s_max + s_min) * 0.5
        diff = 1
        if (s_max > avg) and (avg > s_min):
            diff = s_max - s_min
        arr = (sc.to_array() - s_min) * 65535.0 / diff
        uc = tvtk.UnsignedShortArray(name=self.rescaled_scalar_name)
        uc.from_array(arr)
        pd.add_array(uc)
        pd.set_active_scalars(self.rescaled_scalar_name)
        pd.update()
        self.pipeline_changed = True
        self.render()

    def _dimensions_changed(self, value):
        if not self.allow_changes or self._event_handled:
            return

        # Recompute the spacing so the grid still spans the input's
        # bounds.
        max_d = value.max()
        dims = (value - 1).clip(min=1, max=max_d)
        b = numpy.array(self.inputs[0].outputs[0].bounds)
        l = b[1::2] - b[::2]
        self.spacing = l / dims
        self._update_probe()

    def _spacing_changed(self, value):
        if not self.allow_changes or self._event_handled:
            return

        # Recompute the dimensions from the requested spacing.
        b = numpy.array(self.inputs[0].outputs[0].bounds)
        l = b[1::2] - b[::2]
        dims = (l / value + 0.5).astype(int) + 1
        # Recalculate space because of rounding.
        maxd = dims.max()
        dims1 = (dims - 1).clip(min=1, max=maxd)
        sp = l / dims1
        # Suppress our own static handlers while syncing traits.
        self._event_handled = True
        self.set(spacing=sp, dimensions=dims)
        self._event_handled = False
        self._update_probe()

    def _update_probe(self):
        """Push the current dimensions/spacing into the probe geometry
        and re-run the probe filter.
        """
        pd = self.probe_data
        dims = self.dimensions
        spacing = self.spacing
        extent = (0, dims[0] - 1, 0, dims[1] - 1, 0, dims[2] - 1)
        pd.set(extent=extent,
               update_extent=extent,
               whole_extent=extent,
               dimensions=dims,
               spacing=spacing)
        pd.modified()
        pd.update()
        fil = self.filter
        # Temporarily silence VTK warnings while the filter is rewired.
        w = fil.global_warning_display
        fil.global_warning_display = False
        fil.remove_all_inputs()
        fil.input = pd
        fil.update_whole_extent()
        fil.update()
        self._rescale_scalars_changed(self.rescale_scalars)
        fil.global_warning_display = w
        self.data_changed = True

    def _reset_defaults_fired(self):
        # Handler for the 'reset defaults' button.
        self._setup_probe_data(reset=True)
        self._rescale_scalars_changed(self.rescale_scalars)
# factory function here for convenience, we could also use a class but # the reasons for doing this are documented below. ############################################################################# # READERS ############################################################################# import spe_reader import sp4_reader import mat_reader ############################################################################# #FILTERS ############################################################################# ############################################################################# #MODULES ############################################################################# user_outline = ModuleMetadata( id="UserOutlineModule", menu_name="&UserOutline", factory='mymod.user_outline', desc="Draw a cornered outline for given input", tooltip="Draw a cornered outline for given input", help="Draw a cornered outline for given input", input_info=PipelineInfo(datasets=['any'], attribute_types=['any'], attributes=['any'])) # Register the module with the mayavi registry. registry.modules.append(user_outline)
class Surface(Module):
    """Draws a surface for the input data, optionally contoured.

    When `enable_contours` is on, a `Contour` component is interposed
    between the module manager's source and the `Actor`; otherwise the
    actor renders the source directly.
    """

    # The version of this class.  Used for persistence.
    __version__ = 0

    # Specifies if contouring is to be done or not.
    enable_contours = Bool(False, desc='if contours are generated')

    # The contour component that contours the data.
    contour = Instance(Contour, allow_none=False, record=True)

    # The actor component that represents the visualization.
    actor = Instance(Actor, allow_none=False, record=True)

    # Information about what this object can consume.
    input_info = PipelineInfo(datasets=['any'],
                              attribute_types=['any'],
                              attributes=['any'])

    ######################################################################
    # `Module` interface
    ######################################################################
    def setup_pipeline(self):
        """Override this method so that it *creates* the tvtk
        pipeline.

        This method is invoked when the object is initialized via
        `__init__`.  Note that at the time this method is called, the
        tvtk data pipeline will *not* yet be setup.  So upstream data
        will not be available.  The idea is that you simply create the
        basic objects and setup those parts of the pipeline not
        dependent on upstream sources and filters.  You should also
        set the `actors` attribute up at this point.
        """
        # Setup the objects.
        self.contour = Contour(auto_contours=True, number_of_contours=10)
        self.actor = Actor()
        # Setup the actor suitably for this module.
        self.actor.property.line_width = 2.0

    def update_pipeline(self):
        """Override this method so that it *updates* the tvtk pipeline
        when data upstream is known to have changed.

        This method is invoked (automatically) when any of the inputs
        sends a `pipeline_changed` event.
        """
        mm = self.module_manager
        if mm is None:
            return

        # This makes sure that any changes made to enable_contours
        # when the module is not running are updated when it is
        # started.  Also sets up the pipeline and inputs correctly.
        self._enable_contours_changed(self.enable_contours)

        # Set the LUT for the mapper.
        self.actor.set_lut(mm.scalar_lut_manager.lut)

        self.pipeline_changed = True

    def update_data(self):
        """Override this method so that it flushes the vtk pipeline if
        that is necessary.

        This method is invoked (automatically) when any of the inputs
        sends a `data_changed` event.
        """
        # Just set data_changed, the components should do the rest if
        # they are connected.
        self.data_changed = True

    ######################################################################
    # Non-public methods.
    ######################################################################
    def _filled_contours_changed(self, value):
        """When filled contours are enabled, the mapper should use the
        cell data, otherwise it should use the default scalar mode.
        """
        if value:
            self.actor.mapper.scalar_mode = 'use_cell_data'
        else:
            self.actor.mapper.scalar_mode = 'default'
        self.render()

    def _enable_contours_changed(self, value):
        """Turns on and off the contours."""
        if self.module_manager is None:
            return
        if value:
            # Interpose the contour component between source and actor.
            self.contour.inputs = [self.module_manager.source]
            self.actor.inputs = [self.contour]
            if self.contour.filled_contours:
                self.actor.mapper.scalar_mode = 'use_cell_data'
        else:
            # Connect the actor straight to the source again.
            # (Removed an unused `old_inputs` local that captured the
            # previous actor inputs but was never read.)
            self.actor.inputs = [self.module_manager.source]
            self.actor.mapper.scalar_mode = 'default'
        self.render()

    def _contour_changed(self, old, new):
        # Rewire the filled_contours notification from the old contour
        # component to the new one.
        if old is not None:
            old.on_trait_change(self._filled_contours_changed,
                                'filled_contours',
                                remove=True)
        new.on_trait_change(self._filled_contours_changed,
                            'filled_contours')
        self._change_components(old, new)

    def _actor_changed(self, old, new):
        if old is None:
            # First time the actor is set.
            new.mapper = tvtk.DataSetMapper(use_lookup_table_scalar_range=1)
        new.scene = self.scene
        mm = self.module_manager
        if mm is not None:
            new.inputs = [mm.source]
        self._change_components(old, new)
class VTKXMLFileReader(FileDataSource):
    """A VTK XML file reader.  The reader supports all the different
    types of data sets.  This reader also supports a time series.

    Currently, this reader assumes that there is only one output that
    has configurable attributes.
    """

    # The version of this class.  Used for persistence.
    __version__ = 0

    ########################################
    # Dynamic traits: These traits are dynamic and are automatically
    # updated depending on the contents of the file.

    # The active point scalar name.  An empty string indicates that
    # the attribute is "deactivated".  This is useful when you have
    # both point and cell attributes and want to use cell data by
    # default.
    point_scalars_name = DEnum(values_name='_point_scalars_list',
                               desc='scalar point data attribute to use')
    # The active point vector name.
    point_vectors_name = DEnum(values_name='_point_vectors_list',
                               desc='vectors point data attribute to use')
    # The active point tensor name.
    point_tensors_name = DEnum(values_name='_point_tensors_list',
                               desc='tensor point data attribute to use')

    # The active cell scalar name.
    cell_scalars_name = DEnum(values_name='_cell_scalars_list',
                              desc='scalar cell data attribute to use')
    # The active cell vector name.
    cell_vectors_name = DEnum(values_name='_cell_vectors_list',
                              desc='vectors cell data attribute to use')
    # The active cell tensor name.
    cell_tensors_name = DEnum(values_name='_cell_tensors_list',
                              desc='tensor cell data attribute to use')

    ########################################
    # The VTK data file reader.
    reader = Instance(tvtk.XMLReader)

    # Information about what this object can produce.
    output_info = PipelineInfo(datasets=['any'],
                               attribute_types=['any'],
                               attributes=['any'])

    # Our view.
    view = View(Group(Include('time_step_group'),
                      Item(name='point_scalars_name'),
                      Item(name='point_vectors_name'),
                      Item(name='point_tensors_name'),
                      Item(name='cell_scalars_name'),
                      Item(name='cell_vectors_name'),
                      Item(name='cell_tensors_name'),
                      Item(name='reader'),
                      ))

    ########################################
    # Private traits.

    # These private traits store the list of available data
    # attributes.  The non-private traits use these lists internally.
    _point_scalars_list = List(Str)
    _point_vectors_list = List(Str)
    _point_tensors_list = List(Str)
    _cell_scalars_list = List(Str)
    _cell_vectors_list = List(Str)
    _cell_tensors_list = List(Str)

    # This filter allows us to change the attributes of the data
    # object and will ensure that the pipeline is properly taken care
    # of.  Directly setting the array in the VTK object will not do
    # this.
    _assign_attribute = Instance(tvtk.AssignAttribute, args=(),
                                 allow_none=False)

    # Toggles if this is the first time this object has been used.
    _first = Bool(True)

    ######################################################################
    # `object` interface
    ######################################################################
    def __get_pure_state__(self):
        """Return the picklable state, excluding transient traits and
        converting the active-attribute properties into plain values."""
        d = super(VTKXMLFileReader, self).__get_pure_state__()
        for name in ('_assign_attribute', '_first'):
            d.pop(name, None)
        # Pickle the 'point_scalars_name' etc. since these are
        # properties and not in __dict__.
        attr = {}
        for name in ('point_scalars', 'point_vectors',
                     'point_tensors', 'cell_scalars',
                     'cell_vectors', 'cell_tensors'):
            d.pop('_' + name + '_list', None)
            d.pop('_' + name + '_name', None)
            x = name + '_name'
            attr[x] = getattr(self, x)
        d.update(attr)
        return d

    def __set_pure_state__(self, state):
        # The reader has its own file_name which needs to be fixed.
        state.reader.file_name = state.file_path.abs_pth
        # Now call the parent class to setup everything.
        super(VTKXMLFileReader, self).__set_pure_state__(state)

    ######################################################################
    # `Base` interface
    ######################################################################
    def start(self):
        """This is invoked when this object is added to the mayavi
        pipeline.
        """
        # Do nothing if we are already running.
        if self.running:
            return

        # Call the parent method to do its thing.  This will typically
        # start all our children.
        super(VTKXMLFileReader, self).start()

    def stop(self):
        """Invoked when this object is removed from the mayavi
        pipeline.
        """
        if not self.running:
            return

        # Call the parent method to do its thing.
        super(VTKXMLFileReader, self).stop()

    ######################################################################
    # `FileDataSource` interface
    ######################################################################
    def update(self):
        """Re-execute the reader (no-op when no file is set)."""
        if len(self.file_path.get()) == 0:
            return
        reader = self.reader
        reader.update()
        self.render()

    def update_data(self):
        """Refresh the dynamic attribute lists from the file contents
        and (re)activate the appropriate point/cell attributes."""
        if len(self.file_path.get()) == 0:
            return
        self.reader.update()
        pnt_attr, cell_attr = get_all_attributes(self.reader.output)

        def _setup_data_traits(obj, attributes, d_type):
            """Given the object, the dict of the attributes from the
            `get_all_attributes` function and the data type
            (point/cell) data this will setup the object and the data.
            """
            attrs = ['scalars', 'vectors', 'tensors']
            aa = obj._assign_attribute
            data = getattr(obj.reader.output, '%s_data' % d_type)
            for attr in attrs:
                values = attributes[attr]
                # The empty string is the "deactivated" choice.
                values.append('')
                setattr(obj, '_%s_%s_list' % (d_type, attr), values)
                if len(values) > 1:
                    default = getattr(obj, '%s_%s_name' % (d_type, attr))
                    if obj._first and len(default) == 0:
                        default = values[0]
                    getattr(data, 'set_active_%s' % attr)(default)
                    aa.assign(default, attr.upper(),
                              d_type.upper() + '_DATA')
                    aa.update()
                    # Update the trait silently -- the attribute is
                    # already active, no handler should re-fire.
                    kw = {'%s_%s_name' % (d_type, attr): default,
                          'trait_change_notify': False}
                    obj.set(**kw)

        _setup_data_traits(self, cell_attr, 'cell')
        _setup_data_traits(self, pnt_attr, 'point')
        if self._first:
            self._first = False
        # Propagate the data changed event.
        self.data_changed = True

    ######################################################################
    # Non-public interface
    ######################################################################
    def _file_path_changed(self, fpath):
        value = fpath.get()
        if len(value) == 0:
            return
        else:
            if self.reader is None:
                # Instantiate the matching tvtk.XML<Type>Reader class.
                # (Replaces an `eval` on a string built from file
                # content with an equivalent, safe getattr lookup.)
                d_type = find_file_data_type(fpath.get())
                self.reader = getattr(tvtk, 'XML%sReader' % d_type)()
            reader = self.reader
            reader.file_name = value
            reader.update()

            # Setup the outputs by resetting self.outputs.  Changing
            # the outputs automatically fires a pipeline_changed
            # event.
            try:
                n = reader.number_of_outputs
            except AttributeError:  # for VTK >= 4.5
                n = reader.number_of_output_ports
            outputs = []
            for i in range(n):
                outputs.append(reader.get_output(i))

            # FIXME: Only the first output goes through the assign
            # attribute filter.
            aa = self._assign_attribute
            aa.input = outputs[0]
            outputs[0] = aa.output
            self.update_data()

            self.outputs = outputs

            # FIXME: The output info is only based on the first output.
            self.output_info.datasets = \
                [get_tvtk_dataset_name(outputs[0])]

            # Change our name on the tree view
            self.name = self._get_name()

    def _set_data_name(self, data_type, attr_type, value):
        """Activate (or, for an empty name, deactivate) the named
        point/cell attribute and propagate the change."""
        if value is None:
            return

        reader_output = self.reader.output
        if len(value) == 0:
            # If the value is empty then we deactivate that attribute.
            d = getattr(reader_output, attr_type + '_data')
            method = getattr(d, 'set_active_%s' % data_type)
            method(None)
            self.data_changed = True
            return

        aa = self._assign_attribute
        data = None
        if attr_type == 'point':
            data = reader_output.point_data
        elif attr_type == 'cell':
            data = reader_output.cell_data

        method = getattr(data, 'set_active_%s' % data_type)
        method(value)
        aa.assign(value, data_type.upper(), attr_type.upper() + '_DATA')
        aa.update()
        # Fire an event, so the changes propagate.
        self.data_changed = True

    def _point_scalars_name_changed(self, value):
        self._set_data_name('scalars', 'point', value)

    def _point_vectors_name_changed(self, value):
        self._set_data_name('vectors', 'point', value)

    def _point_tensors_name_changed(self, value):
        self._set_data_name('tensors', 'point', value)

    def _cell_scalars_name_changed(self, value):
        self._set_data_name('scalars', 'cell', value)

    def _cell_vectors_name_changed(self, value):
        self._set_data_name('vectors', 'cell', value)

    def _cell_tensors_name_changed(self, value):
        self._set_data_name('tensors', 'cell', value)

    def _get_name(self):
        """ Gets the name to display on the tree view.  Note that this
        is not a property getter.
        """
        fname = basename(self.file_path.get())
        ret = "VTK XML file (%s)" % fname
        if len(self.file_list) > 1:
            ret += " (timeseries)"
        if '[Hidden]' in self.name:
            ret += ' [Hidden]'
        return ret
class ImagePlaneWidget(Module):
    """Interactively slices image data with a tvtk ImagePlaneWidget,
    optionally coloring the slice through the module manager's scalar
    lookup table."""

    # The version of this class.  Used for persistence.
    __version__ = 0

    # The tvtk image plane widget.
    ipw = Instance(tvtk.ImagePlaneWidget, allow_none=False, record=True)

    # Whether the widget colors the slice through a lookup table.
    use_lookup_table = Bool(
        True,
        help='Use a lookup table to map input scalars to colors')

    # Information about what this object can consume.
    input_info = PipelineInfo(datasets=['image_data'],
                              attribute_types=['any'],
                              attributes=['scalars'])

    view = View(Group(Item(name='ipw', style='custom', resizable=True),
                      show_labels=False),
                width=600,
                height=600,
                resizable=True,
                scrollable=True)

    ######################################################################
    # `Module` interface
    ######################################################################
    def setup_pipeline(self):
        """Override this method so that it *creates* the tvtk
        pipeline.

        This method is invoked when the object is initialized via
        `__init__`.  Note that at the time this method is called, the
        tvtk data pipeline will *not* yet be setup.  So upstream data
        will not be available.  The idea is that you simply create the
        basic objects and setup those parts of the pipeline not
        dependent on upstream sources and filters.  You should also
        set the `actors` attribute up at this point.
        """
        # Create the various objects for this module.
        self.ipw = tvtk.ImagePlaneWidget(display_text=1,
                                         key_press_activation=0,
                                         left_button_action=1,
                                         middle_button_action=0,
                                         user_controlled_lookup_table=True)
        self.setup_lut()

    def update_pipeline(self):
        """Override this method so that it *updates* the tvtk pipeline
        when data upstream is known to have changed.

        This method is invoked (automatically) when any of the inputs
        sends a `pipeline_changed` event.

        Raises a TypeError if the upstream data is not structured
        points / image data.
        """
        mod_mgr = self.module_manager
        if mod_mgr is None:
            return

        # Data is available, so set the input for the IPW.
        input = mod_mgr.source.outputs[0]
        if not (input.is_a('vtkStructuredPoints')
                or input.is_a('vtkImageData')):
            msg = 'ImagePlaneWidget only supports structured points or '\
                  'image data.'
            error(msg)
            # Fixed: was the Python-2-only `raise TypeError, msg` syntax.
            raise TypeError(msg)

        self.ipw.input = input
        self.setup_lut()

    def update_data(self):
        """Override this method so that it flushes the vtk pipeline if
        that is necessary.

        This method is invoked (automatically) when any of the inputs
        sends a `data_changed` event.
        """
        # Just set data_changed, the component should do the rest.
        self.data_changed = True

    @on_trait_change('use_lookup_table')
    def setup_lut(self):
        # Set the LUT for the IPW.
        if self.use_lookup_table:
            if self.module_manager is not None:
                self.ipw.lookup_table = \
                    self.module_manager.scalar_lut_manager.lut
        else:
            self.ipw.color_map.lookup_table = None
        self.render()

    ######################################################################
    # Non-public methods.
    ######################################################################
    def _ipw_changed(self, old, new):
        # Rewire render notifications and the widget list from the old
        # widget to the new one.
        if old is not None:
            old.on_trait_change(self.render, remove=True)
            self.widgets.remove(old)
        new.on_trait_change(self.render)
        self.widgets.append(new)
        if old is not None:
            self.update_pipeline()
        self.pipeline_changed = True
class PolyDataReader(FileDataSource):
    """A PolyData file reader.  The reader supports all the
    different types of poly data files.
    """

    # The version of this class.  Used for persistence.
    __version__ = 0

    # The PolyData file reader.  The concrete tvtk reader is chosen by
    # file extension in `_file_path_changed`.
    reader = Instance(tvtk.Object, allow_none=False, record=True)

    ######################################################################
    # Private Traits

    # Maps a lowercase file extension to the tvtk reader that handles it.
    _reader_dict = Dict(Str, Instance(tvtk.Object))

    # Our View.
    view = View(Group(Include('time_step_group'),
                      Item(name='base_file_name'),
                      Item(name='reader',
                           style='custom',
                           resizable=True),
                      show_labels=False),
                resizable=True)

    # Information about what this object can produce.
    output_info = PipelineInfo(datasets=['poly_data'],
                               attribute_types=['any'],
                               attributes=['any'])

    ######################################################################
    # `object` interface
    ######################################################################
    def __set_pure_state__(self, state):
        # The reader has its own file_name which needs to be fixed.
        state.reader.file_name = state.file_path.abs_pth
        # Now call the parent class to setup everything.
        super(PolyDataReader, self).__set_pure_state__(state)

    ######################################################################
    # `FileDataSource` interface
    ######################################################################
    def update(self):
        """Re-execute the reader and re-render (render is skipped when
        no file is set)."""
        self.reader.update()
        if len(self.file_path.get()) == 0:
            return
        self.render()

    ######################################################################
    # Non-public interface
    ######################################################################
    def _file_path_changed(self, fpath):
        value = fpath.get()
        if len(value) == 0:
            return

        # Extract the file extension
        splitname = value.strip().split('.')
        extension = splitname[-1].lower()
        # Select polydata reader based on file type.
        # (Fixed: replaced the Python-2-only `dict.has_key` with `in`.)
        old_reader = self.reader
        if extension in self._reader_dict:
            self.reader = self._reader_dict[extension]
        else:
            error('Invalid extension for file: %s' % value)
            return

        self.reader.file_name = value.strip()
        self.reader.update()
        self.reader.update_information()

        if old_reader is not None:
            old_reader.on_trait_change(self.render, remove=True)
        self.reader.on_trait_change(self.render)

        old_outputs = self.outputs
        self.outputs = [self.reader.output]
        if self.outputs == old_outputs:
            # The outputs list did not change, so no pipeline event was
            # fired -- but the data inside may have, so fire explicitly.
            self.data_changed = True

        # Change our name on the tree view
        self.name = self._get_name()

    def _get_name(self):
        """ Returns the name to display on the tree view.  Note that
        this is not a property getter.
        """
        fname = basename(self.file_path.get())
        ret = "%s" % fname
        if len(self.file_list) > 1:
            ret += " (timeseries)"
        if '[Hidden]' in self.name:
            ret += ' [Hidden]'
        return ret

    def __reader_dict_default(self):
        """Default value for reader dict."""
        rd = {
            'stl': tvtk.STLReader(),
            'stla': tvtk.STLReader(),
            'stlb': tvtk.STLReader(),
            'txt': tvtk.SimplePointsReader(),
            'raw': tvtk.ParticleReader(),
            'ply': tvtk.PLYReader(),
            'pdb': tvtk.PDBReader(),
            'slc': tvtk.SLCReader(),
            'xyz': tvtk.XYZMolReader(),
            'obj': tvtk.OBJReader(),
            'facet': tvtk.FacetReader(),
            'cube': tvtk.GaussianCubeReader(),
            'g': tvtk.BYUReader(),
        }
        return rd

    @classmethod
    def can_read(cls, filename):
        """ Class method to check if the reader can actually read the
        file.  Returns 'True' if it can read it succesfully else
        'False'.  Returns None for extensions it has no opinion on.

        (Converted from the legacy `can_read = classmethod(can_read)`
        form to the decorator -- behavior is identical.)
        """
        # Extract the file extension
        splitname = filename.strip().split('.')
        extension = splitname[-1].lower()
        if extension == 'xyz':
            # Temporarily disable VTK's global warnings while probing
            # the file, restoring the previous setting afterwards.
            from vtk import vtkObject
            o = vtkObject
            w = o.GetGlobalWarningDisplay()
            o.SetGlobalWarningDisplay(0)  # Turn it off.
            r = tvtk.XYZMolReader()
            r.file_name = filename
            r.update()
            o.SetGlobalWarningDisplay(w)

            # A successful parse produces at least one point.
            if len(r.output.points) != 0:
                return True
            return False

        return None
class Text(Module):
    # The version of this class.  Used for persistence.
    __version__ = 0

    # The tvtk TextActor.
    actor = Instance(tvtk.TextActor, allow_none=False, record=True)

    # The text property (color, font etc.) of the actor -- backed by
    # `_property`, which `_actor_changed` keeps bound to
    # `actor.text_property`.
    property = Property(record=True)

    # The text to be displayed.  Note that this should really be `Str`
    # but wxGTK only returns unicode.
    text = Str('Text', desc='the text to be displayed')

    # The x-position of this actor.
    x_position = Float(0.0, desc='the x-coordinate of the text')

    # The y-position of this actor.
    y_position = Float(0.0, desc='the y-coordinate of the text')

    # The z-position of this actor.  Only used when `position_in_3d`.
    z_position = Float(0.0, desc='the z-coordinate of the text')

    # Shadow the positions as ranges for 2D.  Simply using a RangeEditor
    # does not work as it resets the 3D positions to 1 when the dialog is
    # loaded.  Kept in sync with x/y_position via `_shadow_positions`.
    _x_position_2d = Range(0., 1., 0., enter_set=True, auto_set=False,
                           desc='the x-coordinate of the text')
    _y_position_2d = Range(0., 1., 0., enter_set=True, auto_set=False,
                           desc='the y-coordinate of the text')

    # 3D position: switches the actor's coordinate system between
    # 'world' and 'normalized_viewport' (see _position_in_3d_changed).
    position_in_3d = Bool(
        False,
        desc='whether the position of the object is given in 2D or in 3D')

    # The width of the text.
    width = Range(0.0, 1.0, 0.4, enter_set=True, auto_set=False,
                  desc='the width of the text as a fraction of the viewport')

    # Information about what this object can consume.
    input_info = PipelineInfo(datasets=['any'],
                              attribute_types=['any'],
                              attributes=['any'])

    ########################################
    # The view of this object.

    # The actor sub-view differs across VTK versions: text scaling was
    # renamed from `scaled_text` to `text_scale_mode` after VTK 5.1.
    if VTK_VER > 5.1:
        _text_actor_group = Group(Item(name='visibility'),
                                  Item(name='text_scale_mode'),
                                  Item(name='alignment_point'),
                                  Item(name='minimum_size'),
                                  Item(name='maximum_line_height'),
                                  show_border=True,
                                  label='Text Actor')
    else:
        _text_actor_group = Group(Item(name='visibility'),
                                  Item(name='scaled_text'),
                                  Item(name='alignment_point'),
                                  Item(name='minimum_size'),
                                  Item(name='maximum_line_height'),
                                  show_border=True,
                                  label='Text Actor')

    _position_group_2d = Group(Item(name='_x_position_2d',
                                    label='X position'),
                               Item(name='_y_position_2d',
                                    label='Y position'),
                               visible_when='not position_in_3d')

    _position_group_3d = Group(Item(name='x_position', label='X',
                                    springy=True),
                               Item(name='y_position', label='Y',
                                    springy=True),
                               Item(name='z_position', label='Z',
                                    springy=True),
                               show_border=True,
                               label='Position',
                               orientation='horizontal',
                               visible_when='position_in_3d')

    view = View(Group(Group(Item(name='text'),
                            Item(name='position_in_3d'),
                            _position_group_2d,
                            _position_group_3d,
                            Item(name='width',
                                 enabled_when='object.actor.scaled_text'),
                            ),
                      Group(Item(name='actor', style='custom',
                                 editor=
                                 InstanceEditor(view=View(_text_actor_group))
                                 ),
                            show_labels=False),
                      label='TextActor',
                      show_labels=False
                      ),
                Group(Item(name='_property', style='custom',
                           resizable=True),
                      label='TextProperty',
                      show_labels=False),
                )

    ########################################
    # Private traits.

    # Guard flag: True while traits are being synced from the actor (or
    # unpickled) so the trait handlers do not echo changes back.
    _updating = Bool(False)

    # Backing store for the `property` Property trait.
    _property = Instance(tvtk.TextProperty)

    ######################################################################
    # `object` interface
    ######################################################################
    def __set_pure_state__(self, state):
        # Restore the actor first; suppress handlers while doing so.
        self._updating = True
        state_pickler.set_state(self, state, first=['actor'],
                                ignore=['_updating'])
        self._updating = False

    ######################################################################
    # `Module` interface
    ######################################################################
    def setup_pipeline(self):
        """Override this method so that it *creates* the tvtk
        pipeline.

        This method is invoked when the object is initialized via
        `__init__`.  Note that at the time this method is called, the
        tvtk data pipeline will *not* yet be setup.  So upstream data
        will not be available.  The idea is that you simply create the
        basic objects and setup those parts of the pipeline not
        dependent on upstream sources and filters.  You should also
        set the `actors` attribute up at this point.
        """
        actor = self.actor = tvtk.TextActor(input=str(self.text))
        # See the view above: the scaling API changed after VTK 5.1.
        if VTK_VER > 5.1:
            actor.set(text_scale_mode='prop', width=0.4, height=1.0)
        else:
            actor.set(scaled_text=True, width=0.4, height=1.0)
        c = actor.position_coordinate
        c.set(coordinate_system='normalized_viewport',
              value=(self.x_position, self.y_position, 0.0))
        c = actor.position2_coordinate
        c.set(coordinate_system='normalized_viewport')
        self._property.opacity = 1.0
        # Apply the current trait values to the freshly created actor.
        self._text_changed(self.text)
        self._width_changed(self.width)
        self._shadow_positions(True)

    def update_pipeline(self):
        """Override this method so that it *updates* the tvtk pipeline
        when data upstream is known to have changed.

        This method is invoked (automatically) when any of the inputs
        sends a `pipeline_changed` event.
        """
        self.pipeline_changed = True

    def update_data(self):
        """Override this method so that it flushes the vtk pipeline if
        that is necessary.

        This method is invoked (automatically) when any of the inputs
        sends a `data_changed` event.
        """
        # Just set data_changed, the component should do the rest.
        self.data_changed = True

    ######################################################################
    # Non-public interface
    ######################################################################
    def _text_changed(self, value):
        actor = self.actor
        if actor is None:
            return
        if self._updating:
            return
        actor.input = str(value)
        self.render()

    def _shadow_positions(self, value):
        # Bidirectionally sync (or, when value is False, unsync) the 2D
        # range shadows with the real x/y positions.
        self.sync_trait('x_position', self,
                        '_x_position_2d', remove=(not value))
        self.sync_trait('y_position', self,
                        '_y_position_2d', remove=(not value))
        if not value:
            self._x_position_2d = self.x_position
            self._y_position_2d = self.y_position

    def _position_in_3d_changed(self, value):
        if value:
            self.actor.position_coordinate.coordinate_system = 'world'
            self.actor.position2_coordinate.coordinate_system = 'world'
        else:
            self.actor.position2_coordinate.coordinate_system = \
                'normalized_viewport'
            self.actor.position_coordinate.coordinate_system = \
                'normalized_viewport'
            # Clamp the positions into the [0, 1] viewport range without
            # firing the position handlers.
            x = self.x_position
            y = self.y_position
            if x < 0:
                x = 0
            elif x > 1:
                x = 1
            if y < 0:
                y = 0
            elif y > 1:
                y = 1
            self.set(x_position=x, y_position=y,
                     trait_change_notify=False)
        # The 2D shadows only apply in viewport mode.
        self._shadow_positions(not value)
        self._change_position()
        self.actor._width_changed(self.width, self.width)
        self.pipeline_changed = True

    def _change_position(self):
        """ Callback for _x_position, _y_position, and z_position.
        """
        actor = self.actor
        if actor is None:
            return
        if self._updating:
            return
        x = self.x_position
        y = self.y_position
        z = self.z_position
        if self.position_in_3d:
            actor.position_coordinate.value = x, y, z
        else:
            actor.position = x, y
        self.render()

    # All three position traits share the same handler.
    _x_position_changed = _change_position
    _y_position_changed = _change_position
    _z_position_changed = _change_position

    def _width_changed(self, value):
        actor = self.actor
        if actor is None:
            return
        if self._updating:
            return
        actor.width = value
        self.render()

    def _update_traits(self):
        # Pull the actor's current state back into our traits; guard
        # with _updating so the trait handlers do not echo the values
        # straight back to the actor.
        self._updating = True
        try:
            actor = self.actor
            self.text = actor.input
            pos = actor.position
            self.x_position, self.y_position = pos
            self.width = actor.width
        finally:
            self._updating = False

    def _get_property(self):
        # Getter for the `property` Property trait.
        return self._property

    def _actor_changed(self, old, new):
        # Rewire notifications from the old actor (and its property) to
        # the new one and publish the new actor.
        if old is not None:
            for obj in (old, self._property):
                obj.on_trait_change(self.render, remove=True)
            old.on_trait_change(self._update_traits, remove=True)
        self._property = new.text_property
        for obj in (new, self._property):
            obj.on_trait_change(self.render)
        new.on_trait_change(self._update_traits)
        self.actors = [new]
        self.render()

    def _foreground_changed_for_scene(self, old, new):
        # Change the default color for the actor.
        self.property.color = new
        self.render()

    def _scene_changed(self, old, new):
        super(Text, self)._scene_changed(old, new)
        self._foreground_changed_for_scene(None, new.foreground)
class SelectOutput(Filter):
    """Expose exactly one of the outputs of the upstream object.

    This is typically very useful for a multi-block data source, where
    the user picks which of the available outputs flows further down
    the pipeline.
    """

    # Index (into the input's outputs) of the output to pass through.
    output_index = Range(value=0, enter_set=True, auto_set=False,
                         low='_min_index', high='_max_index')

    # Information about what this object can consume.
    input_info = PipelineInfo(datasets=['any'],
                              attribute_types=['any'],
                              attributes=['any'])

    # Information about what this object can produce.
    output_info = PipelineInfo(datasets=['any'],
                               attribute_types=['any'],
                               attributes=['any'])

    # The minimum output index of our input.
    _min_index = Int(0, desc='the minimum output index')

    # The maximum output index of our input.
    _max_index = Int(0, desc='the maximum output index')

    ########################################
    # Traits View.
    view = View(Group(Item('output_index',
                           enabled_when='_max_index > 0')),
                resizable=True)

    ######################################################################
    # `object` interface.
    def __get_pure_state__(self):
        """Include `output_index` in the pickled state (it is not
        stored automatically)."""
        state = super(SelectOutput, self).__get_pure_state__()
        state['output_index'] = self.output_index
        return state

    def __set_pure_state__(self, state):
        super(SelectOutput, self).__set_pure_state__(state)
        # Re-apply the restored index explicitly -- without this the
        # selection would not actually change.
        self._output_index_changed(state.output_index)

    ######################################################################
    # `Filter` interface.
    def update_pipeline(self):
        """Refresh `_max_index` from the current input and re-apply
        the selection."""
        if not self.inputs:
            # Nothing connected yet.
            return
        self._max_index = len(self.inputs[0].outputs) - 1
        self._output_index_changed(self.output_index)

    def update_data(self):
        """Forward upstream data-changed events downstream."""
        self.data_changed = True

    ######################################################################
    # Trait handlers.
    def _output_index_changed(self, value):
        """Static trait handler: clamp the requested index into range
        and publish the selected output."""
        if value > self._max_index:
            # Re-assigning fires this handler again with a valid value.
            self.output_index = self._max_index
            return
        if value < self._min_index:
            self.output_index = self._min_index
            return
        self._set_outputs([self.inputs[0].outputs[value]])
        scene = self.scene
        if scene is not None:
            scene.renderer.reset_camera_clipping_range()
            scene.render()
class Filter(Source):
    """ Base class for all the Mayavi filters.

    A filter consumes the outputs of its `inputs` and republishes
    transformed outputs; subclasses implement `update_pipeline` (and
    usually `setup_pipeline`).
    """

    # The version of this class.  Used for persistence.
    __version__ = 0

    # The inputs for this filter.  Not recorded: they are wired up
    # dynamically when the pipeline is built.
    inputs = List(PipelineBase, record=False)

    # The icon
    icon = Str('filter.ico')

    # The human-readable type for this object
    type = Str(' filter')

    # Information about what this object can consume.
    input_info = PipelineInfo(datasets=['any'])

    ######################################################################
    # `object` interface.
    ######################################################################
    def __init__(self, **traits):
        super(Filter, self).__init__(**traits)

        # Let the filter setup its pipeline.
        self.setup_pipeline()

    def __get_pure_state__(self):
        d = super(Filter, self).__get_pure_state__()
        # Inputs are setup dynamically, don't pickle them.
        d.pop('inputs', None)
        return d

    ######################################################################
    # `Filter` interface.
    ######################################################################
    def setup_pipeline(self):
        """Override this method so that it *creates* its tvtk
        pipeline.

        This method is invoked when the object is initialized via
        `__init__`.  Note that at the time this method is called, the
        tvtk data pipeline will *not* yet be setup.  So upstream data
        will not be available.  The idea is that you simply create the
        basic objects and setup those parts of the pipeline not
        dependent on upstream sources and filters.
        """
        pass

    def update_pipeline(self):
        """Override this method so that it *updates* the tvtk pipeline
        when data upstream is known to have changed.

        This method is invoked (automatically) when the input fires a
        `pipeline_changed` event.
        """
        raise NotImplementedError

    def update_data(self):
        """Override this method to do what is necessary when upstream
        data changes.

        This method is invoked (automatically) when any of the inputs
        sends a `data_changed` event.
        """
        # Invoke render to update any changes.
        self.render()
        # Propagate the data_changed event.
        self.data_changed = True

    ######################################################################
    # `Base` interface
    ######################################################################
    def start(self):
        """This is invoked when this object is added to the mayavi
        pipeline.  Note that when start is invoked, all the other
        information for the pipeline should be already set.
        """
        # Do nothing if we are already running.
        if self.running:
            return

        # Setup event handlers.
        self._setup_event_handlers()

        # Update the pipeline.
        self.update_pipeline()

        # Call parent method to start the children and set the state.
        super(Filter, self).start()

    def stop(self):
        """Invoked when this object is removed from the mayavi
        pipeline.  This is where you remove your actors from the
        scene.
        """
        if not self.running:
            return

        # Teardown event handlers.
        self._teardown_event_handlers()

        # Call parent method to stop the children and set the state.
        super(Filter, self).stop()

    ######################################################################
    # Non-public interface
    ######################################################################
    def _set_outputs(self, new_outputs):
        """Set `self.outputs` to the given list of `new_outputs`.  You
        should always use this method to set `self.outputs`.
        """
        old_outputs = self.outputs
        self.outputs = new_outputs
        if len(new_outputs) > 0:
            # Keep the advertised output dataset type in sync with what
            # we actually produce.
            self.output_info.datasets = \
                [get_tvtk_dataset_name(new_outputs[0])]
        if old_outputs == self.outputs:
            # Even if the outputs don't change we want to propagate a
            # data_changed event since the data could have changed.
            self.data_changed = True

    def _inputs_changed(self, old, new):
        # The whole inputs list was replaced.
        if self.running:
            self.update_pipeline()
        self._setup_input_events(old, new)

    def _inputs_items_changed(self, list_event):
        # Individual items were added/removed from the inputs list.
        if self.running:
            self.update_pipeline()
        self._setup_input_events(list_event.removed, list_event.added)

    def _setup_event_handlers(self):
        self._setup_input_events([], self.inputs)

    def _teardown_event_handlers(self):
        self._setup_input_events(self.inputs, [])

    def _setup_input_events(self, removed, added):
        # Unhook upstream events from removed inputs and hook them up
        # for added ones, so this filter reacts to upstream changes.
        for input in removed:
            input.on_trait_event(self.update_pipeline, 'pipeline_changed',
                                 remove=True)
            input.on_trait_event(self.update_data, 'data_changed',
                                 remove=True)
        for input in added:
            input.on_trait_event(self.update_pipeline, 'pipeline_changed')
            input.on_trait_event(self.update_data, 'data_changed')