class ListModel(HasTraits):
    # The currently held values (starts empty).
    value = List()
    # The candidate values offered to the user.
    possible_values = List(["one", "two"])
class MayaviViewer(HasTraits):
    """
    This class represents a Mayavi based viewer for the particles.  They
    are queried from a running solver.
    """

    # Helpers wrapping each particle array shown in the scene.
    particle_arrays = List(Instance(ParticleArrayHelper), [])
    # Names of the particle arrays, parallel to `particle_arrays`.
    pa_names = List(Str, [])
    # View used to interpolate particle data onto a grid/surface.
    interpolator = Instance(InterpolatorView)

    # The default scalar to load up when running the viewer.
    scalar = Str("rho")

    # The Mayavi scene model the plots are rendered into.
    scene = Instance(MlabSceneModel, ())

    ########################################
    # Traits to pull data from a live solver.
    live_mode = Bool(False, desc='if data is obtained from a running solver '
                     'or from saved files')

    shell = Button('Launch Python Shell')
    host = Str('localhost', desc='machine to connect to')
    port = Int(8800, desc='port to use to connect to solver')
    authkey = Password('pysph', desc='authorization key')
    # Set when host/port/authkey change so the controller reconnects.
    host_changed = Bool(True)
    client = Instance(MultiprocessingClient)
    # Lazily (re)computed remote controller; None when not connected.
    controller = Property(depends_on='live_mode, host_changed')

    ########################################
    # Traits to view saved solver output.
    files = List(Str, [])
    directory = Directory()
    current_file = Str('', desc='the file being viewed currently')
    update_files = Button('Refresh')
    # Index into `files`; bounds are the dynamic traits below.
    file_count = Range(low='_low', high='_n_files', value=0,
                       desc='the file counter')
    play = Bool(False, desc='if all files are played automatically')
    play_delay = Float(0.2, desc='the delay between loading files')
    loop = Bool(False, desc='if the animation is looped')
    # This is len(files) - 1.
    _n_files = Int(0)
    _low = Int(0)

    ########################################
    # Timer traits.
    timer = Instance(Timer)
    interval = Range(0.5, 20.0, 2.0,
                     desc='frequency in seconds with which plot is updated')

    ########################################
    # Solver info/control.
    current_time = Float(0.0, desc='the current time in the simulation')
    time_step = Float(0.0, desc='the time-step of the solver')
    iteration = Int(0, desc='the current iteration number')
    pause_solver = Bool(False, desc='if the solver should be paused')

    ########################################
    # Movie.
    record = Bool(False, desc='if PNG files are to be saved for animation')
    frame_interval = Range(1, 100, 5, desc='the interval between screenshots')
    movie_directory = Str

    # internal counters.
    _count = Int(0)           # update ticks seen since the movie started
    _frame_count = Int(0)     # PNG frames written so far
    _last_time = Float        # last simulation time a frame was saved at
    _solver_data = Any        # solver_data dict of the currently loaded file
    _file_name = Str          # full path of the currently loaded file
    _particle_array_updated = Bool  # True if edits need saving back to disk

    ########################################
    # The layout of the dialog created
    view = View(
        HSplit(
            Group(
                Group(
                    Group(
                        Item(name='directory'),
                        Item(name='current_file'),
                        Item(name='file_count'),
                        HGroup(Item(name='play'),
                               Item(name='play_delay',
                                    label='Delay', resizable=True),
                               Item(name='loop'),
                               Item(name='update_files', show_label=False),
                               padding=0),
                        padding=0,
                        label='Saved Data',
                        selected=True,
                        enabled_when='not live_mode',
                    ),
                    Group(
                        Item(name='live_mode'),
                        Group(
                            Item(name='host'),
                            Item(name='port'),
                            Item(name='authkey'),
                            enabled_when='live_mode',
                        ),
                        label='Connection',
                    ),
                    layout='tabbed'
                ),
                Group(
                    Group(
                        Item(name='current_time'),
                        Item(name='time_step'),
                        Item(name='iteration'),
                        Item(name='pause_solver',
                             enabled_when='live_mode'
                             ),
                        Item(name='interval',
                             enabled_when='not live_mode'
                             ),
                        label='Solver',
                    ),
                    Group(
                        Item(name='record'),
                        Item(name='frame_interval'),
                        Item(name='movie_directory'),
                        label='Movie',
                    ),
                    layout='tabbed',
                ),
                Group(
                    Item(name='particle_arrays', style='custom',
                         show_label=False,
                         editor=ListEditor(use_notebook=True,
                                           deletable=False,
                                           page_name='.name'
                                           )
                         ),
                    Item(name='interpolator', style='custom',
                         show_label=False),
                    layout='tabbed'
                ),
                Item(name='shell', show_label=False),
            ),
            Group(
                Item('scene', editor=SceneEditor(scene_class=MayaviScene),
                     height=400, width=600, show_label=False),
            )
        ),
        resizable=True,
        title='PySPH Particle Viewer',
        height=640, width=1024,
        handler=ViewerHandler
    )

    ######################################################################
    # `MayaviViewer` interface.
###################################################################### def on_close(self): self._handle_particle_array_updates() @on_trait_change('scene:activated') def start_timer(self): if not self.live_mode: # No need for the timer if we are rendering files. return # Just accessing the timer will start it. t = self.timer if not t.IsRunning(): t.Start(int(self.interval*1000)) @on_trait_change('scene:activated') def update_plot(self): # No need to do this if files are being used. if not self.live_mode: return # do not update if solver is paused if self.pause_solver: return if self.client is None: self.host_changed = True controller = self.controller if controller is None: return self.current_time = t = controller.get_t() self.time_step = controller.get_dt() self.iteration = controller.get_count() arrays = [] for idx, name in enumerate(self.pa_names): pa = controller.get_named_particle_array(name) arrays.append(pa) pah = self.particle_arrays[idx] pah.set(particle_array=pa, time=t) self.interpolator.particle_arrays = arrays if self.record: self._do_snap() def run_script(self, path): """Execute a script in the namespace of the viewer. """ with open(path) as fp: data = fp.read() ns = self._get_shell_namespace() exec(compile(data, path, 'exec'), ns) ###################################################################### # Private interface. 
    ######################################################################
    def _do_snap(self):
        """Generate the animation."""
        p_arrays = self.particle_arrays
        if len(p_arrays) == 0:
            return
        # Avoid saving duplicate frames for the same simulation time.
        if self.current_time == self._last_time:
            return

        if len(self.movie_directory) == 0:
            controller = self.controller
            output_dir = controller.get_output_directory()
            movie_dir = os.path.join(output_dir, 'movie')
            self.movie_directory = movie_dir
        else:
            movie_dir = self.movie_directory
        if not os.path.exists(movie_dir):
            os.mkdir(movie_dir)

        interval = self.frame_interval
        count = self._count
        # Save a PNG only every `frame_interval`-th call.
        if count % interval == 0:
            fname = 'frame%06d.png' % (self._frame_count)
            p_arrays[0].scene.save_png(os.path.join(movie_dir, fname))
            self._frame_count += 1
            self._last_time = self.current_time
        self._count += 1

    @on_trait_change('host,port,authkey')
    def _mark_reconnect(self):
        # Any change to the connection parameters invalidates the client.
        if self.live_mode:
            self.host_changed = True

    @cached_property
    def _get_controller(self):
        ''' get the controller, also sets the iteration count '''
        if not self.live_mode:
            return None

        reconnect = self.host_changed

        if not reconnect:
            # Probe the existing connection; reconnect if it is dead.
            try:
                c = self.client.controller
            except Exception as e:
                logger.info('Error: no connection or connection closed: '
                            'reconnecting: %s' % e)
                reconnect = True
                self.client = None
            else:
                try:
                    self.client.controller.get_count()
                except IOError:
                    self.client = None
                    reconnect = True

        if reconnect:
            self.host_changed = False
            try:
                if MultiprocessingClient.is_available((self.host, self.port)):
                    self.client = MultiprocessingClient(
                        address=(self.host, self.port),
                        authkey=self.authkey
                    )
                else:
                    logger.info(
                        'Could not connect: Multiprocessing Interface'
                        ' not available on %s:%s' % (self.host, self.port)
                    )
                    return None
            except Exception as e:
                logger.info('Could not connect: check if solver is '
                            'running:%s' % e)
                return None
            c = self.client.controller
            self.iteration = c.get_count()

        if self.client is None:
            return None
        else:
            return self.client.controller

    def _client_changed(self, old, new):
        if not self.live_mode:
            return

        self._clear()
        if new is None:
            return
        else:
            self.pa_names = self.client.controller.get_particle_array_names()

        self.particle_arrays = [
            self._make_particle_array_helper(self.scene, x)
            for x in self.pa_names
        ]
        self.interpolator = InterpolatorView(scene=self.scene)
        # Turn on the legend for the first particle array.
        if len(self.particle_arrays) > 0:
            self.particle_arrays[0].set(show_legend=True, show_time=True)

    def _timer_event(self):
        # catch all Exceptions else timer will stop
        try:
            self.update_plot()
        except Exception as e:
            logger.info('Exception: %s caught in timer_event' % e)

    def _interval_changed(self, value):
        # Restart the timer with the new period (seconds -> milliseconds).
        t = self.timer
        if t is None:
            return
        if t.IsRunning():
            t.Stop()
            t.Start(int(value * 1000))

    def _timer_default(self):
        return Timer(int(self.interval * 1000), self._timer_event)

    def _pause_solver_changed(self, value):
        if self.live_mode:
            c = self.controller
            if c is None:
                return
            if value:
                c.pause_on_next()
            else:
                c.cont()

    def _record_changed(self, value):
        # Snap a first frame immediately when recording is switched on.
        if value:
            self._do_snap()

    def _files_changed(self, value):
        if len(value) == 0:
            return
        else:
            d = os.path.dirname(os.path.abspath(value[0]))
            self.movie_directory = os.path.join(d, 'movie')
            self.set(directory=d, trait_change_notify=False)
        self._n_files = len(value) - 1
        self._frame_count = 0
        self._count = 0
        self.frame_interval = 1
        fc = self.file_count
        self.file_count = 0
        if fc == 0:
            # Force an update when our original file count is 0.
            self._file_count_changed(fc)
        t = self.timer
        if not self.live_mode:
            if t.IsRunning():
                t.Stop()
        else:
            # NOTE(review): calling Stop() inside the `not IsRunning()`
            # branch looks suspect, and Start() is given a float here
            # while other call sites pass int(...) -- confirm against the
            # Timer API before changing.
            if not t.IsRunning():
                t.Stop()
                t.Start(self.interval * 1000)

    def _file_count_changed(self, value):
        # Save out any updates for the previous file if needed.
        self._handle_particle_array_updates()
        # Load the new file.
        fname = self.files[value]
        self._file_name = fname
        self.current_file = os.path.basename(fname)
        # Code to read the file, create particle array and setup the helper.
        data = load(fname)
        solver_data = data["solver_data"]
        arrays = data["arrays"]
        self._solver_data = solver_data
        self.current_time = t = float(solver_data['t'])
        self.time_step = float(solver_data['dt'])
        self.iteration = int(solver_data['count'])
        names = list(arrays.keys())
        pa_names = self.pa_names

        if len(pa_names) == 0:
            # First file loaded: build the helpers from scratch.
            self.interpolator = InterpolatorView(scene=self.scene)
            self.pa_names = names
            pas = []
            for name in names:
                pa = arrays[name]
                pah = self._make_particle_array_helper(self.scene, name)
                # Must set this after setting the scene.
                pah.set(particle_array=pa, time=t)
                pas.append(pah)
            self.particle_arrays = pas
        else:
            # Subsequent files: just update the existing helpers in place.
            for idx, name in enumerate(pa_names):
                pa = arrays[name]
                pah = self.particle_arrays[idx]
                pah.set(particle_array=pa, time=t)

        self.interpolator.particle_arrays = list(arrays.values())

        if self.record:
            self._do_snap()

    def _loop_changed(self, value):
        if value and self.play:
            self._play_changed(self.play)

    def _play_changed(self, value):
        # Repurpose the timer for playback (or back to live updates).
        t = self.timer
        if value:
            t.Stop()
            t.callable = self._play_event
            t.Start(1000 * self.play_delay)
        else:
            t.Stop()
            t.callable = self._timer_event

    def _clear(self):
        self.pa_names = []
        self.scene.mayavi_scene.children[:] = []

    def _play_event(self):
        # Advance to the next file; wrap around or stop at the end.
        nf = self._n_files
        pc = self.file_count
        pc += 1
        if pc > nf:
            if self.loop:
                pc = 0
            else:
                self.timer.Stop()
                pc = nf
        self.file_count = pc
        self._handle_particle_array_updates()

    def _play_delay_changed(self):
        if self.play:
            self._play_changed(self.play)

    def _scalar_changed(self, value):
        for pa in self.particle_arrays:
            pa.scalar = value

    def _update_files_fired(self):
        fc = self.file_count
        files = glob_files(self.files[fc])
        sort_file_list(files)
        self.files = files
        self.file_count = fc
        if self.play:
            self._play_changed(self.play)

    def _shell_fired(self):
        ns = self._get_shell_namespace()
        obj = PythonShellView(ns=ns)
        obj.edit_traits()

    def _get_shell_namespace(self):
        # Namespace exposed to the embedded shell and to run_script().
        return dict(viewer=self, particle_arrays=self.particle_arrays,
                    interpolator=self.interpolator, scene=self.scene,
                    mlab=self.scene.mlab)

    def _directory_changed(self, d):
        # Re-glob the new directory for files with the same extension.
        ext = os.path.splitext(self.files[-1])[1]
        files = glob.glob(os.path.join(d, '*' + ext))
        if len(files) > 0:
            self._clear()
            sort_file_list(files)
            self.files = files
            self.file_count = min(self.file_count, len(files))
        else:
            pass

    def _live_mode_changed(self, value):
        if value:
            self._file_name = ''
            self.client = None
            self._clear()
            self._mark_reconnect()
            self.start_timer()
        else:
            self.client = None
            self._clear()
            self.timer.Stop()

    def _particle_array_helper_updated(self, value):
        self._particle_array_updated = True

    def _handle_particle_array_updates(self):
        # Called when the particle array helper fires an updated event.
        if self._particle_array_updated and self._file_name:
            sd = self._solver_data
            arrays = [x.particle_array for x in self.particle_arrays]
            detailed = self._requires_detailed_output(arrays)
            dump(self._file_name, arrays, sd, detailed_output=detailed,
                 only_real=False)
            self._particle_array_updated = False

    def _requires_detailed_output(self, arrays):
        # Detailed output is needed if any non-output property carries
        # non-constant data that would otherwise be lost on save.
        detailed = False
        for pa in arrays:
            props = set(pa.properties.keys())
            output = set(pa.output_property_arrays)
            diff = props - output
            for prop in diff:
                array = pa.get(prop)
                if (array.max() - array.min()) > 0:
                    detailed = True
                    break
            if detailed:
                break
        return detailed

    def _make_particle_array_helper(self, scene, name):
        pah = ParticleArrayHelper(scene=scene, name=name, scalar=self.scalar)
        pah.on_trait_change(self._particle_array_helper_updated, 'updated')
        return pah
class FooContainer(HasTraits):
    # Plain Instance trait: assigned values are not adapted.
    not_adapting_foo = Instance(Foo)
    # With adapt="yes" assigned values are adapted to Foo.
    adapting_foo = Instance(Foo, adapt="yes")
    # Same pair of behaviours for list elements.
    not_adapting_foo_list = List(Foo)
    adapting_foo_list = List(Instance(Foo, adapt="yes"))
class ViewDockPane(TraitsDockPane):
    """
    A DockPane to manipulate the traits of the currently selected view.
    """

    #### TaskPane interface ###############################################

    id = 'edu.mit.synbio.view_traits_pane'
    name = 'View Properties'

    # the Task that serves as the controller
    task = Instance(Task)

    # the IViewPlugins that the user can possibly choose. set by the
    # controller as we're instantiated
    view_plugins = List(IViewPlugin)

    # changed depending on whether the selected wi in the model is valid.
    enabled = Bool(False)

    # the currently selected view id
    selected_view = Str

    # if there is a default view for the currently selected operation, this
    # is its view id
    default_view = Str

    # task actions associated with views
    _actions = Dict(Str, TaskAction)

    # the default task action
    _default_action = Instance(TaskAction)

    _window = Instance(QtGui.QMainWindow)

    def create_contents(self, parent):
        """ Create and return the toolkit-specific contents of the dock pane.
        """
        self.toolbar = ToolBarManager(orientation='vertical',
                                      show_tool_names=False,
                                      image_size=(32, 32))

        self._default_action = TaskAction(
            name="Setup View",
            # `s=self` binds self as a default argument (avoids the
            # late-binding-closure pitfall).
            on_perform=lambda s=self: self.trait_set(
                selected_view=s.default_view),
            image=ImageResource('setup'),
            style='toggle',
            visible=False)
        self.toolbar.append(self._default_action)

        # NOTE(review): this iterates `self.plugins` while the trait
        # declared above is `view_plugins` -- confirm `plugins` is supplied
        # by a base class or by the controller.
        for plugin in self.plugins:
            task_action = TaskAction(
                name=plugin.short_name,
                # bind view_id per iteration so each button selects its
                # own view (late-binding-closure fix).
                on_perform=lambda view_id=plugin.view_id:
                    self.trait_set(selected_view=view_id),
                image=plugin.get_icon(),
                style='toggle')
            self._actions[plugin.view_id] = task_action
            self.toolbar.append(task_action)

        self._window = window = QtGui.QMainWindow()
        window.addToolBar(QtCore.Qt.RightToolBarArea,
                          self.toolbar.create_tool_bar(window))
        self.ui = self.model.edit_traits(view='selected_view_traits',
                                         kind='subpanel',
                                         parent=window)
        window.setCentralWidget(self.ui.control)
        window.setParent(parent)
        parent.setWidget(window)
        window.setEnabled(False)
        return window

    # BUG FIX: the original decorator was
    # ``@on_trait_change('enabled', enabled)`` which passed the Bool trait
    # *object* as the decorator's ``post_init`` argument.  Only the trait
    # name belongs here.
    @on_trait_change('enabled')
    def _enabled_changed(self, enabled):
        """ Enable/disable the pane's widgets when `enabled` changes. """
        self._window.setEnabled(enabled)
        self.ui.control.setEnabled(enabled)

    @on_trait_change('default_view')
    def set_default_view(self, obj, name, old_view_id, new_view_id):
        """ Re-key the default action when the default view id changes. """
        if old_view_id:
            del self._actions[old_view_id]
        if new_view_id:
            self._actions[new_view_id] = self._default_action
        # BUG FIX: was ``new_view_id is not ""`` -- an identity comparison
        # against a string literal (a SyntaxWarning on modern Python and
        # only accidentally correct under CPython interning).  Use an
        # equality test.
        self._default_action.visible = (new_view_id != "")

    @on_trait_change('selected_view')
    def _selected_view_changed(self, view_id):
        """ Keep exactly one toolbar button toggled: the selected view's. """
        # untoggle everything on the toolbar
        for action in self._actions.values():
            action.checked = False

        # toggle the right button
        if view_id:
            self._actions[view_id].checked = True
class CSVListEditorDemo(HasTraits):
    """Demonstrates the CSVListEditor on lists of several element types."""

    list1 = List(Int)
    list2 = List(Float)
    list3 = List(Str, maxlen=3)
    list4 = List(Enum('red', 'green', 'blue', 2, 3))
    list5 = List(Range(low=0.0, high=10.0))

    # 'low' and 'high' are used to demonstrate lists containing dynamic
    # ranges.
    low = Float(0.0)
    high = Float(1.0)
    list6 = List(Range(low=-1.0, high='high'))
    list7 = List(Range(low='low', high='high'))

    pop1 = Button("Pop from first list")
    sort1 = Button("Sort first list")

    # This will be str(self.list1).
    list1str = Property(Str, depends_on='list1')

    traits_view = View(
        HGroup(
            # This VGroup forms the column of CSVListEditor examples.
            VGroup(
                Item('list1', label="List(Int)",
                     editor=CSVListEditor(ignore_trailing_sep=False),
                     tooltip='options: ignore_trailing_sep=False'),
                Item('list1', label="List(Int)", style='readonly',
                     editor=CSVListEditor()),
                Item('list2', label="List(Float)",
                     editor=CSVListEditor(enter_set=True, auto_set=False),
                     tooltip='options: enter_set=True, auto_set=False'),
                Item('list3', label="List(Str, maxlen=3)",
                     editor=CSVListEditor()),
                Item('list4',
                     label="List(Enum('red', 'green', 'blue', 2, 3))",
                     editor=CSVListEditor(sep=None),
                     tooltip='options: sep=None'),
                Item('list5', label="List(Range(low=0.0, high=10.0))",
                     editor=CSVListEditor()),
                Item('list6', label="List(Range(low=-1.0, high='high'))",
                     editor=CSVListEditor()),
                Item('list7', label="List(Range(low='low', high='high'))",
                     editor=CSVListEditor()),
                springy=True,
            ),
            # This VGroup forms the right column; it will display the
            # Python str representation of the lists.
            VGroup(
                UItem('list1str', editor=TextEditor(),
                      enabled_when='False', width=240),
                UItem('list1str', editor=TextEditor(),
                      enabled_when='False', width=240),
                UItem('list2', editor=TextEditor(),
                      enabled_when='False', width=240),
                UItem('list3', editor=TextEditor(),
                      enabled_when='False', width=240),
                UItem('list4', editor=TextEditor(),
                      enabled_when='False', width=240),
                UItem('list5', editor=TextEditor(),
                      enabled_when='False', width=240),
                UItem('list6', editor=TextEditor(),
                      enabled_when='False', width=240),
                UItem('list7', editor=TextEditor(),
                      enabled_when='False', width=240),
            ),
        ),
        '_',
        HGroup('low', 'high', spring, UItem('pop1'), UItem('sort1')),
        Heading("Notes"),
        Label("Hover over a list to see which editor options are set, "
              "if any."),
        Label("The editor of the first list, List(Int), uses "
              "ignore_trailing_sep=False, so a trailing comma is "
              "an error."),
        Label("The second list is a read-only view of the first list."),
        Label("The editor of the List(Float) example has enter_set=True "
              "and auto_set=False; press Enter to validate."),
        Label("The List(Str) example will accept at most 3 elements."),
        Label("The editor of the List(Enum(...)) example uses sep=None, "
              "i.e. whitespace acts as a separator."),
        Label("The last three List(Range(...)) examples take neither, one or "
              "both of their limits from the Low and High fields below."),
        width=720,
        title="CSVListEditor Demonstration",
    )

    def _list1_default(self):
        # Initial content for list1 (and hence the demo's first row).
        return [1, 4, 0, 10]

    def _get_list1str(self):
        # Property getter backing `list1str`.
        return str(self.list1)

    def _pop1_fired(self):
        # Remove and echo the last element of list1, if any.
        if len(self.list1) > 0:
            x = self.list1.pop()
            print(x)

    def _sort1_fired(self):
        # Sort list1 in place.
        self.list1.sort()
class IWorkbench(Interface):
    """ The workbench interface. """

    # 'IWorkbench' interface -----------------------------------------------

    # The active workbench window (the last one to get focus).
    active_window = Instance(WorkbenchWindow)

    # The optional application scripting manager.
    script_manager = Instance("apptools.appscripting.api.IScriptManager")

    # A directory on the local file system that we can read and write to at
    # will. This is used to persist window layout information, etc.
    state_location = Str()

    # The optional undo manager.
    undo_manager = Instance("pyface.undo.api.IUndoManager")

    # The user defined perspectives manager.
    user_perspective_manager = Instance(UserPerspectiveManager)

    # All of the workbench windows created by the workbench.
    windows = List(WorkbenchWindow)

    # Workbench lifecycle events ----

    # Fired when the workbench is about to exit.
    #
    # This can be caused by either:-
    #
    # a) The 'exit' method being called.
    # b) The last open window being closed.
    exiting = VetoableEvent()

    # Fired when the workbench has exited.
    #
    # This is fired after the last open window has been closed.
    exited = Event()

    # Window lifecycle events ----

    # Fired when a workbench window has been created.
    window_created = Event(WindowEvent)

    # Fired when a workbench window is opening.
    window_opening = Event(VetoableWindowEvent)

    # Fired when a workbench window has been opened.
    window_opened = Event(WindowEvent)

    # Fired when a workbench window is closing.
    window_closing = Event(VetoableWindowEvent)

    # Fired when a workbench window has been closed.
    window_closed = Event(WindowEvent)

    # ------------------------------------------------------------------------
    # 'IWorkbench' interface.
    # ------------------------------------------------------------------------

    # NOTE: the methods below are interface stubs -- implementations are
    # provided by concrete workbench classes.

    def create_window(self, **kw):
        """ Factory method that creates a new workbench window. """

    def edit(self, obj, kind=None, use_existing=True):
        """ Edit an object in the active workbench window. """

    def exit(self):
        """ Exit the workbench.

        This closes all open workbench windows.

        This method is not called when the user clicks the close icon. Nor
        when they do an Alt+F4 in Windows. It is only called when the
        application menu File->Exit item is selected.
        """

    def get_editor(self, obj, kind=None):
        """ Return the editor that is editing an object.

        Returns None if no such editor exists.
        """

    def get_editor_by_id(self, id):
        """ Return the editor with the specified Id. """
class DCBTestModel(BMCSModel, Vis2D):
    """Double-cantilever-beam (DCB) finite-element test simulation."""

    #=========================================================================
    # Tree node attributes
    #=========================================================================
    node_name = 'double cantilever beam simulation'

    tree_node_list = List([])

    def _tree_node_list_default(self):
        # Children shown in the model tree.
        return [
            self.tline,
            self.mats_eval,
            self.cross_section,
            self.geometry,
        ]

    def _update_node_list(self):
        # Rebuild the tree children after model changes.
        self.tree_node_list = [
            self.tline,
            self.mats_eval,
            self.cross_section,
            self.geometry,
        ]

    #=========================================================================
    # Interactive control of the time loop
    #=========================================================================
    def init(self):
        self.tloop.init()

    def eval(self):
        return self.tloop.eval()

    def pause(self):
        self.tloop.paused = True

    def stop(self):
        self.tloop.restart = True

    #=========================================================================
    # Test setup parameters
    #=========================================================================
    loading_scenario = Instance(LoadingScenario)

    def _loading_scenario_default(self):
        return LoadingScenario()

    cross_section = Instance(CrossSection)

    def _cross_section_default(self):
        return CrossSection()

    geometry = Instance(Geometry)

    def _geometry_default(self):
        return Geometry()

    #=========================================================================
    # Discretization
    #=========================================================================
    # Number of elements along x / y / z.
    n_e_x = Int(2, auto_set=False, enter_set=True)
    n_e_y = Int(8, auto_set=False, enter_set=True)
    n_e_z = Int(1, auto_set=False, enter_set=True)

    # Maximum imposed displacement (boundary condition parameter).
    w_max = Float(0.01, BC=True, auto_set=False, enter_set=True)

    #=========================================================================
    # Material model
    #=========================================================================
    mats_eval_type = Trait('scalar damage',
                           {'elastic': MATS3DElastic,
                            'scalar damage': MATS3DScalarDamage,
                            'microplane damage (eeq)': MATS3DMplDamageEEQ,
                            'microplane damage (odf)': MATS3DMplDamageODF,
                            },
                           MAT=True)

    @on_trait_change('mats_eval_type')
    def _set_mats_eval(self):
        # Instantiate the mapped material class (the `_` shadow trait).
        self.mats_eval = self.mats_eval_type_()

    @on_trait_change('BC,MAT,MESH')
    def reset_node_list(self):
        self._update_node_list()

    mats_eval = Instance(IMATSEval, MAT=True)
    '''Material model'''

    def _mats_eval_default(self):
        return self.mats_eval_type_()

    material = Property

    def _get_material(self):
        return self.mats_eval

    #=========================================================================
    # Finite element type
    #=========================================================================
    fets_eval = Property(Instance(FETS3D8H), depends_on='CS,MAT')
    '''Finite element time stepper implementing the corrector
    predictor operators at the element level'''
    @cached_property
    def _get_fets_eval(self):
        return FETS3D8H()

    bcond_mngr = Property(Instance(BCondMngr),
                          depends_on='CS,BC,MESH')
    '''Boundary condition manager
    '''
    @cached_property
    def _get_bcond_mngr(self):
        bc_list = [
            self.fixed_left_x,
            self.fixed_top_y,
            self.link_right_cs,
            self.control_bc,
        ] + self.link_right_x
        return BCondMngr(bcond_list=bc_list)

    fixed_left_x = Property(depends_on='CS, BC,GEO,MESH')
    '''Fixed boundary condition'''
    @cached_property
    def _get_fixed_left_x(self):
        # Fix x-displacement above the notch (fraction a/L of the height).
        a_L = self.geometry.a / self.geometry.L
        n_a = int(a_L * self.n_e_y)
        # NOTE(review): leftover debug print.
        print('n_a', n_a)
        return BCSlice(slice=self.fe_grid[0, n_a:, :, 0, -1, :],
                       var='u', dims=[0], value=0)

    fixed_top_y = Property(depends_on='CS, BC,GEO,MESH')
    '''Fixed boundary condition'''
    @cached_property
    def _get_fixed_top_y(self):
        return BCSlice(slice=self.fe_grid[:, -1, :, :, -1, :],
                       var='u', dims=[1, 2], value=0)

    link_right_cs = Property(depends_on='CS,BC,GEO,MESH')
    '''Fixed boundary condition'''
    @cached_property
    def _get_link_right_cs(self):
        # Link front and back faces of the right cross section.
        f_dof = self.fe_grid[-1, :, -1, -1, :, -1]
        b_dof = self.fe_grid[-1, :, 0, -1, :, 0]
        return BCSlice(name='link_cs', slice=f_dof, link_slice=b_dof,
                       dims=[0], link_coeffs=[1], value=0)

    link_right_x = Property(depends_on='CS,BC,GEO,MESH')
    '''Fixed boundary condition'''
    @cached_property
    def _get_link_right_x(self):
        # Constrain intermediate right-edge nodes to interpolate linearly
        # between the bottom and top corner displacements.
        top = self.fe_grid[-1, -1, 0, -1, -1, 0]
        bot = self.fe_grid[-1, 0, 0, -1, 0, 0]
        linked = self.fe_grid[-1, 1:, 0, -1, 0, 0]
        Ty = top.dof_X[0, 0, 1]
        By = bot.dof_X[0, 0, 1]
        Ly = linked.dof_X[:, :, 1].flatten()
        H = Ty - By
        link_ratios = Ly / H
        top_dof = top.dofs[0, 0, 0]
        bot_dof = bot.dofs[0, 0, 0]
        linked_dofs = linked.dofs[:, :, 0].flatten()
        bcdof_list = []
        for linked_dof, link_ratio in zip(linked_dofs, link_ratios):
            link_bc = BCDof(var='u',
                            dof=linked_dof,
                            value=0,
                            link_dofs=[bot_dof, top_dof],
                            link_coeffs=[1 - link_ratio, link_ratio])
            bcdof_list.append(link_bc)
        return bcdof_list

    control_bc = Property(depends_on='CS,BC,GEO,MESH')
    '''Fixed boundary condition'''
    @cached_property
    def _get_control_bc(self):
        return BCSlice(
            #slice=self.fe_grid[0, 0, :, 0, :, :],
            slice=self.fe_grid[-1, 0, 0, -1, 0, 0],
            var='u', dims=[0], value=self.w_max)

    dots_grid = Property(Instance(DOTSGrid),
                         depends_on='CS,MAT,GEO,MESH,FE')
    '''Discretization object.
    '''
    @cached_property
    def _get_dots_grid(self):
        cs = self.cross_section
        geo = self.geometry
        # NOTE(review): leftover debug print.
        print(self.mats_eval)
        return DOTSGrid(
            L_x=cs.h, L_y=geo.L, L_z=cs.b,
            n_x=self.n_e_x, n_y=self.n_e_y, n_z=self.n_e_z,
            fets=self.fets_eval, mats=self.mats_eval
        )

    fe_grid = Property

    def _get_fe_grid(self):
        return self.dots_grid.mesh

    tline = Instance(TLine)

    def _tline_default(self):
        t_max = 1.0
        d_t = 0.1
        return TLine(min=0.0, step=d_t, max=t_max,
                     time_change_notifier=self.time_changed,
                     )

    k_max = Int(200, ALG=True)
    tolerance = Float(1e-4, ALG=True)
    tloop = Property(Instance(TimeLoop),
                     depends_on='MAT,GEO,MESH,CS,TIME,ALG,BC')
    '''Algorithm controlling the time stepping.
    '''
    @cached_property
    def _get_tloop(self):
        k_max = self.k_max
        tolerance = self.tolerance
        return TimeLoop(ts=self.dots_grid, k_max=k_max,
                        tolerance=tolerance,
                        tline=self.tline, bc_mngr=self.bcond_mngr)

    def get_PW(self):
        # Return (force, displacement) histories at the recorded edge dofs.
        record_dofs = np.unique(
            self.fe_grid[-1, :, :, -1, :, :].dofs[:, :, 0].flatten())
        # record_dofs = np.unique(
        #     self.fe_grid[0, 0, :, 0, :, :].dofs[:, :, 0].flatten()
        # )
        Fd_int_t = np.array(self.tloop.F_int_record)
        Ud_t = np.array(self.tloop.U_record)
        F_int_t = np.sum(Fd_int_t[:, record_dofs], axis=1)
        U_t = Ud_t[:, record_dofs[0]]
        return F_int_t, U_t

    viz2d_classes = {'F-w': Viz2DForceDeflectionX,
                     'load function': Viz2DLoadControlFunction,
                     }

    traits_view = View(Item('mats_eval_type'),)

    tree_view = traits_view
class SimDBClassExt(HasStrictTraits):
    """Pickle-file backed database extension for a `SimDBClass` subclass.

    Instances of `self.klass` are persisted as ``<key>.pickle`` files in a
    per-class directory under the simdb tree, alongside a set of in-memory
    keyed constants.
    """

    category = Enum('matdata', 'exdata')

    def _category_default(self):
        return 'matdata'

    path = List([])

    # dictionary of predefined instances - used for
    # debugging and early stages of class development.
    # klass = Type

    # Verbose mode either none, or upon write/save pickle file
    # or upon deletion of pickle files.
    verbose = Enum('none', 'io', 'del')

    classname = Property(depends_on='klass')

    @cached_property
    def _get_classname(self):
        return self.klass.__name__

    field_names = Property

    def _get_field_names(self):
        '''
        Get the dictionary of factors provided by the simulation model.

        The factors are identified by the simdb metadata in the trait
        definition. For example

        my_param = Float(20, factor_levels=(0, 10, 6))

        specifies a float factor with the levels [0, 2, 4, 6, 8, 10]
        '''
        # BUG FIX: was ``x != None`` -- use an identity test for None.
        traits = self.klass.class_traits(simdb=lambda x: x is not None)
        return list(traits.keys())

    constants = Dict({})

    keyed_constants = Property(List)

    def _get_keyed_constants(self):
        # Stamp each constant with its key before handing the dict out.
        for key, c in list(self.constants.items()):
            c.key = key
        return self.constants

    dirname = Str

    def _dirname_default(self):
        '''Name of the directory for the data of the class.'''
        klass_dir = self.klass.__name__
        full_path = (simdb.simdb_dir, self.category) \
            + tuple(self.path) + (klass_dir,)
        return os.path.join(*full_path)

    dir = Directory()

    def _dir_default(self):
        '''Directory for the data of the class (created if missing).'''
        # foolproof creation of the directory
        try:
            os.makedirs(self.dirname)
        except OSError:
            if not os.path.exists(self.dirname):
                # There was an error on creation, so make sure we know
                # about it.
                raise
        return self.dirname

    instances = Dict

    def _instances_default(self):
        '''Read the content of the directory.'''
        instances = {}
        for obj_file_name in os.listdir(self.dir):
            # check to see whether the file is pickle or not
            path = os.path.join(self.dir, obj_file_name)
            if not os.path.isfile(path):
                continue
            # BUG FIX: replaced the Python-2-only
            # ``string.split(obj_file_name, '.')`` + ``reduce`` pair with
            # str.split / str.join.  The key is the file name without its
            # extension.
            key = '.'.join(obj_file_name.split('.')[:-1])
            if self.verbose == 'io':
                print('%s.db: reading %s' % (self.klass.__name__, key))
            # BUG FIX: pickle files must be opened in binary mode under
            # Python 3 (was text mode 'r'); a context manager guarantees
            # the file is closed.
            with open(path, 'rb') as obj_file:
                try:
                    instances[key] = pickle.load(obj_file)
                except ImportError:
                    # BUG FIX: report the file *name* (was the file
                    # object) and re-raise preserving the traceback.
                    print('file name %s' % obj_file_name)
                    raise
            # let the object know its key
            instances[key].key = key
        return instances

    inst_list = Property

    def _get_inst_list(self):
        return list(self.keyed_constants.values()) + list(
            self.instances.values())

    selected_instance = Instance(SimDBClass)

    def _selected_instance_default(self):
        if len(self.inst_list) > 0:
            return self.inst_list[0]
        else:
            return None

    def keys(self):
        return list(self.keyed_constants.keys()) + list(self.instances.keys())

    def get(self, name, Missing):
        '''Return the object stored under `name`, or `Missing` if absent.'''
        it = self.keyed_constants.get(name, Missing)
        # BUG FIX: identity test for the caller-supplied sentinel (was ==).
        if it is Missing:
            it = self.instances.get(name, Missing)
        return it

    def __setitem__(self, key, value):
        '''Save the instance with the specified key.'''
        # check if the key corresponds to a constant
        # if yes, report an error
        if key in list(self.keys()):
            raise IndexError('an object with key %s already exists' % key)
        it = self.keyed_constants.get(key, None)
        if it:
            raise ValueError('attempting to change a constant %s' % key)
        else:
            self.save_item(key, value)
            # register the object in the memory as well
            self.instances[key] = value

    def save_item(self, key, value):
        '''Pickle `value` into ``<key>.pickle`` (whitespace -> underscore).'''
        for x in string.whitespace:
            key = key.replace(x, "_")
        obj_file_name = os.path.join(self.dir, key + '.pickle')
        if self.verbose == 'io':
            print('%s.db: writing %s' % (self.klass.__name__, key))
        # BUG FIX: pickle requires a binary-mode file under Python 3
        # (was text mode 'w'); a context manager guarantees the close.
        with open(obj_file_name, 'wb') as obj_file:
            pickle.dump(value, obj_file, protocol=0)  # slow text mode

    def __getitem__(self, key):
        '''Return the instance with the specified key.'''
        it = self.keyed_constants.get(key, None)
        # BUG FIX: identity tests for None (was ==).
        if it is None:
            it = self.instances.get(key, None)
            if it is None:
                raise ValueError(
                    'No database object with the key %s for class %s' %
                    (key, self.classname))
        return it

    def __delitem__(self, key):
        # check if the key corresponds to a constant
        # if yes, report an error
        it = self.keyed_constants.get(key, None)
        if it:
            raise ValueError('attempting to delete a constant %s' % key)
        else:
            for x in string.whitespace:
                key = key.replace(x, "_")
            # remove the pickle file from the database directory
            obj_file_name = os.path.join(self.dir, key + '.pickle')
            if os.path.exists(obj_file_name):
                os.remove(obj_file_name)
            del self.instances[key]

    def delete_instances(self):
        for key in list(self.instances.keys()):
            self.__delitem__(key)

    #-------------------------------------------------------------------------
    # VIEW
    #-------------------------------------------------------------------------
    # NOTE(review): the id 'simbd.table.instances' looks like a typo for
    # 'simdb...'; left unchanged because persisted layouts key on it.
    traits_view = View(
        HSplit(
            VSplit(
                VGroup(
                    HGroup(
                        Item('classname',
                             style='readonly',
                             label='database extension class')),
                    Item('inst_list',
                         editor=simdb_table_editor,
                         show_label=False,
                         style='custom'),
                    label='database table',
                    id='simbd.table.instances',
                    dock='tab',
                ),
                id='simdb.table.left',
            ),
            VGroup(
                VGroup(
                    Item('selected_instance@',
                         resizable=True,
                         show_label=False),
                    label='instance',
                    id='simdb.table.instance',
                    dock='tab',
                    scrollable=True,
                ),
                id='simdb.table.right',
                layout='split',
                label='selected instance',
                dock='tab',
            ),
            id='simdb.table.splitter',
        ),
        id='simdb.table',
        dock='tab',
        resizable=True,
        buttons=['OK', 'Cancel'],
        height=0.8,
        width=0.8,
    )
class FileDialog(Dialog):
    """ A dialog widget that allows the user to open/save files. """

    #: The mode of the dialog: 'open' or 'save'
    mode = Enum('open', 'save')

    #: Whether to allow selecting multiple files in 'open' mode.
    multi_select = Bool(False)

    #: The current directory of the file dialog.
    directory = Unicode

    #: The file selected in the dialog.
    filename = Unicode

    #: A read-only property which returns the full path to the file,
    #: or the first file in the selection if multi_select is True.
    path = Property(Unicode, depends_on=['directory', 'filename'])

    #: A read-only property which returns a list of selected paths.
    paths = Property(List(Unicode), depends_on='_paths')

    #: The private internal storage for the 'paths' property. This is
    #: updated by the toolkit specific backends.
    _paths = List(Unicode)

    #: The string filters used to restrict the set of files.
    filters = List(Unicode)

    #: The selected filter from the list of filters.
    selected_filter = Enum(values='filters')

    #: Overridden parent class trait
    abstract_obj = Instance(AbstractTkFileDialog)

    #--------------------------------------------------------------------------
    # Property Getters
    #--------------------------------------------------------------------------

    @cached_property
    def _get_path(self):
        """ Property getter for 'path': the directory joined with the
        current filename.
        """
        directory = self.directory
        filename = self.filename
        return os.path.join(directory, filename)

    @cached_property
    def _get_paths(self):
        """ Property getter for 'paths': exposes the backend-maintained
        private '_paths' list.
        """
        return self._paths

    #--------------------------------------------------------------------------
    # Parent Class Overrides
    #--------------------------------------------------------------------------

    def add_subcomponent(self, component):
        """ Overridden parent class method: a FileDialog may not declare
        subcomponents, so this always raises.
        """
        raise ValueError("Cannot add subcomponents to a FileDialog.")
class MComboField(HasTraits):
    """ Mixin providing combobox-field behavior: a current value selected
    from a non-empty list of available values, rendered in the toolkit
    control via a formatter callable.
    """

    #: The current text value of the combobox.
    value = Enum(values='values')

    #: The list of available values for the combobox.
    values = List(minlen=1)

    #: Callable that converts a value to text plus an optional icon.
    #: Should return either a unicode string or a tuple of image resource
    #: and string.
    formatter = Callable(text_type, allow_none=False)

    # ------------------------------------------------------------------------
    # object interface
    # ------------------------------------------------------------------------

    def __init__(self, values, **traits):
        """ Initialize the field.

        Parameters
        ----------
        values : list
            The available values.  The initial 'value' defaults to the
            first entry unless one is supplied in ``traits``.
        **traits
            Additional trait values to set on the instance.
        """
        # Pop 'value' before trait assignment so that 'values' is in place
        # first (the Enum validates 'value' against 'values').
        value = traits.pop('value', values[0])
        traits['values'] = values
        super(MComboField, self).__init__(**traits)
        self.value = value

    # ------------------------------------------------------------------------
    # Private interface
    # ------------------------------------------------------------------------

    def _initialize_control(self):
        # Push the available values and the current selection into the
        # toolkit control after base-class initialization.
        super(MComboField, self)._initialize_control()
        self._set_control_values(self.values)
        self._set_control_value(self.value)

    def _add_event_listeners(self):
        """ Set up toolkit-specific bindings for events """
        super(MComboField, self)._add_event_listeners()
        self.on_trait_change(self._values_updated, 'values[],formatter',
                             dispatch='ui')
        if self.control is not None:
            self._observe_control_value()

    def _remove_event_listeners(self):
        """ Remove toolkit-specific bindings for events """
        if self.control is not None:
            self._observe_control_value(remove=True)
        self.on_trait_change(self._values_updated, 'values[],formatter',
                             dispatch='ui', remove=True)
        super(MComboField, self)._remove_event_listeners()

    # Toolkit control interface ---------------------------------------------

    def _get_control_text_values(self):
        """ Toolkit specific method to get the control's text values. """
        raise NotImplementedError

    def _set_control_values(self, values):
        """ Toolkit specific method to set the control's values. """
        raise NotImplementedError

    # Trait change handlers --------------------------------------------------

    def _values_updated(self):
        # Re-sync the control whenever the value list or the formatter
        # changes (dispatched on the UI thread).
        if self.control is not None:
            self._set_control_values(self.values)
# Preferred-value policy specialized for age analyses.
class AgePreferredValue(PreferredValue):
    # Default selection strategy; PLATEAU_ELSE_WEIGHTED_MEAN is a module
    # constant defined elsewhere — presumably "use plateau age if available,
    # otherwise the weighted mean" — TODO confirm at its definition.
    kind = Str(PLATEAU_ELSE_WEIGHTED_MEAN)
    # The allowed kinds; AGE_SUBGROUPINGS is defined elsewhere in the module.
    kinds = List(AGE_SUBGROUPINGS)
class Editor(HasPrivateTraits):
    """ Represents an editing control for an object trait in a Traits-based
        user interface.
    """

    #: The UI (user interface) this editor is part of:
    ui = Instance("traitsui.ui.UI", clean_up=True)

    #: Full name of the object the editor is editing (e.g.
    #: 'object.link1.link2'):
    object_name = Str("object")

    #: The object this editor is editing (e.g. object.link1.link2):
    object = Instance(HasTraits, clean_up=True)

    #: The name of the trait this editor is editing (e.g. 'value'):
    name = ReadOnly

    #: The context object the editor is editing (e.g. object):
    context_object = Property

    #: The extended name of the object trait being edited. That is,
    #: 'object_name.name' minus the context object name at the beginning. For
    #: example: 'link1.link2.value':
    extended_name = Property

    #: Original value of object.name (e.g. object.link1.link2.value):
    old_value = Any(clean_up=True)

    #: Text description of the object trait being edited:
    description = ReadOnly

    #: The Item object used to create this editor:
    item = Instance(Item, (), clean_up=True)

    #: The GUI widget defined by this editor:
    control = Any(clean_up=True)

    #: The GUI label (if any) defined by this editor:
    label_control = Any(clean_up=True)

    #: Is the underlying GUI widget enabled?
    enabled = Bool(True)

    #: Is the underlying GUI widget visible?
    visible = Bool(True)

    #: Is the underlying GUI widget scrollable?
    scrollable = Bool(False)

    #: The EditorFactory used to create this editor:
    factory = Instance(EditorFactory, clean_up=True)

    #: Is the editor updating the object.name value?
    updating = Bool(False)

    #: Current value for object.name:
    value = Property

    #: Current value of object trait as a string:
    str_value = Property

    #: The trait the editor is editing (not its value, but the trait itself):
    value_trait = Property

    #: The current editor invalid state status:
    invalid = Bool(False)

    # -- private trait definitions ------------------------------------------

    #: A set to track values being updated to prevent infinite recursion.
    _no_trait_update = Set(Str)

    #: A list of all values synchronized to.
    _user_to = List(Tuple(Any, Str, Callable))

    #: A list of all values synchronized from.
    _user_from = List(Tuple(Str, Callable))

    # ------------------------------------------------------------------------
    # Editor interface
    # ------------------------------------------------------------------------

    # -- Abstract methods ---------------------------------------------------

    def init(self, parent):
        """ Create and initialize the underlying toolkit widget.

        This method must be overridden by subclasses.  Implementations must
        ensure that the :attr:`control` trait is set to an appropriate
        toolkit object.

        Parameters
        ----------
        parent : toolkit control
            The parent toolkit object of the editor's toolkit objects.
        """
        raise NotImplementedError("This method must be overriden.")

    def update_editor(self):
        """ Updates the editor when the value changes externally to the
        editor.

        This should normally be overridden in a subclass.
        """
        pass

    def error(self, excp):
        """ Handles an error that occurs while setting the object's trait
        value.

        This should normally be overridden in a subclass.

        Parameters
        ----------
        excp : Exception
            The exception which occurred.
        """
        pass

    def set_focus(self):
        """ Assigns focus to the editor's underlying toolkit widget.

        This method must be overridden by subclasses.
        """
        raise NotImplementedError("This method must be overriden.")

    def string_value(self, value, format_func=None):
        """ Returns the text representation of a specified object trait value.

        This simply delegates to the factory's `string_value` method.
        Sub-classes may choose to override the default implementation.

        Parameters
        ----------
        value : any
            The value being edited.
        format_func : callable or None
            A function that takes a value and returns a string.
        """
        return self.factory.string_value(value, format_func)

    def restore_prefs(self, prefs):
        """ Restores saved user preference information for the editor.

        Editors with state may choose to override this. It will only be used
        if the editor has an `id` value.

        Parameters
        ----------
        prefs : dict
            A dictionary of preference values.
        """
        pass

    def save_prefs(self):
        """ Returns any user preference information for the editor.

        Editors with state may choose to override this. It will only be used
        if the editor has an `id` value.

        Returns
        -------
        prefs : dict or None
            A dictionary of preference values, or None if no preferences to
            be saved.
        """
        return None

    # -- Editor life-cycle methods ------------------------------------------

    def prepare(self, parent):
        """ Finish setting up the editor.

        Hooks up the change listener on the edited trait, creates the
        toolkit control via :meth:`init`, synchronizes factory traits and
        performs an initial :meth:`update_editor`.

        Parameters
        ----------
        parent : toolkit control
            The parent toolkit object of the editor's toolkit objects.
        """
        name = self.extended_name
        if name != "None":
            self.context_object.on_trait_change(
                self._update_editor, name, dispatch="ui"
            )
        self.init(parent)
        self._sync_values()
        self.update_editor()

    def dispose(self):
        """ Disposes of the contents of an editor.

        This disconnects any synchronised values and resets references
        to other objects.

        Subclasses may chose to override this method to perform additional
        clean-up.
        """
        if self.ui is None:
            return

        name = self.extended_name
        if name != "None":
            self.context_object.on_trait_change(
                self._update_editor, name, remove=True
            )

        # Remove both directions of user<->editor synchronization:
        for name, handler in self._user_from:
            self.on_trait_change(handler, name, remove=True)

        for object, name, handler in self._user_to:
            object.on_trait_change(handler, name, remove=True)

        # Break linkages to references we no longer need:
        for name in self.trait_names(clean_up=True):
            setattr(self, name, None)

    # -- Undo/redo methods --------------------------------------------------

    def log_change(self, undo_factory, *undo_args):
        """ Logs a change made in the editor with undo/redo history.

        Parameters
        ----------
        undo_factory : callable
            Callable that creates an undo item.  Often self.get_undo_item.
        *undo_args
            Any arguments to pass to the undo factory.
        """
        # Indicate that the contents of the user interface have been changed:
        ui = self.ui
        ui.modified = True

        # Create an undo history entry if we are maintaining a history:
        undoable = ui._undoable
        if undoable >= 0:
            history = ui.history
            if history is not None:
                item = undo_factory(*undo_args)
                if item is not None:
                    if undoable == history.now:
                        # Create a new undo transaction:
                        history.add(item)
                    else:
                        # Extend the most recent undo transaction:
                        history.extend(item)

    def get_undo_item(self, object, name, old_value, new_value):
        """ Creates an undo history entry.

        Can be overridden in a subclass for special value types.

        Parameters
        ----------
        object : HasTraits instance
            The object being modified.
        name : str
            The name of the trait that is to be changed.
        old_value : any
            The original value of the trait.
        new_value : any
            The new value of the trait.
        """
        return UndoItem(
            object=object, name=name, old_value=old_value, new_value=new_value
        )

    # -- Trait synchronization code -----------------------------------------

    def sync_value(
        self,
        user_name,
        editor_name,
        mode="both",
        is_list=False,
        is_event=False,
    ):
        """ Synchronize an editor trait and a user object trait.

        Also sets the initial value of the editor trait from the
        user object trait (for modes 'from' and 'both'), and the initial
        value of the user object trait from the editor trait (for mode
        'to'), as long as the relevant traits are not events.

        Parameters
        ----------
        user_name : str
            The name of the trait to be used on the user object. If empty, no
            synchronization will be set up.
        editor_name : str
            The name of the relevant editor trait.
        mode : str, optional; one of 'to', 'from' or 'both'
            The direction of synchronization. 'from' means that trait changes
            in the user object should be propagated to the editor. 'to' means
            that trait changes in the editor should be propagated to the user
            object. 'both' means changes should be propagated in both
            directions. The default is 'both'.
        is_list : bool, optional
            If true, synchronization for item events will be set up in
            addition to the synchronization for the object itself.
            The default is False.
        is_event : bool, optional
            If true, this method won't attempt to initialize the user
            object or editor trait values. The default is False.
        """
        if user_name == "":
            return

        # Key used to guard against recursive user<->editor updates:
        key = "%s:%s" % (user_name, editor_name)

        # Resolve a dotted user name against the UI context:
        parts = user_name.split(".")
        if len(parts) == 1:
            user_object = self.context_object
            xuser_name = user_name
        else:
            user_object = self.ui.context[parts[0]]
            xuser_name = ".".join(parts[1:])
            user_name = parts[-1]

        if mode in {"from", "both"}:
            self._bind_from(key, user_object, xuser_name, editor_name, is_list)

            if not is_event:
                # initialize editor value from user value
                with self.raise_to_debug():
                    user_value = xgetattr(user_object, xuser_name)
                    setattr(self, editor_name, user_value)

        if mode in {"to", "both"}:
            self._bind_to(key, user_object, xuser_name, editor_name, is_list)

            if mode == "to" and not is_event:
                # initialize user value from editor value
                with self.raise_to_debug():
                    editor_value = xgetattr(self, editor_name)
                    xsetattr(user_object, xuser_name, editor_value)

    # -- Utility methods -----------------------------------------------------

    def parse_extended_name(self, name):
        """ Extract the object, name and a getter from an extended name

        Parameters
        ----------
        name : str
            The extended name to parse.

        Returns
        -------
        object, name, getter : any, str, callable
            The object from the context, the (extended) name of the
            attributes holding the value, and a callable which gets the
            current value from the context.
        """
        base_name, __, name = name.partition(".")
        if name:
            object = self.ui.context[base_name]
        else:
            name = base_name
            object = self.context_object

        return (object, name, partial(xgetattr, object, name))

    # -- Utility context managers --------------------------------------------

    @contextmanager
    def no_trait_update(self, name):
        """ Context manager that blocks updates from the named trait. """
        if name in self._no_trait_update:
            # Already guarded higher up the stack; do nothing extra.
            yield
            return

        self._no_trait_update.add(name)
        try:
            yield
        finally:
            self._no_trait_update.remove(name)

    @contextmanager
    def raise_to_debug(self):
        """ Context manager that uses raise to debug to raise exceptions.

        Exceptions in the managed block are routed to the debug handler
        instead of propagating.
        """
        try:
            yield
        except Exception:
            from traitsui.api import raise_to_debug

            raise_to_debug()

    @contextmanager
    def updating_value(self):
        """ Context manager to handle updating value. """
        if self.updating:
            # Already inside an update; avoid toggling the flag off early.
            yield
            return

        self.updating = True
        try:
            yield
        finally:
            self.updating = False

    # ------------------------------------------------------------------------
    # object interface
    # ------------------------------------------------------------------------

    def __init__(self, parent, **traits):
        """ Initializes the editor object.

        Records the original value of the edited trait and hooks up the
        'invalid' state synchronization from the factory.
        """
        # NOTE(review): super(HasPrivateTraits, self) skips
        # HasPrivateTraits.__init__ in the MRO — looks deliberate but
        # unusual; confirm against the project's traits version.
        super(HasPrivateTraits, self).__init__(**traits)
        try:
            self.old_value = getattr(self.object, self.name)
        except AttributeError:
            ctrait = self.object.base_trait(self.name)
            if ctrait.type == "event" or self.name == "spring":
                # Getting the attribute will fail for 'Event' traits:
                self.old_value = Undefined
            else:
                raise

        # Synchronize the application invalid state status with the editor's:
        self.sync_value(self.factory.invalid, "invalid", "from")

    # ------------------------------------------------------------------------
    # private methods
    # ------------------------------------------------------------------------

    def _update_editor(self, object, name, old_value, new_value):
        """ Performs updates when the object trait changes.

        This is designed to be used as a trait listener.
        """
        # If background threads have modified the trait the editor is bound
        # to, their trait notifications are queued to the UI thread. It is
        # possible that by the time the UI thread dispatches these events,
        # the UI the editor is part of has already been closed. So we need
        # to check if we are still bound to a live UI, and if not, exit
        # immediately:
        if self.ui is None:
            return

        # If the notification is for an object different than the one
        # actually being edited, it is due to editing an item of the form:
        # object.link1.link2.name, where one of the 'link' objects may have
        # been modified. In this case, we need to rebind the current object
        # being edited:
        if object is not self.object:
            self.object = self.ui.get_extended_value(self.object_name)

        # If the editor has gone away for some reason, disconnect and exit:
        if self.control is None:
            self.context_object.on_trait_change(
                self._update_editor, self.extended_name, remove=True
            )
            return

        # Log the change that was made (as long as the Item is not readonly
        # or it is not for an event):
        if (
            self.item.style != "readonly"
            and object.base_trait(name).type != "event"
        ):
            self.log_change(
                self.get_undo_item, object, name, old_value, new_value
            )

        # If the change was not caused by the editor itself:
        if not self.updating:
            # Update the editor control to reflect the current object state:
            self.update_editor()

    def _sync_values(self):
        """ Initialize and synchronize editor and factory traits

        Initializes and synchronizes (as needed) editor traits with the
        value of corresponding factory traits.  The name of the factory
        trait and the editor trait must match and the factory trait needs
        to have ``sync_value`` metadata set.  The strategy followed is:

        - for each factory trait with ``sync_value`` metadata:

          1.  if the value is a :class:`ContextValue` instance then
              call :meth:`sync_value` with the ``name`` from the
              context value.
          2.  if the trait has ``sync_name`` metadata, look at the
              referenced trait value and if it is a non-empty string
              then use this value as the name of the value in the
              context.
          3.  otherwise initialize the current value of the factory
              trait to the corresponding value of the editor.

        - synchronization mode in cases 1 and 2 is taken from the
          ``sync_value`` metadata of the editor trait first and then
          the ``sync_value`` metadata of the factory trait if that is
          empty.

        - if the value is a container type, then the `is_list` metadata
          is set to True.
        """
        factory = self.factory
        for name, trait in factory.traits(sync_value=not_none).items():
            value = getattr(factory, name)
            self_trait = self.trait(name)
            # Editor trait metadata takes precedence over the factory's:
            if self_trait.sync_value:
                mode = self_trait.sync_value
            else:
                mode = trait.sync_value
            if isinstance(value, ContextValue):
                self.sync_value(
                    value.name,
                    name,
                    mode,
                    bool(self_trait.is_list),
                    self_trait.type == "event",
                )
            elif (
                trait.sync_name is not None
                and getattr(factory, trait.sync_name, "") != ""
            ):
                # Note: this is implemented as a stepping stone from things
                # like ``low_name`` and ``high_name`` to using context
                # values.
                sync_name = getattr(factory, trait.sync_name)
                self.sync_value(
                    sync_name,
                    name,
                    mode,
                    bool(self_trait.is_list),
                    self_trait.type == "event",
                )
            elif value is not Undefined:
                setattr(self, name, value)

    def _bind_from(self, key, user_object, xuser_name, editor_name, is_list):
        """ Bind trait change handlers from a user object to the editor.

        Parameters
        ----------
        key : str
            The key to use to guard against recursive updates.
        user_object : object
            The object in the TraitsUI context that is being bound.
        xuser_name: : str
            The extended name of the trait to be used on the user object.
        editor_name : str
            The name of the relevant editor trait.
        is_list : bool, optional
            If true, synchronization for item events will be set up in
            addition to the synchronization for the object itself.
            The default is False.
        """

        def user_trait_modified(new):
            # Propagate a user-object change into the editor trait:
            if key not in self._no_trait_update:
                with self.no_trait_update(key), self.raise_to_debug():
                    xsetattr(self, editor_name, new)

        user_object.on_trait_change(user_trait_modified, xuser_name)
        self._user_to.append((user_object, xuser_name, user_trait_modified))

        if is_list:

            def user_list_modified(event):
                # Mirror in-place list edits (a TraitListEvent) into the
                # editor-side list using a slice assignment:
                if (
                    isinstance(event, TraitListEvent)
                    and key not in self._no_trait_update
                ):
                    with self.no_trait_update(key), self.raise_to_debug():
                        n = event.index
                        getattr(self, editor_name)[
                            n : n + len(event.removed)
                        ] = event.added

            items = xuser_name + "_items"
            user_object.on_trait_change(user_list_modified, items)
            self._user_to.append((user_object, items, user_list_modified))

    def _bind_to(self, key, user_object, xuser_name, editor_name, is_list):
        """ Bind trait change handlers from the editor to a user object.

        Parameters
        ----------
        key : str
            The key to use to guard against recursive updates.
        user_object : object
            The object in the TraitsUI context that is being bound.
        xuser_name: : str
            The extended name of the trait to be used on the user object.
        editor_name : str
            The name of the relevant editor trait.
        is_list : bool, optional
            If true, synchronization for item events will be set up in
            addition to the synchronization for the object itself.
            The default is False.
        """

        def editor_trait_modified(new):
            # Propagate an editor change into the user object trait:
            if key not in self._no_trait_update:
                with self.no_trait_update(key), self.raise_to_debug():
                    xsetattr(user_object, xuser_name, new)

        self.on_trait_change(editor_trait_modified, editor_name)
        self._user_from.append((editor_name, editor_trait_modified))

        if is_list:

            def editor_list_modified(event):
                # Mirror in-place list edits on the editor side into the
                # user-object list using a slice assignment:
                if key not in self._no_trait_update:
                    with self.no_trait_update(key), self.raise_to_debug():
                        n = event.index
                        value = xgetattr(user_object, xuser_name)
                        value[n : n + len(event.removed)] = event.added

            self.on_trait_change(editor_list_modified, editor_name + "_items")
            self._user_from.append(
                (editor_name + "_items", editor_list_modified)
            )

    def __set_value(self, value):
        """ Set the value of the trait the editor is editing.

        This calls the appropriate setattr method on the handler to
        perform the actual change.
        """
        with self.updating_value():
            try:
                handler = self.ui.handler
                obj_name = self.object_name
                name = self.name
                # Prefer the most specific handler hook available:
                # '<object>_<name>_setattr', then '<name>_setattr',
                # then the generic 'setattr'.
                method = (
                    getattr(handler, "%s_%s_setattr" % (obj_name, name), None)
                    or getattr(handler, "%s_setattr" % name, None)
                    or getattr(handler, "setattr")
                )
                method(self.ui.info, self.object, name, value)
            except TraitError as excp:
                self.error(excp)
                raise

    def _str(self, value):
        """ Returns the text representation of a specified value.

        This is a convenience method to cover the differences between
        Python 2 and Python 3 strings.

        Parameters
        ----------
        value : any
            The value to be represented as a string.

        Returns
        -------
        string : unicode
            The string of the value, as an appropriate text type for
            Python 2 or 3.
        """
        # In Unicode!
        return six.text_type(value)

    # -- Traits property getters and setters --------------------------------

    @cached_property
    def _get_context_object(self):
        """ Returns the context object the editor is using

        In some cases a proxy object is edited rather than an object
        directly in the context, in which case we return ``self.object``.
        """
        object_name = self.object_name
        context_key = object_name.split(".", 1)[0]
        if (object_name != "") and (context_key in self.ui.context):
            return self.ui.context[context_key]

        # This handles the case of a 'ListItemProxy', which is not in the
        # ui.context, but is the editor 'object':
        return self.object

    @cached_property
    def _get_extended_name(self):
        """ Returns the extended trait name being edited. """
        return ("%s.%s" % (self.object_name, self.name)).split(".", 1)[1]

    def _get_value_trait(self):
        """ Returns the trait the editor is editing (Property
        implementation).
        """
        return self.object.trait(self.name)

    def _get_value(self):
        """ Returns the value of the trait the editor is editing. """
        return getattr(self.object, self.name, Undefined)

    def _set_value(self, value):
        """ Set the value of the trait the editor is editing.

        Dispatches via the TraitsUI Undo/Redo mechanisms to make change
        reversible, if desired.
        """
        if self.ui and self.name != "None":
            self.ui.do_undoable(self.__set_value, value)

    def _get_str_value(self):
        """ Returns the text representation of the object trait. """
        return self.string_value(getattr(self.object, self.name, Undefined))
class BaseNDView(BaseDataView):
    """ A data view that plots data from one or more channels.

    Attributes
    ----------
    channels : List(Str)
        The channels to view

    scale : Dict(Str : {"linear", "logicle", "log"})
        Re-scale the data in the specified channels before plotting.  If a
        channel isn't specified, assume that the scale is linear.
    """

    channels = List(Str)
    scale = Dict(Str, util.ScaleEnum)

    def plot(self, experiment, **kwargs):
        """ Validate the channel/scale configuration, then delegate to the
        base class plot.

        Parameters
        ----------
        lim : Dict(Str : (float, float))
            Set the range of each channel's axis.  If unspecified, assume
            that the limits are the minimum and maximum of the clipped data
        """
        # -- validation: experiment, channels, and scale keys ---------------
        if experiment is None:
            raise util.CytoflowViewError('experiment', "No experiment specified")

        if not self.channels:
            raise util.CytoflowOpError('channels', "Must set at least one channel")

        for channel in self.channels:
            if channel not in experiment.data:
                raise util.CytoflowOpError(
                    'channels',
                    "Channel {0} not found in the experiment".format(channel))

        for channel in self.scale:
            if channel not in self.channels:
                raise util.CytoflowOpError(
                    'scale',
                    "Scale set for channel {0}, but it isn't "
                    "in 'channels'".format(channel))

        # -- build a scale object per channel -------------------------------
        # Channels without an explicit entry fall back to the default scale.
        scale = {}
        for channel in self.channels:
            scale_name = (self.scale[channel]
                          if channel in self.scale
                          else util.get_default_scale())
            scale[channel] = util.scale_factory(scale_name,
                                                experiment,
                                                channel=channel)

        # -- normalize axis limits: every channel gets an entry -------------
        lim = kwargs.pop("lim", {})
        for channel in self.channels:
            lim.setdefault(channel, None)

        super().plot(experiment, lim=lim, scale=scale, **kwargs)
# Minimal traits model wrapping a single generic list value.
class ListModel(HasTraits):
    # The list being modeled; element type is unconstrained.
    value = List()
class GaussianMixture2DOp(HasStrictTraits): """ This module fits a 2D Gaussian mixture model with a specified number of components to a pair of channels. .. warning:: :class:`GaussianMixture2DOp` is **DEPRECATED** and will be removed in a future release. It doesn't correctly handle the case where an event is present in more than one component. Please use :class:`GaussianMixtureOp` instead! Creates a new categorical metadata variable named :attr:`name`, with possible values ``name_1`` .... ``name_n`` where ``n`` is the number of components. An event is assigned to ``name_i`` category if it falls within :attr:`sigma` standard deviations of the component's mean. If that is true for multiple categories (or if :attr:`sigma` is ``0.0``), the event is assigned to the category with the highest posterior probability. If the event doesn't fall into any category, it is assigned to ``name_None``. As a special case, if :attr:`num_components` is ``1`` and :attr:`sigma` ``> 0.0``, then the new condition is boolean, ``True`` if the event fell in the gate and ``False`` otherwise. Optionally, if :attr:`posteriors` is ``True``, this module will also compute the posterior probability of each event in its assigned component, returning it in a new colunm named ``{Name}_Posterior``. Finally, the same mixture model (mean and standard deviation) may not be appropriate for every subset of the data. If this is the case, you can use the :attr:`by` attribute to specify metadata by which to aggregate the data before estimating (and applying) a mixture model. The number of components is the same across each subset, though. Attributes ---------- name : Str The operation name; determines the name of the new metadata column xchannel : Str The X channel to apply the mixture model to. ychannel : Str The Y channel to apply the mixture model to. xscale : {"linear", "logicle", "log"} (default = "linear") Re-scale the data on the X acis before fitting the data? 
yscale : {"linear", "logicle", "log"} (default = "linear") Re-scale the data on the Y axis before fitting the data? num_components : Int (default = 1) How many components to fit to the data? Must be positive. sigma : Float (default = 0.0) How many standard deviations on either side of the mean to include in each category? If an event is in multiple components, assign it to the component with the highest posterior probability. If :attr:`sigma` is ``0.0``, categorize *all* the data by assigning each event to the component with the highest posterior probability. Must be ``>= 0.0``. by : List(Str) A list of metadata attributes to aggregate the data before estimating the model. For example, if the experiment has two pieces of metadata, ``Time`` and ``Dox``, setting :attr:`by` to ``["Time", "Dox"]`` will fit the model separately to each subset of the data with a unique combination of ``Time`` and ``Dox``. posteriors : Bool (default = False) If ``True``, add a column named ``{Name}_Posterior`` giving the posterior probability that the event is in the component to which it was assigned. Useful for filtering out low-probability events. Examples -------- .. plot:: :context: close-figs Make a little data set. >>> import cytoflow as flow >>> import_op = flow.ImportOp() >>> import_op.tubes = [flow.Tube(file = "Plate01/RFP_Well_A3.fcs", ... conditions = {'Dox' : 10.0}), ... flow.Tube(file = "Plate01/CFP_Well_A4.fcs", ... conditions = {'Dox' : 1.0})] >>> import_op.conditions = {'Dox' : 'float'} >>> ex = import_op.apply() Create and parameterize the operation. .. plot:: :context: close-figs >>> gm_op = flow.GaussianMixture2DOp(name = 'Flow', ... xchannel = 'V2-A', ... xscale = 'log', ... ychannel = 'Y2-A', ... yscale = 'log', ... num_components = 2) Estimate the clusters .. plot:: :context: close-figs >>> gm_op.estimate(ex) Plot a diagnostic view with the distributions .. plot:: :context: close-figs >>> gm_op.default_view().plot(ex) Apply the gate .. 
plot:: :context: close-figs >>> ex2 = gm_op.apply(ex) Plot a diagnostic view with the event assignments .. plot:: :context: close-figs >>> gm_op.default_view().plot(ex2) """ id = Constant('edu.mit.synbio.cytoflow.operations.gaussian_2d') friendly_id = Constant("2D Gaussian Mixture") name = CStr() xchannel = Str() ychannel = Str() xscale = util.ScaleEnum yscale = util.ScaleEnum num_components = util.PositiveInt sigma = util.PositiveFloat(0.0, allow_zero = True) by = List(Str) posteriors = Bool(False) # the key is either a single value or a tuple _gmms = Dict(Any, Instance(mixture.GaussianMixture), transient = True) _xscale = Instance(util.IScale, transient = True) _yscale = Instance(util.IScale, transient = True) def estimate(self, experiment, subset = None): """ Estimate the Gaussian mixture model parameters. Parameters ---------- experiment : Experiment The data to use to estimate the mixture parameters subset : str (default = None) If set, a Python expression to determine the subset of the data to use to in the estimation. """ warn("GaussianMixture2DOp is DEPRECATED. 
Please use GaussianMixtureOp.", util.CytoflowOpWarning) if experiment is None: raise util.CytoflowOpError('experiment', "No experiment specified") if self.xchannel not in experiment.data: raise util.CytoflowOpError('xchannel', "Column {0} not found in the experiment" .format(self.xchannel)) if self.ychannel not in experiment.data: raise util.CytoflowOpError('ychannel', "Column {0} not found in the experiment" .format(self.ychannel)) for b in self.by: if b not in experiment.data: raise util.CytoflowOpError('by', "Aggregation metadata {} not found, " "must be one of {}" .format(b, experiment.conditions)) if self.num_components == 1 and self.posteriors: raise util.CytoflowOpError('posteriors', "If num_components == 1, all posteriors are 1.") if subset: try: experiment = experiment.query(subset) except Exception as e: raise util.CytoflowOpError('subset', "Subset string '{0}' isn't valid" .format(subset)) from e if len(experiment) == 0: raise util.CytoflowOpError('subset', "Subset string '{0}' returned no events" .format(subset)) if self.by: groupby = experiment.data.groupby(self.by) else: # use a lambda expression to return a group that contains # all the events groupby = experiment.data.groupby(lambda _: True) # get the scale. estimate the scale params for the ENTIRE data set, # not subsets we get from groupby(). 
And we need to save it so that # the data is transformed the same way when we apply() self._xscale = util.scale_factory(self.xscale, experiment, channel = self.xchannel) self._yscale = util.scale_factory(self.yscale, experiment, channel = self.ychannel) gmms = {} for group, data_subset in groupby: if len(data_subset) == 0: raise util.CytoflowOpError(None, "Group {} had no data" .format(group)) x = data_subset.loc[:, [self.xchannel, self.ychannel]] x[self.xchannel] = self._xscale(x[self.xchannel]) x[self.ychannel] = self._yscale(x[self.ychannel]) # drop data that isn't in the scale range x = x[~(np.isnan(x[self.xchannel]) | np.isnan(x[self.ychannel]))] x = x.values gmm = mixture.GaussianMixture(n_components = self.num_components, covariance_type = "full", random_state = 1) gmm.fit(x) if not gmm.converged_: raise util.CytoflowOpError(None, "Estimator didn't converge" " for group {0}" .format(group)) # in the 1D version, we sort the components by the means -- so # the first component has the lowest mean, the second component # has the next-lowest mean, etc. that doesn't work in a 2D area, # obviously. # instead, we assume that the clusters are likely (?) to be # arranged along *one* of the axes, so we take the |norm| of the # x,y mean of each cluster and sort that way. norms = (gmm.means_[:, 0] ** 2 + gmm.means_[:, 1] ** 2) ** 0.5 sort_idx = np.argsort(norms) gmm.means_ = gmm.means_[sort_idx] gmm.weights_ = gmm.weights_[sort_idx] gmm.covariances_ = gmm.covariances_[sort_idx] gmms[group] = gmm self._gmms = gmms def apply(self, experiment): """ Assigns new metadata to events using the mixture model estimated in :meth:`estimate`. Returns ------- Experiment A new :class:`.Experiment` with a column named :attr:`name` and optionally one named :attr:`name` ``_Posterior``. Also includes the following new statistics: - **xmean** : Float the mean of the fitted gaussian in the x dimension. - **ymean** : Float the mean of the fitted gaussian in the y dimension. 
- **proportion** : Float the proportion of events in each component of the mixture model. only set if :attr:`num_components` ``> 1``. PS -- if someone has good ideas for summarizing spread in a 2D (non-isotropic) Gaussian, or other useful statistics, let me know! """ warn("GaussianMixture2DOp is DEPRECATED. Please use GaussianMixtureOp.", util.CytoflowOpWarning) if experiment is None: raise util.CytoflowOpError('experiment', "No experiment specified") if not self.xchannel: raise util.CytoflowOpError('xchannel', "Must set X channel") if not self.ychannel: raise util.CytoflowOpError('ychannel', "Must set Y channel") # make sure name got set! if not self.name: raise util.CytoflowOpError('name', "You have to set the gate's name " "before applying it!") if self.name != util.sanitize_identifier(self.name): raise util.CytoflowOpError('name', "Name can only contain letters, numbers and underscores." .format(self.name)) if self.name in experiment.data.columns: raise util.CytoflowOpError('name', "Experiment already has a column named {0}" .format(self.name)) if not self._gmms: raise util.CytoflowOpError(None, "No components found. Did you forget to " "call estimate()?") if not self._xscale: raise util.CytoflowOpError(None, "Couldn't find _xscale. What happened??") if not self._yscale: raise util.CytoflowOpError(None, "Couldn't find _yscale. 
What happened??") if self.xchannel not in experiment.data: raise util.CytoflowOpError('xchannel', "Column {0} not found in the experiment" .format(self.xchannel)) if self.ychannel not in experiment.data: raise util.CytoflowOpError('ychannel', "Column {0} not found in the experiment" .format(self.ychannel)) if self.posteriors: col_name = "{0}_Posterior".format(self.name) if col_name in experiment.data: raise util.CytoflowOpError('channels', "Column {0} already found in the experiment" .format(col_name)) for b in self.by: if b not in experiment.data: raise util.CytoflowOpError('by', "Aggregation metadata {} not found, " "must be one of {}" .format(b, experiment.conditions)) if self.sigma < 0.0: raise util.CytoflowOpError('sigma', "sigma must be >= 0.0") event_assignments = pd.Series([None] * len(experiment), dtype = "object") if self.posteriors: event_posteriors = pd.Series([0.0] * len(experiment)) # what we DON'T want to do is iterate through event-by-event. # the more of this we can push into numpy, sklearn and pandas, # the faster it's going to be. for example, this is why # we don't use Ellipse.contains(). if self.by: groupby = experiment.data.groupby(self.by) else: # use a lambda expression to return a group that # contains all the events groupby = experiment.data.groupby(lambda _: True) for group, data_subset in groupby: if group not in self._gmms: # there weren't any events in this group, so we didn't get # a gmm. continue gmm = self._gmms[group] x = data_subset.loc[:, [self.xchannel, self.ychannel]] x[self.xchannel] = self._xscale(x[self.xchannel]) x[self.ychannel] = self._yscale(x[self.ychannel]) # which values are missing? 
x_na = np.isnan(x[self.xchannel]) | np.isnan(x[self.ychannel]) x_na = x_na.values x = x.values group_idx = groupby.groups[group] # make a preliminary assignment predicted = np.full(len(x), -1, "int") predicted[~x_na] = gmm.predict(x[~x_na]) # if we're doing sigma-based gating, for each component check # to see if the event is in the sigma gate. if self.sigma > 0.0: # make a quick dataframe with the value and the predicted # component gate_df = pd.DataFrame({"x" : x[:, 0], "y" : x[:, 1], "p" : predicted}) # for each component, get the ellipse that follows the isoline # around the mixture component # cf. http://scikit-learn.org/stable/auto_examples/mixture/plot_gmm.html # and http://www.mathworks.com/matlabcentral/newsreader/view_thread/298389 # and http://stackoverflow.com/questions/7946187/point-and-ellipse-rotated-position-test-algorithm # i am not proud of how many tries this took me to get right. for c in range(0, self.num_components): mean = gmm.means_[c] covar = gmm.covariances_[c] # xc is the center on the x axis # yc is the center on the y axis xc = mean[0] # @UnusedVariable yc = mean[1] # @UnusedVariable v, w = linalg.eigh(covar) u = w[0] / linalg.norm(w[0]) # xl is the length along the x axis # yl is the length along the y axis xl = np.sqrt(v[0]) * self.sigma # @UnusedVariable yl = np.sqrt(v[1]) * self.sigma # @UnusedVariable # t is the rotation in radians (counter-clockwise) t = 2 * np.pi - np.arctan(u[1] / u[0]) sin_t = np.sin(t) # @UnusedVariable cos_t = np.cos(t) # @UnusedVariable # and build an expression with numexpr so it evaluates fast! 
gate_bool = gate_df.eval("p == @c and " "((x - @xc) * @cos_t - (y - @yc) * @sin_t) ** 2 / ((@xl / 2) ** 2) + " "((x - @xc) * @sin_t + (y - @yc) * @cos_t) ** 2 / ((@yl / 2) ** 2) <= 1").values predicted[np.logical_and(predicted == c, gate_bool == False)] = -1 predicted_str = pd.Series(["(none)"] * len(predicted)) for c in range(0, self.num_components): predicted_str[predicted == c] = "{0}_{1}".format(self.name, c + 1) predicted_str[predicted == -1] = "{0}_None".format(self.name) predicted_str.index = group_idx event_assignments.iloc[group_idx] = predicted_str if self.posteriors: probability = np.full((len(x), self.num_components), 0.0, "float") probability[~x_na, :] = gmm.predict_proba(x[~x_na, :]) posteriors = pd.Series([0.0] * len(predicted)) for c in range(0, self.num_components): posteriors[predicted == c] = probability[predicted == c, c] posteriors.index = group_idx event_posteriors.iloc[group_idx] = posteriors new_experiment = experiment.clone() if self.num_components == 1 and self.sigma > 0: new_experiment.add_condition(self.name, "bool", event_assignments == "{0}_1".format(self.name)) elif self.num_components > 1: new_experiment.add_condition(self.name, "category", event_assignments) if self.posteriors and self.num_components > 1: col_name = "{0}_Posterior".format(self.name) new_experiment.add_condition(col_name, "float", event_posteriors) # add the statistics levels = list(self.by) if self.num_components > 1: levels.append(self.name) if levels: idx = pd.MultiIndex.from_product([new_experiment[x].unique() for x in levels], names = levels) xmean_stat = pd.Series(index = idx, dtype = np.dtype(object)).sort_index() ymean_stat = pd.Series(index = idx, dtype = np.dtype(object)).sort_index() prop_stat = pd.Series(index = idx, dtype = np.dtype(object)).sort_index() for group, _ in groupby: gmm = self._gmms[group] for c in range(self.num_components): if self.num_components > 1: component_name = "{}_{}".format(self.name, c + 1) if group is True: g = [component_name] 
elif isinstance(group, tuple): g = list(group) g.append(component_name) else: g = list([group]) g.append(component_name) if len(g) > 1: g = tuple(g) else: g = (g[0],) else: g = group xmean_stat.at[g] = self._xscale.inverse(gmm.means_[c][0]) ymean_stat.at[g] = self._yscale.inverse(gmm.means_[c][0]) prop_stat.at[g] = gmm.weights_[c] new_experiment.statistics[(self.name, "xmean")] = pd.to_numeric(xmean_stat) new_experiment.statistics[(self.name, "ymean")] = pd.to_numeric(ymean_stat) if self.num_components > 1: new_experiment.statistics[(self.name, "proportion")] = pd.to_numeric(prop_stat) new_experiment.history.append(self.clone_traits(transient = lambda _: True)) return new_experiment def default_view(self, **kwargs): """ Returns a diagnostic plot of the Gaussian mixture model. Returns ------- IView : an IView, call :meth:`~GaussianMixture2DView.plot` to see the diagnostic plot. """ warn("GaussianMixture1DOp is DEPRECATED. Please use GaussianMixtureOp.", util.CytoflowOpWarning) v = GaussianMixture2DView(op = self) v.trait_set(**kwargs) return v
class Session(HasTraits):
    """ An object representing the session between a client and its
    Enaml objects.

    The session object is what ensures that each client has their own
    individual instances of objects, so that the only state that is
    shared between simultaneously existing clients is that which is
    explicitly provided by the developer.

    """
    #: The string identifier for this session. This is provided by
    #: the application when the session is opened. The value should
    #: not be manipulated by user code.
    session_id = ReadOnly

    #: The top level windows which are managed by this session. This
    #: should be populated by user code during the `on_open` method.
    windows = List(Window)

    #: The widget implementation groups which should be used by the
    #: widgets in this session. Widget groups are an advanced feature
    #: which allow the developer to selectively expose toolkit specific
    #: implementations of Enaml widgets. All standard Enaml widgets are
    #: available in the 'default' group. This value will rarely need to
    #: be changed by the user.
    widget_groups = List(Str, ['default'])

    #: A resource manager used for loading resources for the session.
    resource_manager = Instance(ResourceManager, ())

    #: The socket used by this session for communication. This is
    #: provided by the Application when the session is activated.
    #: The value should not normally be manipulated by user code.
    socket = Instance(ActionSocketInterface)

    #: The current state of the session. This value is changed by the
    #: by the application as it drives the session through its lifetime.
    #: This should not be manipulated directly by user code.
    state = Enum(
        'inactive', 'opening', 'opened', 'activating', 'active', 'closing',
        'closed',
    )

    #: A read-only property which is True if the session is inactive.
    is_inactive = Property(fget=lambda self: self.state == 'inactive')

    #: A read-only property which is True if the session is opening.
    is_opening = Property(fget=lambda self: self.state == 'opening')

    #: A read-only property which is True if the session is opened.
    is_opened = Property(fget=lambda self: self.state == 'opened')

    #: A read-only property which is True if the session is activating.
    is_activating = Property(fget=lambda self: self.state == 'activating')

    #: A read-only property which is True if the session is active.
    is_active = Property(fget=lambda self: self.state == 'active')

    #: A read-only property which is True if the session is closing.
    is_closing = Property(fget=lambda self: self.state == 'closing')

    #: A read-only property which is True if the session is closed.
    is_closed = Property(fget=lambda self: self.state == 'closed')

    #: A private dictionary of objects registered with this session.
    #: This value should not be manipulated by user code.
    _registered_objects = Instance(dict, ())

    #: The private deferred message batch used for collapsing layout
    #: related messages into a single batch to send to the client
    #: session for more efficient handling.
    _batch = Instance(DeferredBatch)

    def __batch_default(self):
        """ Default initializer: connect the batch's trigger so the
        collected messages are flushed through `_on_batch_triggered`.

        """
        batch = DeferredBatch()
        batch.triggered.connect(self._on_batch_triggered)
        return batch

    #--------------------------------------------------------------------------
    # Class API
    #--------------------------------------------------------------------------
    @classmethod
    def factory(cls, name='', description='', *args, **kwargs):
        """ Get a SessionFactory instance for this Session class.

        Parameters
        ----------
        name : str, optional
            The name to use for the session instances. The default uses
            the class name.

        description : str, optional
            A human friendly description of the session. The default uses
            the class docstring.

        *args, **kwargs
            Any positional and keyword arguments to pass to the session
            when it is instantiated.

        """
        # imported here to avoid a circular import at module load time
        from enaml.session_factory import SessionFactory
        if not name:
            name = cls.__name__
        if not description:
            description = cls.__doc__
        return SessionFactory(name, description, cls, *args, **kwargs)

    #--------------------------------------------------------------------------
    # Private API
    #--------------------------------------------------------------------------
    def _on_batch_triggered(self):
        """ A signal handler for the `triggered` signal on the deferred
        message batch.

        """
        batch = [task() for task in self._batch.release()]
        content = {'batch': batch}
        self.send(self.session_id, 'message_batch', content)

    @on_trait_change('windows:destroyed')
    def _on_window_destroyed(self, obj, name, old, new):
        """ A trait handler for the `destroyed` event on the windows.

        This handler will remove a destroyed window from the list of
        the session's windows.

        """
        self.windows.remove(obj)

    #--------------------------------------------------------------------------
    # Abstract API
    #--------------------------------------------------------------------------
    def on_open(self):
        """ Called by the application when the session is opened.

        This method must be implemented in a subclass and is called to
        create the Enaml objects for the session. This method will only
        be called once during the session lifetime. User code should
        create their windows and assign them to the list of `windows`
        before the method returns.

        """
        raise NotImplementedError

    def on_close(self):
        """ Called by the application when the session is closed.

        This method may be optionally implemented by subclasses so that
        they can perform custom cleaup. After this method returns, the
        session should be considered invalid. This method is only called
        once during the session lifetime.

        """
        pass

    #--------------------------------------------------------------------------
    # Public API
    #--------------------------------------------------------------------------
    def open(self, session_id):
        """ Called by the application to open the session.

        This method will call the `on_open` abstract method which must
        be implemented by subclasses. The method should never be called
        by user code.

        Parameters
        ----------
        session_id : str
            The unique identifier to use for this session.

        """
        self.session_id = session_id
        self.state = 'opening'
        self.on_open()
        for window in self.windows:
            window.initialize()
        self.state = 'opened'

    def activate(self, socket):
        """ Called by the application to activate the session and its
        windows.

        This method will be called by the Application once during the
        session lifetime. Once this method returns, the session and
        its objects will be ready to send and receive messages. This
        should never be called by user code.

        Parameters
        ----------
        socket : ActionSocketInterface
            A concrete implementation of ActionSocketInterface to use
            for messaging by this session.

        """
        self.state = 'activating'
        for window in self.windows:
            window.activate(self)
        self.socket = socket
        socket.on_message(self.on_message)
        self.state = 'active'

    def close(self):
        """ Called by the application when the session is closed.

        This method will call the `on_close` method which can optionally
        be implemented by subclasses. The method should never be called
        by user code.

        """
        self.send(self.session_id, 'close', {})
        self.state = 'closing'
        self.on_close()
        # The list is copied to avoid issues with the list changing size
        # while iterating. Windows are removed from the `windows` list
        # when they fire their `destroyed` event during destruction.
        for window in self.windows[:]:
            window.destroy()
        self.windows = []
        self._registered_objects = {}
        self.socket.on_message(None)
        self.socket = None
        self.state = 'closed'

    def add_window(self, window):
        """ Add a window to the session's window list.

        This will add the window to the session and create the client
        side window if necessary. If the window already exists in the
        session, this is a no-op.

        Parameters
        ----------
        window : Window
            A new window instance to add to the session. It will not
            normally have a parent, though this is not enforced.

        """
        if window not in self.windows:
            self.windows.append(window)
            if self.is_active:
                window.initialize()
                # If the window has no parent, the client session must
                # be told to create it. Otherwise, the window's parent
                # will create it during the children changed event.
                if window.parent is None:
                    content = {'window': window.snapshot()}
                    self.send(self.session_id, 'add_window', content)
                window.activate(self)

    def snapshot(self):
        """ Get a snapshot of the windows of this session.

        Returns
        -------
        result : list
            A list of snapshots representing the current windows for
            this session.

        """
        return [window.snapshot() for window in self.windows]

    def register(self, obj):
        """ Register an object with the session.

        This method is called by an Object when it is activated by a
        Session. It should never be called by user code.

        Parameters
        ----------
        obj : Object
            The object to register with the session.

        """
        self._registered_objects[obj.object_id] = obj

    def unregister(self, obj):
        """ Unregister an object from the session.

        This method is called by an Object when it is being destroyed.
        It should never be called by user code.

        Parameters
        ----------
        obj : Object
            The object to unregister from the session.

        """
        self._registered_objects.pop(obj.object_id, None)

    #--------------------------------------------------------------------------
    # Messaging API
    #--------------------------------------------------------------------------
    def send(self, object_id, action, content):
        """ Send a message to a client object.

        This method is called by the `Object` instances owned by this
        session to send messages to their client implementations.

        Parameters
        ----------
        object_id : str
            The object id of the client object.

        action : str
            The action that should be performed by the object.

        content : dict
            The content dictionary for the action.

        """
        # messages are silently dropped unless the session is active
        if self.is_active:
            self.socket.send(object_id, action, content)

    def batch(self, object_id, action, content):
        """ Batch a message to be sent by the session.

        This method can be called to add a message to an internal batch
        to be sent to the client at a later time. This is useful for
        queueing messages which are related and are emitted in rapid
        succession, such as `destroy` and `children_changed`. This can
        allow the client-side to batch update the ui, avoiding flicker
        and rendering artifacts. This method should be used with care.

        Parameters
        ----------
        object_id : str
            The object id of the client object.

        action : str
            The action that should be performed by the object.

        content : dict
            The content dictionary for the action.

        """
        task = lambda: (object_id, action, content)
        self._batch.append(task)

    def batch_task(self, object_id, action, task):
        """ Similar to `batch` but takes a callable task.

        Parameters
        ----------
        object_id : str
            The object id of the client object.

        action : str
            The action that should be performed by the object.

        task : callable
            A callable which will be invoked at a later time to get the
            content of the message. The callable must return the content
            dictionary for the action.

        """
        ctask = lambda: (object_id, action, task())
        self._batch.append(ctask)

    def on_message(self, object_id, action, content):
        """ Receive a message sent to an object owned by this session.

        This is a handler method registered as the callback for the
        action socket. The message will be routed to the appropriate
        `Object` instance.

        Parameters
        ----------
        object_id : str
            The object id of the target object.

        action : str
            The action that should be performed by the object.

        content : dict
            The content dictionary for the action.

        """
        if self.is_active:
            if object_id == self.session_id:
                dispatch_action(self, action, content)
            else:
                try:
                    obj = self._registered_objects[object_id]
                except KeyError:
                    msg = "Invalid object id sent to Session: %s:%s"
                    # FIX: Logger.warn is a deprecated alias for warning()
                    logger.warning(msg % (object_id, action))
                    return
                else:
                    obj.receive_action(action, content)

    #--------------------------------------------------------------------------
    # Action Handlers
    #--------------------------------------------------------------------------
    def on_action_url_request(self, content):
        """ Handle the 'url_request' action from the client session.

        """
        url = content['url']
        metadata = content['metadata']
        reply = URLReply(self, content['id'], url)
        self.resource_manager.load(url, metadata, reply)
class PeakCenterConfig(HasTraits):
    """Configuration model for a mass-spectrometer peak-center scan.

    Holds the measurement parameters (detector selection, DAC range,
    integration time, scan direction) and the post-processing options
    (peak-height thresholds, interpolation, deconvolution), plus the
    TraitsUI view used to edit them.
    """
    # user-visible name of this configuration
    name = Str

    # all detectors available on the spectrometer (populated externally;
    # transient: not persisted)
    detectors = List(transient=True)

    # the primary detector used for the scan
    detector = Str
    # detector_name = Str

    # extra detectors scanned alongside the primary one
    additional_detectors = List
    # detectors eligible to be "additional" (everything except `detector`)
    available_detectors = List

    # isotope whose peak is centered
    isotope = Str('Ar40')
    # isotopes available for selection (populated externally; not persisted)
    isotopes = List(transient=True)

    # explicit DAC value to center around when not using the current one
    dac = Float
    # if True, start the scan from the spectrometer's current DAC setting
    use_current_dac = Bool(True)
    integration_time = Enum(QTEGRA_INTEGRATION_TIMES)
    directions = Enum('Increase', 'Decrease', 'Oscillate')

    # scan window width and step size, in volts
    window = Float(0.015)
    step_width = Float(0.0005)

    # post-processing thresholds
    min_peak_height = Float(1.0)
    percent = Int(80)

    # optional interpolation of the scan data before peak detection
    use_interpolation = Bool
    interpolation_kind = Enum('linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic')

    # number of peaks to deconvolve, and which one to select
    n_peaks = Enum(1, 2, 3, 4)
    select_n_peak = Int
    select_n_peaks = List

    # optional constant offset applied to the resulting DAC
    use_dac_offset = Bool
    dac_offset = Float
    calculate_all_peaks = Bool

    def _integration_time_default(self):
        # default to the 5th entry, 1.048576 s
        return QTEGRA_INTEGRATION_TIMES[4]  # 1.048576

    def _n_peaks_changed(self, new):
        # keep the selectable-peak list in sync and clamp the selection
        self.select_n_peaks = [i + 1 for i in range(new)]

        if self.select_n_peak > new:
            self.select_n_peak = new

    def _detector_changed(self, new):
        # the primary detector cannot also be an "additional" detector
        if new:
            self.available_detectors = [d for d in self.detectors if d != new]

    def traits_view(self):
        """Build the TraitsUI edit view: additional detectors, measurement
        parameters, and post-processing options."""
        degrp = VGroup(UItem('additional_detectors',
                             style='custom',
                             editor=CheckListEditor(
                                 name='available_detectors',
                                 cols=max(1, len(self.available_detectors)))),
                       show_border=True, label='Additional Detectors')

        m_grp = VGroup(HGroup(Item('use_current_dac', label='Use Current DAC'),
                              Item('dac', enabled_when='not use_current_dac')),
                       Item('integration_time'),
                       Item('directions'),
                       Item('window', label='Peak Width (V)'),
                       Item('step_width', label='Step Width (V)'),
                       show_border=True, label='Measure')
        pp_grp = VGroup(Item('min_peak_height', label='Min Peak Height (fA)'),
                        Item('percent', label='% Peak Height'),
                        HGroup(
                            Item('use_interpolation', label='Use Interpolation'),
                            UItem('interpolation_kind', enabled_when='use_interpolation')),
                        Item('n_peaks', label='Deconvolve N. Peaks'),
                        Item('select_n_peak',
                             editor=EnumEditor(name='select_n_peaks'),
                             enabled_when='n_peaks>1', label='Select Peak'),
                        HGroup(
                            Item('use_dac_offset', label='DAC Offset'),
                            UItem('dac_offset', enabled_when='use_dac_offset')),
                        Item('calculate_all_peaks'),
                        show_border=True, label='Post Process')

        v = View(
            VGroup(
                HGroup(Item('detector', editor=EnumEditor(name='detectors')),
                       Item('isotope', editor=EnumEditor(name='isotopes'))),
                degrp,
                m_grp,
                pp_grp))
        return v

    @property
    def active_detectors(self):
        # the primary detector always comes first
        return [self.detector] + self.additional_detectors
class PluginB(Plugin):
    """Test/fixture plugin contributing to the 'x' extension point."""
    id = 'B'
    # NOTE(review): both traits contribute to extension point 'x' (y does
    # NOT contribute to 'y') — this looks intentional, presumably to test
    # multiple contributions to one extension point from a single plugin;
    # confirm before "fixing".
    x = List([1, 2, 3], contributes_to='x')
    y = List([4, 5, 6], contributes_to='x')
class DomainStateContainer(HasStrictTraits):
    '''Model of the spatial domain - base class for approximations and
    discretizations.

    Holds a list of subdomains (each wrapping an XModel) and maintains the
    doubly linked chain between consecutive subdomains, along with derived
    quantities such as the total number of DOFs.
    '''

    def __init__(self, subdomains, *args, **kw):
        super().__init__(*args, **kw)
        self.subdomains = subdomains
        # touch the cached property so the prev/next links between
        # subdomains are established eagerly at construction time
        self.serialized_subdomains

    # fired when the subdomain structure changes; triggers re-validation
    changed_structure = Event

    # the subdomains making up the spatial domain
    subdomains = List(domain_changed=True)

    @on_trait_change('changed_structure')
    def _validate_subdomains(self):
        # ask every subdomain to check its own consistency
        for domain in self.subdomains:
            domain.validate()

    # subdomains with their xmodels chained via set_prev/set_next;
    # recomputed when the subdomains list (or its items) change
    serialized_subdomains = Property(depends_on='subdomains, subdomains_items')

    @cached_property
    def _get_serialized_subdomains(self):
        '''Link the new subdomain at the end of the series.
        '''
        s = np.array(self.subdomains)
        # link each consecutive pair in both directions
        for s1, s2 in zip(s[:-1], s[1:]):
            s1.xmodel.set_next(s2.xmodel)
            s2.xmodel.set_prev(s1.xmodel)
        return self.subdomains

    # subdomains that currently have at least one active element
    nonempty_subdomains = Property(depends_on='changed_structure')

    @cached_property
    def _get_nonempty_subdomains(self):
        d_list = []
        for d in self.serialized_subdomains:
            if d.xmodel.n_active_elems > 0:
                d_list.append(d)
        return d_list

    # total number of degrees of freedom over all subdomains
    n_dofs = Property

    def _get_n_dofs(self):
        '''Return the total number of dofs in the domain.
        Use the last subdomain's: dof_offset + n_dofs
        '''
        last_d = self.serialized_subdomains[-1]
        dof_offset = last_d.xmodel.dof_offset
        n_dofs = last_d.xmodel.n_dofs
        return dof_offset + n_dofs

    # per-subdomain DOF offsets as a numpy array
    dof_offset_arr = Property

    def _get_dof_offset_arr(self):
        ''' Return array of the dof offsets from serialized subdomains
        '''
        a = np.array([domain.xmodel.dof_offset
                      for domain in self.serialized_subdomains])
        return a

    # shape of the global displacement variable vector
    U_var_shape = Property

    def _get_U_var_shape(self):
        return (self.n_dofs,)

    def __iter__(self):
        # iterate over the raw (unserialized) subdomain list
        return iter(self.subdomains)

    def __getitem__(self, idx):
        # index into the serialized (linked) subdomain sequence
        return self.serialized_subdomains[idx]
class PluginB(Plugin):
    """Test/fixture plugin contributing a typed Int list to the 'x'
    extension point."""
    id = 'B'
    x = List(Int, [1, 2, 3], contributes_to='x')
class RuleTableFilter(TableFilter):
    """ A table filter based on rules.

    The filter holds a list of :class:`GenericTableFilterRule` objects.
    Consecutive rules joined by 'and' form a conjunction; an 'or' rule
    starts a new conjunction. An object passes the filter if any
    conjunction is fully satisfied.
    """

    #-------------------------------------------------------------------------
    #  Trait definitions:
    #-------------------------------------------------------------------------

    # Overrides the default **name** trait
    name = 'Default rule-based filter'

    # List of the filter rules to be applied
    rules = List(GenericTableFilterRule)

    # Event fired when the contents of the filter have changed
    modified = Event

    # Persistence ID of the view
    view_id = Str('traitsui.table_filter.RuleTableFilter')

    # Sample object that the filter will apply to (transient; stripped
    # from the pickled state)
    _object = Any

    # Map of trait names and default values (transient; stripped from
    # the pickled state)
    _trait_values = Any

    #-------------------------------------------------------------------------
    #  Traits view definitions:
    #-------------------------------------------------------------------------

    error_view = View(
        Item(label='A menu or rule based filter can only be created for '
                   'tables with at least one entry'
        ),
        title='Error Creating Filter',
        kind='livemodal',
        close_result=False,
        buttons=['Cancel']
    )

    #-------------------------------------------------------------------------
    #  Returns whether a specified object meets the filter/search criteria:
    #  (Should normally be overridden)
    #-------------------------------------------------------------------------

    def filter(self, object):
        """ Returns whether a specified object meets the filter or search
        criteria.

        Evaluates the rules as an OR of AND-groups: an 'or' rule closes
        the current AND-group (returning True immediately if that group
        was satisfied) and starts a new one.
        """
        is_first = is_true = True
        for rule in self.rules:
            if rule.and_or == 'or':
                # the previous AND-group succeeded; short-circuit
                # (is_first guards against a leading 'or' rule)
                if is_true and (not is_first):
                    return True
                is_true = True
            if is_true:
                is_true = rule.is_true(object)
            is_first = False
        return is_true

    #-------------------------------------------------------------------------
    #  Returns a user readable description of what kind of object will
    #  satisfy the filter:
    #  (Should normally be overridden):
    #-------------------------------------------------------------------------

    def description(self):
        """ Returns a user-readable description of the kind of object that
        satisfies the filter.
        """
        ors = []
        ands = []
        if len(self.rules) > 0:
            # group consecutive 'and' rules; an 'or' rule flushes the group
            for rule in self.rules:
                if rule.and_or == 'or':
                    if len(ands) > 0:
                        ors.append(' and '.join(ands))
                        ands = []
                ands.append(rule.description())

        if len(ands) > 0:
            ors.append(' and '.join(ands))

        if len(ors) == 1:
            return ors[0]

        if len(ors) > 1:
            return ' or '.join(['(%s)' % t for t in ors])

        # no rules: fall back to the base-class description
        return super(RuleTableFilter, self).description()

    #-------------------------------------------------------------------------
    #  Edits the contents of the filter:
    #-------------------------------------------------------------------------

    def edit_view(self, object):
        """ Return a view to use for editing the filter.

        The ''object'' parameter is a sample object for the table that the
        filter will be applied to. It is supplied in case the filter needs
        to extract data or metadata from the object. If the table is empty,
        the ''object'' argument is None.
        """
        self._object = object
        if object is None:
            return self.edit_traits(view='error_view')

        names = object.editable_traits()
        self._trait_values = object.get(names)
        return View(
            [['name{Filter name}', '_'],
             [Item('rules',
                   id='rules_table',
                   editor=self._get_table_editor(names)),
              '|<>']],
            id=self.view_id,
            title='Edit Filter',
            kind='livemodal',
            resizable=True,
            buttons=['OK', 'Cancel'],
            width=0.4,
            height=0.3)

    #-------------------------------------------------------------------------
    #  Returns a table editor to use for editing the filter:
    #-------------------------------------------------------------------------

    def _get_table_editor(self, names):
        """ Returns a table editor to use for editing the filter.
        """
        # imported here to avoid a circular import at module load time
        from .api import TableEditor

        return TableEditor(columns=generic_table_filter_rule_columns,
                           orientation='vertical',
                           deletable=True,
                           sortable=False,
                           configurable=False,
                           auto_size=False,
                           auto_add=True,
                           row_factory=GenericTableFilterRule,
                           row_factory_kw={
                               'filter': self,
                               'name_editor': EnumEditor(values=names)})

    #-------------------------------------------------------------------------
    #  Returns the state to be pickled (override of object):
    #-------------------------------------------------------------------------

    def __getstate__(self):
        """ Returns the state to be pickled.

        This definition overrides **object**. The transient sample-object
        traits are stripped from the state.
        """
        # FIX: the original deleted '_trait_values' only when '_object'
        # was present (leaking it otherwise) and raised KeyError when
        # '_object' existed without '_trait_values'. pop() handles both
        # keys independently and safely. Also renamed the local so it no
        # longer shadows the builtin `dict`.
        state = self.__dict__.copy()
        state.pop('_object', None)
        state.pop('_trait_values', None)
        return state

    #-------------------------------------------------------------------------
    #  Handles the 'rules' trait being changed:
    #-------------------------------------------------------------------------

    def _rules_changed(self, rules):
        """ Handles a change to the **rules** trait.
        """
        # give each rule a back-reference to its owning filter
        for rule in rules:
            rule.filter = self
class PluginC(Plugin):
    """Test/fixture plugin contributing a typed Int list to the 'x'
    extension point."""
    id = 'C'
    x = List(Int, [4, 5, 6], contributes_to='x')
class WfManagerReviewTask(Task):
    """Task responsible for running the Workflow and displaying the
    results."""

    #: Top pane containing the analysis in table form
    side_pane = Instance(ResultsPane)

    #: Main pane containing the graphs
    central_pane = Instance(DataViewPane)

    #: The menu bar for this task.
    menu_bar = Instance(SMenuBar)

    #: The tool bars for this task.
    tool_bars = List(SToolBar)

    id = "force_wfmanager.wfmanager_review_task"

    name = "Review"

    #: Workflow model used to create the review in the analysis model.
    #: This trait no longer tracks the workflow stored under
    #: :attr:`setup_task.workflow_model` and instead is only updated
    #: when a new run is started.
    workflow_model = Instance(Workflow, allow_none=True)

    #: Analysis model. Contains the results that are displayed in the plot
    #: and table
    analysis_model = Instance(AnalysisModel, allow_none=False)

    #: Is the results saving button enabled, i.e. are there results?
    export_results_enabled = Bool(False)

    #: Setup Task
    setup_task = Instance(Task)

    def _menu_bar_default(self):
        """A menu bar with functions relevant to the Review task.
        Functions associated to the shared methods are located at the
        application level."""
        return SMenuBar(id='mymenu')

    def _tool_bars_default(self):
        # Two toolbars: task switching, and project/results persistence.
        # The save/export actions are only enabled once results exist
        # (tracked by `export_results_enabled`).
        return [
            SToolBar(
                TaskAction(
                    name="Setup Workflow",
                    tooltip="Setup Workflow",
                    image=ImageResource("outline_build_black_48dp"),
                    method="switch_task",
                    image_size=(64, 64),
                ),
            ),
            SToolBar(
                TaskAction(
                    name="Open Project",
                    tooltip="Open a project containing a workflow and results",
                    image=ImageResource("baseline_folder_open_black_48dp"),
                    method="open_project",
                    image_size=(64, 64),
                ),
                TaskAction(
                    name="Save Project As",
                    tooltip="Save results and workflow together as JSON",
                    image=ImageResource("outline_save_black_48dp"),
                    method="save_project_as",
                    enabled_name="export_results_enabled",
                    image_size=(64, 64),
                ),
                TaskAction(
                    name="Export Results",
                    tooltip="Export results table to a JSON or CSV file",
                    image=ImageResource("baseline_save_black_48dp"),
                    method="export_analysis_model_as",
                    enabled_name="export_results_enabled",
                    image_size=(64, 64),
                ),
            ),
        ]

    def create_central_pane(self):
        """ Creates the central pane which contains the analysis part
        (pareto front and output KPI values)
        """
        central_pane = DataViewPane(analysis_model=self.analysis_model)
        self.central_pane = central_pane
        return central_pane

    def create_dock_panes(self):
        """ Creates the dock panes """
        return [self.side_pane]

    # Default initialisers

    def _side_pane_default(self):
        return ResultsPane(analysis_model=self.analysis_model)

    def _default_layout_default(self):
        """ Defines the default layout of the task window """
        return TaskLayout(top=PaneItem("force_wfmanager.results_pane"))

    def _workflow_model_default(self):
        return None

    def _analysis_model_default(self):
        return AnalysisModel()

    # Save AnalysisModel to file and sync its state

    @on_trait_change("analysis_model.export_enabled")
    def sync_export_enabled(self):
        # mirror the model's flag onto this task so TaskActions can bind it
        self.export_results_enabled = self.analysis_model.export_enabled

    def export_analysis_model_as(self):
        """ Shows a dialog to save the :class:`AnalysisModel` as a
        JSON file.
        """
        dialog = FileDialog(
            action="save as",
            default_filename="results.json",
            wildcard="JSON files (*.json)|*.json|CSV files (*.csv)|*.csv",
        )
        result = dialog.open()
        if result is not OK:
            # user cancelled
            return False
        current_file = dialog.path
        return self._write_analysis(current_file)

    def _write_analysis(self, file_path):
        """ Write the contents of the analysis model to file.

        Parameters
        ----------
        file_path (str)
            the name of the file to write to.

        Returns
        -------
        bool: true if save was successful.
        """
        try:
            self.analysis_model.write(file_path)
        except IOError as e:
            error(
                None,
                f"Cannot save in the requested file:\n\n{e}",
                "Error when saving the results table",
            )
            log.exception("Error when saving AnalysisModel")
            return False
        except Exception as e:
            error(
                None,
                f"Cannot save the results table:\n\n{e}",
                "Error when saving results",
            )
            log.exception("Error when saving results")
            return False
        else:
            # NOTE(review): `current_file` is not declared as a trait on
            # this class; presumably inherited/handled elsewhere — confirm.
            self.current_file = file_path
            return True

    def save_project_as(self):
        """ Shows a dialog to save the current project as a JSON file.
        """
        dialog = FileDialog(
            action="save as",
            default_filename="project.json",
            wildcard="JSON files (*.json)|*.json",
        )
        result = dialog.open()
        if result is not OK:
            # user cancelled
            return False
        current_file = dialog.path
        if self._write_project(current_file):
            self.current_file = current_file
            return True
        return False

    def _write_project(self, file_path):
        """ Writes a JSON file that contains the :attr:`Workflow` and
        :attr:`AnalysisModel`.

        Returns
        -------
        bool: true if save was successful.
        """
        try:
            write_project_file(self.workflow_model,
                               self.analysis_model,
                               file_path)
        except IOError as e:
            error(
                None,
                "Cannot save in the requested file:\n\n{}".format(str(e)),
                "Error when saving the project",
            )
            log.exception("Error when saving Project")
            return False
        except Exception as e:
            error(
                None,
                "Cannot save the Project:\n\n{}".format(str(e)),
                "Error when saving the project",
            )
            log.exception("Error when saving the Project")
            return False
        else:
            return True

    def open_project(self):
        """ Shows a dialog to open a JSON file and load the contents into
        :attr:`Workflow` and :attr:`AnalysisModel`.
        """
        dialog = FileDialog(action="open",
                            wildcard="JSON files (*.json)|*.json")
        result = dialog.open()
        if result is not OK:
            # user cancelled
            return False
        current_file = dialog.path
        if self._load_project(current_file):
            return True
        return False

    def _load_project(self, file_path):
        """ Load contents of JSON file into :attr:`Workflow` and
        :attr:`AnalysisModel`.

        Returns
        -------
        bool: true if load was successful.
        """
        try:
            # NOTE(review): `factory_registry` is not declared on this
            # class; presumably provided by the application/base — confirm.
            (analysis_model_dict,
             self.workflow_model) = load_project_file(self.factory_registry,
                                                      file_path)

            # create two separate workflows, so that setup task can be
            # edited without changing the review task copy
            new_workflow = Workflow.from_json(
                self.factory_registry,
                self.workflow_model.__getstate__())
            self.setup_task.workflow_model = new_workflow

            # share the analysis model with the setup_task
            self.analysis_model.from_json(analysis_model_dict)
            self.setup_task.analysis_model = self.analysis_model

        except IOError as e:
            error(
                None,
                "Unable to load file:\n\n{}".format(str(e)),
                "Error when loading project",
            )
            log.exception("Error loading project file")
            return False
        except Exception as e:
            error(
                None,
                "Unable to load project:\n\n{}".format(str(e)),
                "Error when loading project",
            )
            log.exception("Error when loading project")
            return False
        else:
            self.current_file = file_path
            return True

    # Synchronization with Window

    @on_trait_change("window.tasks")
    def sync_setup_task(self):
        # locate the setup task among the window's tasks and share its
        # analysis model
        if self.window is not None:
            for task in self.window.tasks:
                if task.id == "force_wfmanager.wfmanager_setup_task":
                    self.setup_task = task
                    self.analysis_model = self.setup_task.analysis_model

    @on_trait_change("setup_task.computation_running")
    def cache_running_workflow(self):
        """ When a new computation starts running, save a copy of the
        :attr:`setup_task.workflow_model` as :attr:workflow_model` that
        can be used when saving the results of the run alongside
        the workflow that created it.
        """
        if self.setup_task.computation_running:
            # deep copy via JSON round-trip so later edits in the setup
            # task do not mutate the cached workflow
            self.workflow_model = Workflow.from_json(
                self.setup_task.factory_registry,
                self.setup_task.workflow_model.__getstate__(),
            )

    # Menu/Toolbar Methods

    def switch_task(self):
        # activate the setup task in the same window, if available
        if self.setup_task is not None:
            self.window.activate_task(self.setup_task)
class Task(HasTraits): """ A collection of pane, menu, tool bar, and status bar factories. The central class in the Tasks plugin, a Task is responsible for describing a set of user interface elements, as well as mediating between its view (a TaskWindow) and an application-specific model. """ # The task's identifier. id = Str # The task's user-visible name. name = Unicode # The default layout to use for the task. If not overridden, only the # central pane is displayed. default_layout = Instance(TaskLayout, ()) # A list of extra IDockPane factories for the task. These dock panes are # used in conjunction with the dock panes returned by create_dock_panes(). extra_dock_pane_factories = List(Callable) # The window to which the task is attached. Set by the framework. window = Instance('pyface.tasks.task_window.TaskWindow') #### Actions ############################################################## # The menu bar for the task. menu_bar = Instance(MenuBarSchema) # The (optional) status bar for the task. status_bar = Instance(StatusBarManager) # The list of tool bars for the tasks. tool_bars = List(ToolBarSchema) # A list of extra actions, groups, and menus that are inserted into menu # bars and tool bars constructed from the above schemas. extra_actions = List(SchemaAddition) ########################################################################### # 'Task' interface. ########################################################################### def activated(self): """ Called after the task has been activated in a TaskWindow. """ pass def create_central_pane(self): """ Create and return the central pane, which must implement ITaskPane. """ raise NotImplementedError def create_dock_panes(self): """ Create and return the task's dock panes (IDockPane instances). This method is called *after* create_central_pane() when the task is added to a TaskWindow. """ return [] def initialized(self): """ Called when the task is about to be activated in a TaskWindow for the first time. 
Override this method to perform any initialization that requires the Task's panes to be instantiated. Note that this method, when called, is called before activated(). """ pass def prepare_destroy(self): """ Called when the task is about to be removed from its TaskWindow. Override this method to perform any cleanup before the task's controls are destroyed. """ pass
class ParticleArrayHelper(HasTraits): """ This class manages a particle array and sets up the necessary plotting related information for it. """ # The particle array we manage. particle_array = Instance(ParticleArray) # The name of the particle array. name = Str # Current time. time = Float(0.0) # The active scalar to view. scalar = Str('rho', desc='name of the active scalar to view') # The mlab scalar plot for this particle array. plot = Instance(PipelineBase) # The mlab vectors plot for this particle array. plot_vectors = Instance(PipelineBase) # List of available scalars in the particle array. scalar_list = List(Str) scene = Instance(MlabSceneModel) # Sync'd trait with the scalar lut manager. show_legend = Bool(False, desc='if the scalar legend is to be displayed') # Show all scalars. list_all_scalars = Bool(False, desc='if all scalars should be listed') # Sync'd trait with the dataset to turn on/off visibility. visible = Bool(True, desc='if the particle array is to be displayed') # Show the time of the simulation on screen. show_time = Bool(False, desc='if the current time is displayed') # Edit the scalars. edit_scalars = Button('More options ...') # Show vectors. show_vectors = Bool(False, desc='if vectors should be displayed') vectors = Str('u, v, w', enter_set=True, auto_set=False, desc='the vectors to display') mask_on_ratio = Int(3, desc='mask one in specified points') scale_factor = Float(1.0, desc='scale factor for vectors', enter_set=True, auto_set=False) edit_vectors = Button('More options ...') # Private attribute to store the Text module. _text = Instance(PipelineBase) # Extra scalars to show. These will be added and saved to the data if # needed. extra_scalars = List(Str) # Set to True when the particle array is updated with a new property say. updated = Event # Private attribute to store old value of visibility in case of empty # arrays. _old_visible = Bool(True) ######################################## # View related code. 
view = View( Item(name='name', show_label=False, editor=TitleEditor()), Group( Group( Group( Item(name='visible'), Item(name='show_legend'), Item(name='scalar', editor=EnumEditor(name='scalar_list')), Item(name='list_all_scalars'), Item(name='show_time'), columns=2, ), Item(name='edit_scalars', show_label=False), label='Scalars', ), Group( Item(name='show_vectors'), Item(name='vectors'), Item(name='mask_on_ratio'), Item(name='scale_factor'), Item(name='edit_vectors', show_label=False), label='Vectors', ), layout='tabbed' ) ) # Private protocol ############################################ def _add_vmag(self, pa): if 'vmag' not in pa.properties: if 'vmag2' in pa.output_property_arrays: vmag = numpy.sqrt(pa.get('vmag2', only_real_particles=False)) else: u, v, w = pa.get('u', 'v', 'w', only_real_particles=False) vmag = numpy.sqrt(u**2 + v**2 + w**2) pa.add_property(name='vmag', data=vmag) if len(pa.output_property_arrays) > 0: # We do not call add_output_arrays when the default is empty # as if it is empty, all arrays are saved anyway. However, # adding just vmag in this case will mean that when the # particle array is saved it will only save vmag! This is # not what we want, hence we add vmag *only* if the # output_property_arrays is non-zero length. pa.add_output_arrays(['vmag']) self.updated = True def _get_scalar(self, pa, scalar): """Return the requested scalar from the given particle array. """ if scalar in self.extra_scalars: method_name = '_add_' + scalar method = getattr(self, method_name) method(pa) return pa.get(scalar, only_real_particles=False) # Traits handlers ############################################# def _edit_scalars_fired(self): self.plot.edit_traits() def _edit_vectors_fired(self): self.plot_vectors.edit_traits() def _particle_array_changed(self, old, pa): self.name = pa.name self._list_all_scalars_changed(self.list_all_scalars) # Update the plot. 
x, y, z = pa.get('x', 'y', 'z', only_real_particles=False) s = self._get_scalar(pa, self.scalar) p = self.plot mlab = self.scene.mlab empty = len(x) == 0 if old is None: old_empty = True else: old_x = old.get('x', only_real_particles=False) old_empty = len(old_x) == 0 if p is None and not empty: src = mlab.pipeline.scalar_scatter(x, y, z, s) p = mlab.pipeline.glyph(src, mode='point', scale_mode='none') p.actor.property.point_size = 6 scm = p.module_manager.scalar_lut_manager scm.set(show_legend=self.show_legend, use_default_name=False, data_name=self.scalar) self.sync_trait('visible', p, mutual=True) self.sync_trait('show_legend', scm, mutual=True) # set_arrays(p.mlab_source.m_data, pa) self.plot = p elif not empty: if len(x) == len(p.mlab_source.x): p.mlab_source.set(x=x, y=y, z=z, scalars=s) if self.plot_vectors: self._vectors_changed(self.vectors) else: if self.plot_vectors: u, v, w = self._get_vectors_for_plot(self.vectors) p.mlab_source.reset( x=x, y=y, z=z, scalars=s, u=u, v=v, w=w ) else: p.mlab_source.reset(x=x, y=y, z=z, scalars=s) p.mlab_source.update() if empty and not old_empty: if p is not None: src = p.parent.parent self._old_visible = src.visible src.visible = False if old_empty and not empty: if p is not None: p.parent.parent.visible = self._old_visible self._show_vectors_changed(self.show_vectors) # Setup the time. 
self._show_time_changed(self.show_time) def _scalar_changed(self, value): p = self.plot if p is not None: p.mlab_source.scalars = self._get_scalar( self.particle_array, value ) p.module_manager.scalar_lut_manager.data_name = value def _list_all_scalars_changed(self, list_all_scalars): pa = self.particle_array if list_all_scalars: sc_list = pa.properties.keys() self.scalar_list = sorted(set(sc_list + self.extra_scalars)) else: if len(pa.output_property_arrays) > 0: self.scalar_list = sorted( set(pa.output_property_arrays + self.extra_scalars) ) else: sc_list = pa.properties.keys() self.scalar_list = sorted(set(sc_list + self.extra_scalars)) def _show_time_changed(self, value): txt = self._text mlab = self.scene.mlab if value: if txt is not None: txt.visible = True elif self.plot is not None: mlab.get_engine().current_object = self.plot txt = mlab.text(0.01, 0.01, 'Time = 0.0', width=0.35) self._text = txt self._time_changed(self.time) else: if txt is not None: txt.visible = False def _get_vectors_for_plot(self, vectors): pa = self.particle_array comps = [x.strip() for x in vectors.split(',')] if len(comps) == 3: try: vec = pa.get(*comps, only_real_particles=False) except AttributeError: return None else: return vec def _vectors_changed(self, value): vec = self._get_vectors_for_plot(value) if vec is not None: self.plot.mlab_source.set( vectors=numpy.c_[vec[0], vec[1], vec[2]] ) def _show_vectors_changed(self, value): pv = self.plot_vectors if pv is not None: pv.visible = value elif self.plot is not None and value: self._vectors_changed(self.vectors) pv = self.scene.mlab.pipeline.vectors( self.plot.mlab_source.m_data, mask_points=self.mask_on_ratio, scale_factor=self.scale_factor ) self.plot_vectors = pv def _mask_on_ratio_changed(self, value): pv = self.plot_vectors if pv is not None: pv.glyph.mask_points.on_ratio = value def _scale_factor_changed(self, value): pv = self.plot_vectors if pv is not None: pv.glyph.glyph.scale_factor = value def _time_changed(self, 
value): txt = self._text if txt is not None: txt.text = 'Time = %.3e' % (value) def _extra_scalars_default(self): return ['vmag']
class MaskedTimeSamples(TimeSamples): """ Container for time data in `*.h5` format. This class loads measured data from h5 files and provides information about this data. It supports storing information about (in)valid samples and (in)valid channels It also serves as an interface where the data can be accessed (e.g. for use in a block chain) via the :meth:`result` generator. """ #: Index of the first sample to be considered valid. start = CLong(0, desc="start of valid samples") #: Index of the last sample to be considered valid. stop = Trait(None, None, CLong, desc="stop of valid samples") #: Channels that are to be treated as invalid. invalid_channels = List(desc="list of invalid channels") #: Channel mask to serve as an index for all valid channels, is set automatically. channels = Property(depends_on=['invalid_channels', 'numchannels_total'], desc="channel mask") #: Number of channels (including invalid channels), is set automatically. numchannels_total = CLong(0, desc="total number of input channels") #: Number of time data samples (including invalid samples), is set automatically. numsamples_total = CLong(0, desc="total number of samples per channel") #: Number of valid channels, is set automatically. numchannels = Property(depends_on = ['invalid_channels', \ 'numchannels_total'], desc="number of valid input channels") #: Number of valid time data samples, is set automatically. 
numsamples = Property(depends_on=['start', 'stop', 'numsamples_total'], desc="number of valid samples per channel") # internal identifier digest = Property( depends_on = ['basename', 'start', 'stop', \ 'calib.digest', 'invalid_channels']) traits_view = View([ 'name{File name}', ['start{From sample}', Item('stop', label='to', style='text'), '-'], 'invalid_channels{Invalid channels}', [ 'sample_freq~{Sampling frequency}', 'numchannels~{Number of channels}', 'numsamples~{Number of samples}', '|[Properties]' ], '|' ], title='Time data', buttons=OKCancelButtons) @cached_property def _get_digest(self): return digest(self) @cached_property def _get_basename(self): return path.splitext(path.basename(self.name))[0] @cached_property def _get_channels(self): if len(self.invalid_channels) == 0: return slice(0, None, None) allr = [ i for i in range(self.numchannels_total) if i not in self.invalid_channels ] return array(allr) @cached_property def _get_numchannels(self): if len(self.invalid_channels) == 0: return self.numchannels_total return len(self.channels) @cached_property def _get_numsamples(self): sli = slice(self.start, self.stop).indices(self.numsamples_total) return sli[1] - sli[0] @on_trait_change('basename') def load_data(self): #""" open the .h5 file and set attributes #""" if not path.isfile(self.name): # no file there self.numsamples_total = 0 self.numchannels_total = 0 self.sample_freq = 0 raise IOError("No such file: %s" % self.name) if self.h5f != None: try: self.h5f.close() except IOError: pass self.h5f = tables.open_file(self.name) self.data = self.h5f.root.time_data self.sample_freq = self.data.get_attr('sample_freq') (self.numsamples_total, self.numchannels_total) = self.data.shape def result(self, num=128): """ Python generator that yields the output block-wise. Parameters ---------- num : integer, defaults to 128 This parameter defines the size of the blocks to be yielded (i.e. the number of samples per block). 
Returns ------- Samples in blocks of shape (num, numchannels). The last block may be shorter than num. """ sli = slice(self.start, self.stop).indices(self.numsamples_total) i = sli[0] stop = sli[1] cal_factor = 1.0 if i >= stop: raise IOError("no samples available") if self.calib: if self.calib.num_mics == self.numchannels_total: cal_factor = self.calib.data[self.channels][newaxis] elif self.calib.num_mics == self.numchannels: cal_factor = self.calib.data[newaxis] else: raise ValueError("calibration data not compatible: %i, %i" % \ (self.calib.num_mics, self.numchannels)) while i < stop: yield self.data[i:min(i + num, stop)][:, self.channels] * cal_factor i += num
class FlowTask(Task): """ classdocs """ id = "edu.mit.synbio.cytoflowgui.flow_task" name = "Cytometry analysis" # the main workflow instance. model = Instance(Workflow) # the center pane workflow_pane = Instance(WorkflowDockPane) view_pane = Instance(ViewDockPane) help_pane = Instance(HelpDockPane) plot_params_pane = Instance(PlotParamsPane) # plugin lists, to setup the interface op_plugins = List(IOperationPlugin) view_plugins = List(IViewPlugin) menu_bar = SMenuBar(SMenu(TaskAction(name='Open...', method='on_open', accelerator='Ctrl+O'), TaskAction(name='Save', #image='save', method='on_save', accelerator='Ctrl+S'), TaskAction(name='Save As...', method='on_save_as', accelerator='Ctrl+e'), TaskAction(name='Save Plot...', method='on_export', accelerator='Ctrl+x'), TaskAction(name='Export Jupyter notebook...', method='on_notebook', accelerator='Ctrl+I'), # TaskAction(name='Preferences...', # method='on_prefs', # accelerator='Ctrl+P'), id='File', name='&File'), SMenu(TaskToggleGroup(), id = 'View', name = '&View'), SMenu(TaskAction(name = 'Report a problem....', method = 'on_problem'), TaskAction(name='About...', method='on_about'), id="Help", name ="&Help")) tool_bars = [ SToolBar(TaskAction(method='on_new', name = "New", tooltip='New workflow', image=ImageResource('new')), TaskAction(method='on_open', name = "Open", tooltip='Open a file', image=ImageResource('open')), TaskAction(method='on_save', name = "Save", tooltip='Save the current file', image=ImageResource('save')), TaskAction(method='on_export', name = "Save Plot", tooltip='Save the current plot', image=ImageResource('export')), TaskAction(method='on_notebook', name='Notebook', tooltip="Export to an Jupyter notebook...", image=ImageResource('jupyter')), TaskAction(method = "on_calibrate", name = "Calibrate FCS...", tooltip = "Calibrate FCS files", image = ImageResource('tasbe')), TaskAction(method = 'on_problem', name = "Report a bug...", tooltib = "Report a bug", image = ImageResource('bug')))] # 
TaskAction(method='on_prefs', # name = "Prefs", # tooltip='Preferences', # image=ImageResource('prefs')), # the file to save to if the user clicks "save" and has already clicked # "open" or "save as". filename = Unicode def initialized(self): if self.filename: self.open_file(self.filename) def activated(self): # if we're coming back from the TASBE task, re-load the saved # workflow if self.model.backup_workflow: self.model.workflow = self.model.backup_workflow self.model.backup_workflow = [] return # else, set up a new workflow # add the import op if not self.model.workflow: self.add_operation(ImportPlugin().id) self.model.selected = self.model.workflow[0] self.model.modified = False def _default_layout_default(self): return TaskLayout(left = VSplitter(PaneItem("edu.mit.synbio.cytoflowgui.workflow_pane", width = 350), PaneItem("edu.mit.synbio.cytoflowgui.help_pane", width = 350, height = 350)), right = VSplitter(PaneItem("edu.mit.synbio.cytoflowgui.view_traits_pane", width = 350), PaneItem("edu.mit.synbio.cytoflowgui.params_pane", width = 350, height = 350)), top_left_corner = 'left', bottom_left_corner = 'left', top_right_corner = 'right', bottom_right_corner = 'right') def create_central_pane(self): # set the toolbar image size # this isn't really the right place for this, but it's the only # place control passes back to user code before the toolbar # is created. 
dpi = self.window.control.physicalDpiX() self.tool_bars[0].image_size = (int(0.4 * dpi), int(0.4 * dpi)) return self.application.plot_pane def create_dock_panes(self): self.workflow_pane = WorkflowDockPane(model = self.model, plugins = self.op_plugins, task = self) self.view_pane = ViewDockPane(model = self.model, plugins = self.view_plugins, task = self) self.help_pane = HelpDockPane(view_plugins = self.view_plugins, op_plugins = self.op_plugins, task = self) self.plot_params_pane = PlotParamsPane(model = self.model, task = self) return [self.workflow_pane, self.view_pane, self.help_pane, self.plot_params_pane] def on_new(self): if self.model.modified: ret = confirm(parent = None, message = "Are you sure you want to discard the current workflow?", title = "Clear workflow?") if ret != YES: return self.filename = "" self.window.title = "Cytoflow" # clear the workflow self.model.workflow = [] # add the import op self.add_operation(ImportPlugin().id) # and select the operation self.model.selected = self.model.workflow[0] self.model.modified = False def on_open(self): """ Shows a dialog to open a file. 
""" if self.model.modified: ret = confirm(parent = None, message = "Are you sure you want to discard the current workflow?", title = "Clear workflow?") if ret != YES: return dialog = FileDialog(parent = self.window.control, action = 'open', wildcard = (FileDialog.create_wildcard("Cytoflow workflow", "*.flow") + ';' + #@UndefinedVariable FileDialog.create_wildcard("All files", "*"))) #@UndefinedVariable if dialog.open() == OK: self.open_file(dialog.path) self.filename = dialog.path self.window.title = "Cytoflow - " + self.filename def open_file(self, path): try: new_workflow = load_yaml(path) except yaml.parser.ParserError as e: error(None, "Parser error loading {} -- is it a Cytoflow file?\n\n{}" .format(path, str(e))) return except Exception as e: error(None, "{} loading {}: {}" .format(e.__class__.__name__, path, str(e))) return # a few things to take care of when reloading for wi_idx, wi in enumerate(new_workflow): # get wi lock wi.lock.acquire() # clear the wi status wi.status = "loading" # re-link the linked list. if wi_idx > 0: wi.previous_wi = new_workflow[wi_idx - 1] if wi_idx < len(new_workflow) - 1: wi.next_wi = new_workflow[wi_idx + 1] # check that the FCS files are all there wi = new_workflow[0] assert(wi.operation.id == "edu.mit.synbio.cytoflow.operations.import") missing_tubes = 0 for tube in wi.operation.tubes: file = pathlib.Path(tube.file) if not file.exists(): missing_tubes += 1 if missing_tubes == len(wi.operation.tubes): warning(self.window.control, "Cytoflow couldn't find any of the FCS files from that " "workflow. 
If they've been moved, please open one FCS " "file to show Cytoflow where they've been moved to.") dialog = FileDialog(parent = self.window.control, action = 'open', wildcard = (FileDialog.create_wildcard("FCS files", "*.fcs *.lmd"))) # @UndefinedVariable if dialog.open() == OK: # find the "best" file match -- ie, the one with the longest # tail match fcs_path = pathlib.Path(dialog.path).parts best_path_len = -1 for tube in wi.operation.tubes: tube_path = pathlib.Path(tube.file).parts for i in range(len(fcs_path)): if list(reversed(fcs_path))[:i] == list(reversed(tube_path))[:i] and i > best_path_len: best_path_len = i if best_path_len >= 0: for tube in wi.operation.tubes: tube_path = pathlib.Path(tube.file).parts new_path = fcs_path[:-1 * best_path_len] + tube_path[-1 * best_path_len :] tube.file = str(pathlib.Path(*new_path)) elif missing_tubes > 0: warning(self.window.control, "Cytoflow couldn't find some of the FCS files from that " "workflow. You'll need to re-load them from the Import " "operation.") # replace the current workflow with the one we just loaded if False: # for debugging the loading of things from .event_tracer import record_events with record_events() as container: self.model.workflow = new_workflow container.save_to_directory(os.getcwd()) else: self.model.workflow = new_workflow self.model.modified = False for wi in self.model.workflow: wi.lock.release() ret = confirm(parent = None, message = "Do you want to execute the workflow now?", title = "Run workflow?") if ret == YES: self.model.run_all() def on_save(self): """ Save the file to the previous filename """ if self.filename: save_yaml(self.model.workflow, self.filename) self.model.modified = False else: self.on_save_as() def on_save_as(self): dialog = DefaultFileDialog(parent = self.window.control, action = 'save as', default_suffix = "flow", wildcard = (FileDialog.create_wildcard("Cytoflow workflow", "*.flow") + ';' + #@UndefinedVariable FileDialog.create_wildcard("All files", "*"))) 
#@UndefinedVariable if dialog.open() == OK: save_yaml(self.model.workflow, dialog.path) self.filename = dialog.path self.model.modified = False self.window.title = "Cytoflow - " + self.filename @on_trait_change('model.modified', post_init = True) def _on_model_modified(self, val): if val: if not self.window.title.endswith("*"): self.window.title += "*" else: if self.window.title.endswith("*"): self.window.title = self.window.title[:-1] def on_export(self): task = next(x for x in self.window.tasks if x.id == 'edu.mit.synbio.cytoflowgui.export_task') self.window.activate_task(task) def on_calibrate(self): task = next(x for x in self.window.tasks if x.id == 'edu.mit.synbio.cytoflowgui.tasbe_task') self.window.activate_task(task) def on_notebook(self): """ Shows a dialog to export the workflow to an Jupyter notebook """ dialog = DefaultFileDialog(parent = self.window.control, action = 'save as', default_suffix = "ipynb", wildcard = (FileDialog.create_wildcard("Jupyter notebook", "*.ipynb") + ';' + #@UndefinedVariable FileDialog.create_wildcard("All files", "*"))) # @UndefinedVariable if dialog.open() == OK: save_notebook(self.model.workflow, dialog.path) def on_prefs(self): pass def on_problem(self): log = str(self._get_package_versions()) + "\n" + self.application.application_log.getvalue() msg = "The best way to report a problem is send an application log to " \ "the developers. 
You can do so by either sending us an email " \ "with the log in it, or saving the log to a file and filing a " \ "new issue on GitHub at " \ "https://github.com/bpteague/cytoflow/issues/new" dialog = ConfirmationDialog(message = msg, informative = "Which would you like to do?", yes_label = "Send an email...", no_label = "Save to a file...") if dialog.open() == NO: dialog = DefaultFileDialog(parent = self.window.control, action = 'save as', default_suffix = "log", wildcard = (FileDialog.create_wildcard("Log files", "*.log") + ';' + #@UndefinedVariable FileDialog.create_wildcard("All files", "*"))) #@UndefinedVariable if dialog.open() == OK: with open(dialog.path, 'w') as f: f.write(log) webbrowser.open_new_tab("https://github.com/bpteague/cytoflow/issues/new") return information(None, "I'll now try to open your email client and create a " "new message to the developer. Debugging logs are " "attached. Please fill out the template bug report and " "send -- thank you for reporting a bug!") log = self.application.application_log.getvalue() versions = ["{0} {1}".format(key, value) for key, value in self._get_package_versions().items()] body = """ Thank you for your bug report! Please fill out the following template. PLATFORM (Mac, PC, Linux, other): OPERATING SYSTEM (eg OSX 10.7, Windows 8.1): SEVERITY (Critical? Major? Minor? Enhancement?): DESCRIPTION: - What were you trying to do? - What happened? - What did you expect to happen? 
PACKAGE VERSIONS: {0} DEBUG LOG: {1} """.format(versions, log) mailto("*****@*****.**", subject = "Cytoflow bug report", body = body) def _get_package_versions(self): import sys from cytoflow import __version__ as cf_version from fcsparser import __version__ as fcs_version from pandas import __version__ as pd_version from numpy import __version__ as np_version from numexpr import __version__ as nxp_version from bottleneck import __version__ as btl_version from seaborn import __version__ as sns_version from matplotlib import __version__ as mpl_version from scipy import __version__ as scipy_version from sklearn import __version__ as skl_version from statsmodels import __version__ as stats_version from pyface import __version__ as pyf_version from envisage import __version__ as env_version from traits import __version__ as trt_version from traitsui import __version__ as trt_ui_version from yapf import __version__ as yapf_version from nbformat import __version__ as nb_version from yaml import __version__ as yaml_version return {"python" : sys.version, "cytoflow" : cf_version, "fcsparser" : fcs_version, "pandas" : pd_version, "numpy" : np_version, "numexpr" : nxp_version, "bottleneck" : btl_version, "seaborn" : sns_version, "matplotlib" : mpl_version, "scipy" : scipy_version, "scikit-learn" : skl_version, "statsmodels" : stats_version, "pyface" : pyf_version, "envisage" : env_version, "traits" : trt_version, "traitsui" : trt_ui_version, "nbformat" : nb_version, "yapf" : yapf_version, "yaml" : yaml_version} def on_about(self): versions = self._get_package_versions() text = ["<b>Cytoflow {0}</b>".format(versions['cytoflow']), "<p>"] ver_text = ["{0} {1}".format(key, value) for key, value in versions.items()] text.extend(ver_text) text.extend(["Icons from the <a href=http://tango.freedesktop.org>Tango Desktop Project</a>", "<a href=https://thenounproject.com/search/?q=setup&i=14287>Settings icon</a> by Paulo Sa Ferreira from <a href=https://thenounproject.com>The Noun 
Project</a>", "<a href=https://thenounproject.com/search/?q=processing&i=849831>Processing icon</a> by Gregor Cresnar from <a href=https://thenounproject.com>The Noun Project</a>", "<a href=http://www.freepik.com/free-photos-vectors/background>App icon from Starline - Freepik.com</a>", "Cuvette image from Wikimedia Commons user <a href=http://commons.wikimedia.org/wiki/File:Hellma_Large_cone_cytometry_cell.JPG>HellmaUSA</a>"]) dialog = AboutDialog(text = text, parent = self.window.control, title = "About", image = ImageResource('cuvette'), additions = text) dialog.open() @on_trait_change('model.selected', post_init = True) def _on_select_op(self, selected): if selected: self.view_pane.enabled = (selected is not None) self.view_pane.default_view = selected.default_view.id if selected.default_view else "" self.view_pane.selected_view = selected.current_view.id if selected.current_view else "" self.help_pane.help_id = selected.operation.id else: self.view_pane.enabled = False @on_trait_change('view_pane.selected_view', post_init = True) def _on_select_view(self, view_id): if not view_id: return # if we already have an instantiated view object, find it try: self.model.selected.current_view = next((x for x in self.model.selected.views if x.id == view_id)) except StopIteration: # else make the new view plugin = next((x for x in self.view_plugins if x.view_id == view_id)) view = plugin.get_view() self.model.selected.views.append(view) self.model.selected.current_view = view self.help_pane.help_id = view_id def add_operation(self, op_id): # first, find the matching plugin plugin = next((x for x in self.op_plugins if x.id == op_id)) # next, get an operation op = plugin.get_operation() # make a new workflow item wi = WorkflowItem(operation = op, deletable = (op_id != 'edu.mit.synbio.cytoflowgui.op_plugins.import')) # if the op has a default view, add it to the wi try: wi.default_view = op.default_view() wi.views.append(wi.default_view) wi.current_view = wi.default_view 
except AttributeError: pass # figure out where to add it if self.model.selected: idx = self.model.workflow.index(self.model.selected) + 1 else: idx = len(self.model.workflow) # the add_remove_items handler takes care of updating the linked list self.model.workflow.insert(idx, wi) # and make sure to actually select the new wi self.model.selected = wi
class SourceMixer(SamplesGenerator): """ Mixes the signals from several sources. """ #: List of :class:`~acoular.sources.SamplesGenerator` objects #: to be mixed. sources = List(Instance(SamplesGenerator, ())) #: Sampling frequency of the signal. sample_freq = Trait(SamplesGenerator().sample_freq) #: Number of channels. numchannels = Trait(SamplesGenerator().numchannels) #: Number of samples. numsamples = Trait(SamplesGenerator().numsamples) # internal identifier ldigest = Property(depends_on=[ 'sources.digest', ]) # internal identifier digest = Property(depends_on=['ldigest', '__class__']) traits_view = View(Item('sources', style='custom')) @cached_property def _get_ldigest(self): res = '' for s in self.sources: res += s.digest return res @cached_property def _get_digest(self): return digest(self) @on_trait_change('sources') def validate_sources(self): """ Validates if sources fit together. """ if self.sources: self.sample_freq = self.sources[0].sample_freq self.numchannels = self.sources[0].numchannels self.numsamples = self.sources[0].numsamples for s in self.sources[1:]: if self.sample_freq != s.sample_freq: raise ValueError("Sample frequency of %s does not fit" % s) if self.numchannels != s.numchannels: raise ValueError("Channel count of %s does not fit" % s) if self.numsamples != s.numsamples: raise ValueError("Number of samples of %s does not fit" % s) def result(self, num): """ Python generator that yields the output block-wise. The outputs from the sources in the list are being added. Parameters ---------- num : integer This parameter defines the size of the blocks to be yielded (i.e. the number of samples per block). Returns ------- Samples in blocks of shape (num, numchannels). The last block may be shorter than num. 
""" gens = [i.result(num) for i in self.sources[1:]] for temp in self.sources[0].result(num): sh = temp.shape[0] for g in gens: temp1 = next(g) if temp.shape[0] > temp1.shape[0]: temp = temp[:temp1.shape[0]] temp += temp1[:temp.shape[0]] yield temp if sh > temp.shape[0]: break
class DataFrameEditor(BasicEditorFactory):
    """Editor factory for basic data frame editor"""

    #: The editor implementation class.
    # Resolved lazily per-toolkit via `toolkit_object` in `_get_klass`.
    klass = Property()

    #: Should an index column be displayed.
    show_index = Bool(True)

    #: Should column headers be displayed.
    show_titles = Bool(True)

    #: Optional list of either column ID or pairs of (column title, column ID).
    columns = List(Union(Str, Tuple(Str, Str)))

    #: The format for each element, or a mapping column ID to format.
    formats = Union(Str, Dict, default_value="%s")

    #: The font for each element, or a mapping column ID to font.
    fonts = Union(Font, Dict, default_value="Courier 10")

    #: The optional extended name of the trait to synchronize the selection
    #: values with:
    selected = Str()

    #: The optional extended name of the trait to synchronize the selection rows
    #: with:
    selected_row = Str()

    #: Whether or not to allow selection.
    selectable = Bool(True)

    #: Whether or not to allow for multiple selections
    multi_select = Bool(False)

    #: The optional extended name of the trait to synchronize the activated
    #: value with:
    activated = Str()

    #: The optional extended name of the trait to synchronize the activated
    #: value's row with:
    activated_row = Str()

    #: The optional extended name of the trait to synchronize left click data
    #: with. The data is a TabularEditorEvent:
    clicked = Str()

    #: The optional extended name of the trait to synchronize left double click
    #: data with. The data is a TabularEditorEvent:
    dclicked = Str()

    #: The optional extended name of the Event trait that should be used to
    #: trigger a scroll-to command. The data is an integer giving the row.
    scroll_to_row = Str()

    #: Deprecated: Controls behavior of scroll to row and scroll to column
    # Kept as a Property that forwards to `scroll_to_position_hint`
    # (with a DeprecationWarning) so existing callers keep working.
    scroll_to_row_hint = Property(Str, observe="scroll_to_position_hint")

    #: (replacement of scroll_to_row_hint, but more clearly named)
    #: Controls behavior of scroll to row and scroll to column
    scroll_to_position_hint = Enum("visible", "center", "top", "bottom")

    #: The optional extended name of the Event trait that should be used to
    #: trigger a scroll-to command. The data is an integer giving the column.
    scroll_to_column = Str()

    #: The optional extended name of the trait to synchronize right click data
    #: with. The data is a TabularEditorEvent:
    right_clicked = Str()

    #: The optional extended name of the trait to synchronize right double
    #: clicked data with. The data is a TabularEditorEvent:
    right_dclicked = Str()

    #: The optional extended name of the trait to synchronize column
    #: clicked data with. The data is a TabularEditorEvent:
    column_clicked = Str()

    #: The optional extended name of the trait to synchronize column
    #: right clicked data with. The data is a TabularEditorEvent:
    column_right_clicked = Str()

    #: Whether or not the entries can be edited.
    editable = Bool(False)

    #: What type of operations are allowed on the list:
    operations = List(
        Enum("delete", "insert", "append", "edit", "move"),
        ["delete", "insert", "append", "edit", "move"],
    )

    #: The optional extended name of the trait used to indicate that a complete
    #: table update is needed:
    update = Str()

    #: The optional extended name of the trait used to indicate that the table
    #: just needs to be repainted.
    refresh = Str()

    #: Set to override the default dataframe adapter
    adapter = Instance(DataFrameAdapter)

    def _get_klass(self):
        """The class used to construct editor objects."""
        # Deferred toolkit lookup keeps this module importable without a GUI
        # backend; the concrete editor lives in the toolkit package.
        return toolkit_object("data_frame_editor:_DataFrameEditor")

    def _get_scroll_to_row_hint(self):
        # Deprecated read accessor: warn, then forward to the new trait.
        warnings.warn(
            "Use of scroll_to_row_hint trait is deprecated. "
            "Use scroll_to_position_hint instead.",
            DeprecationWarning,
        )
        return self.scroll_to_position_hint

    def _set_scroll_to_row_hint(self, hint):
        # Deprecated write accessor: warn, then forward to the new trait.
        warnings.warn(
            "Use of scroll_to_row_hint trait is deprecated. "
            "Use scroll_to_position_hint instead.",
            DeprecationWarning,
        )
        self.scroll_to_position_hint = hint
class ListModel(HasTraits):
    """Minimal model pairing a selection list with its candidate values.

    `value` holds the currently chosen items; `possible_values` offers
    the (id, label) pairs that may be chosen from.
    """

    # Currently selected items (empty by default).
    value = List()

    # Candidate (id, label) pairs available for selection.
    possible_values = List([(1, "one"), (2, "two")])