class ValueLabel(Label):
    """ A label whose displayed text is produced by interpolating a
    numeric or string value into the label's text template.
    """

    #: The value substituted into the text template; may be an integer,
    #: a float, or a string.
    value = Either(Int, Float, Str)

    def _get_text(self):
        """ Return the label text with the current value formatted in. """
        template = self.text
        return template.format(self.value)
class BaseFloatModel(HasTraits):
    """ Model holding float-valued traits in several accepted forms.

    NOTE(review): looks like a fixture exercising ``BaseFloat``
    coercion/validation -- confirm against callers.
    """

    # A plain float value.
    value = BaseFloat

    # A float value that may also be None.
    value_or_none = Either(None, BaseFloat)

    # Accepts either a float or a unicode string.
    float_or_text = Either(Float, Unicode)
class CompositeGridModel(GridModel):
    """ A CompositeGridModel is a model whose underlying data is a
    collection of other grid models.

    The columns of the composite are the concatenated columns of the
    contained models; the row count is the maximum row count of any
    contained model.
    """

    # The models this model is comprised of.
    data = List(Instance(GridModel))

    # The rows in the model, or None to auto-generate row labels from the
    # zero-based index.
    rows = Either(None, List(Instance(GridRow)))

    # The cached data indexes: composite column index -> (model, local index).
    _data_index = Dict()

    # ------------------------------------------------------------------------
    # 'object' interface.
    # ------------------------------------------------------------------------

    def __init__(self, **traits):
        """ Create a CompositeGridModel object. """
        # Base class constructor
        super(CompositeGridModel, self).__init__(**traits)
        # Computed lazily in get_row_count(); None means "not cached yet".
        self._row_count = None

    # ------------------------------------------------------------------------
    # 'GridModel' interface.
    # ------------------------------------------------------------------------

    def get_column_count(self):
        """ Return the number of columns for this table. """
        # for the composite grid model, this is simply the sum of the
        # column counts for the underlying models
        count = 0
        for model in self.data:
            count += model.get_column_count()

        return count

    def get_column_name(self, index):
        """ Return the name of the column specified by the
        (zero-based) index.
        """
        model, new_index = self._resolve_column_index(index)

        return model.get_column_name(new_index)

    def get_column_size(self, index):
        """ Return the size in pixels of the column indexed by col. A value
        of -1 or None means use the default.
        """
        model, new_index = self._resolve_column_index(index)
        return model.get_column_size(new_index)

    def get_cols_drag_value(self, cols):
        """ Return the value to use when the specified columns are dragged
        or copied and pasted. cols is a list of column indexes.
        """
        values = []
        for col in cols:
            model, real_col = self._resolve_column_index(col)
            values.append(model.get_cols_drag_value([real_col]))

        return values

    def get_cols_selection_value(self, cols):
        """ Return the value to use when the specified cols are selected.
        This value should be enough to specify to other listeners what is
        going on in the grid. cols is a list of column indexes.
        """
        # BUG FIX: the original called self.get_cols_drag_value(self, cols),
        # passing `self` as an extra positional argument, which raised a
        # TypeError on every call.
        return self.get_cols_drag_value(cols)

    def get_column_context_menu(self, col):
        """ Return a MenuManager object that will generate the appropriate
        context menu for this column.
        """
        model, new_index = self._resolve_column_index(col)
        return model.get_column_context_menu(new_index)

    def sort_by_column(self, col, reverse=False):
        """ Sort model data by the column indexed by col. The reverse flag
        indicates that the sort should be done in reverse.
        """
        # Sorting is not supported for a composite model.
        pass

    def is_column_read_only(self, index):
        """ Return True if the column specified by the zero-based index
        is read-only.
        """
        model, new_index = self._resolve_column_index(index)
        return model.is_column_read_only(new_index)

    def get_row_count(self):
        """ Return the number of rows for this table. """
        # see if we've already calculated the row_count
        if self._row_count is None:
            row_count = 0
            # return the maximum rows of any of the contained models
            for model in self.data:
                rows = model.get_row_count()
                if rows > row_count:
                    row_count = rows

            # save the result for next time
            self._row_count = row_count

        return self._row_count

    def get_row_name(self, index):
        """ Return the name of the row specified by the
        (zero-based) index.
        """
        label = None
        # if the rows list exists then grab the label from there...
        if self.rows is not None:
            if len(self.rows) > index:
                label = self.rows[index].label
        # ... otherwise generate it from the zero-based index.
        else:
            label = str(index + 1)

        return label

    def get_rows_drag_value(self, rows):
        """ Return the value to use when the specified rows are dragged or
        copied and pasted. rows is a list of row indexes.
        """
        row_values = []
        for rindex in rows:
            row = []
            for model in self.data:
                new_data = model.get_rows_drag_value([rindex])
                # if it's a list then we assume that it represents more than
                # one column's worth of values
                if isinstance(new_data, list):
                    row.extend(new_data)
                else:
                    row.append(new_data)

            # now save our new row value
            row_values.append(row)

        return row_values

    def is_row_read_only(self, index):
        """ Return True if the row specified by the zero-based index
        is read-only.
        """
        read_only = False
        if self.rows is not None and len(self.rows) > index:
            read_only = self.rows[index].read_only

        return read_only

    def get_type(self, row, col):
        """ Return the type of the value stored in the table at (row, col).
        """
        model, new_col = self._resolve_column_index(col)
        return model.get_type(row, new_col)

    def get_value(self, row, col):
        """ Return the value stored in the table at (row, col). """
        model, new_col = self._resolve_column_index(col)
        return model.get_value(row, new_col)

    def get_cell_selection_value(self, row, col):
        """ Return the value stored in the table at (row, col). """
        model, new_col = self._resolve_column_index(col)
        return model.get_cell_selection_value(row, new_col)

    def resolve_selection(self, selection_list):
        """ Returns a list of (row, col) grid-cell coordinates that
        correspond to the objects in selection_list. For each coordinate,
        if the row is -1 it indicates that the entire column is selected.
        Likewise coordinates with a column of -1 indicate an entire row
        that is selected. Note that the objects in selection_list are
        model-specific.
        """
        coords = []
        for selection in selection_list:
            # we have to look through each of the models in order
            # for the selected object
            for model in self.data:
                cells = model.resolve_selection([selection])
                # we know this model found the object if cells comes back
                # non-empty
                if cells is not None and len(cells) > 0:
                    coords.extend(cells)
                    break

        return coords

    # fixme: this context menu stuff is going in here for now, but it
    # seems like this is really more of a view piece than a model piece.
    # this is how the tree control does it, however, so we're duplicating
    # that here.
    def get_cell_context_menu(self, row, col):
        """ Return a MenuManager object that will generate the appropriate
        context menu for this cell.
        """
        model, new_col = self._resolve_column_index(col)
        return model.get_cell_context_menu(row, new_col)

    def is_cell_empty(self, row, col):
        """ Returns True if the cell at (row, col) has a None value, False
        otherwise.
        """
        model, new_col = self._resolve_column_index(col)
        # An out-of-range column resolves to no model; treat as empty.
        if model is None:
            return True
        else:
            return model.is_cell_empty(row, new_col)

    def is_cell_editable(self, row, col):
        """ Returns True if the cell at (row, col) is editable,
        False otherwise.
        """
        model, new_col = self._resolve_column_index(col)
        return model.is_cell_editable(row, new_col)

    def is_cell_read_only(self, row, col):
        """ Returns True if the cell at (row, col) is not editable,
        False otherwise.
        """
        model, new_col = self._resolve_column_index(col)
        return model.is_cell_read_only(row, new_col)

    def get_cell_bg_color(self, row, col):
        """ Return a wxColour object specifying what the background color
        of the specified cell should be.
        """
        model, new_col = self._resolve_column_index(col)
        return model.get_cell_bg_color(row, new_col)

    def get_cell_text_color(self, row, col):
        """ Return a wxColour object specifying what the text color of the
        specified cell should be.
        """
        model, new_col = self._resolve_column_index(col)
        return model.get_cell_text_color(row, new_col)

    def get_cell_font(self, row, col):
        """ Return a wxFont object specifying what the font of the
        specified cell should be.
        """
        model, new_col = self._resolve_column_index(col)
        return model.get_cell_font(row, new_col)

    def get_cell_halignment(self, row, col):
        """ Return a string specifying what the horizontal alignment of the
        specified cell should be. Return 'left' for left alignment, 'right'
        for right alignment, or 'center' for center alignment.
        """
        model, new_col = self._resolve_column_index(col)
        return model.get_cell_halignment(row, new_col)

    def get_cell_valignment(self, row, col):
        """ Return a string specifying what the vertical alignment of the
        specified cell should be. Return 'top' for top alignment, 'bottom'
        for bottom alignment, or 'center' for center alignment.
        """
        model, new_col = self._resolve_column_index(col)
        return model.get_cell_valignment(row, new_col)

    # ------------------------------------------------------------------------
    # protected 'GridModel' interface.
    # ------------------------------------------------------------------------

    def _delete_rows(self, pos, num_rows):
        """ Implementation method for delete_rows. Should return the number
        of rows that were deleted.
        """
        for model in self.data:
            model._delete_rows(pos, num_rows)

        return num_rows

    def _insert_rows(self, pos, num_rows):
        """ Implementation method for insert_rows. Should return the number
        of rows that were inserted.
        """
        for model in self.data:
            model._insert_rows(pos, num_rows)

        return num_rows

    def _set_value(self, row, col, value):
        """ Implementation method for set_value. Should return the number
        of rows, if any, that were appended.
        """
        model, new_col = self._resolve_column_index(col)
        model._set_value(row, new_col, value)
        return 0

    # ------------------------------------------------------------------------
    # private interface
    # ------------------------------------------------------------------------

    def _resolve_column_index(self, index):
        """ Resolves a column index into the correct model and adjusted
        index. Returns the target model and the corrected index.
        """
        real_index = index
        # NOTE: the cache lookup is deliberately disabled (always a miss);
        # _data_index is still populated below and cleared on 'data' changes.
        # If re-enabled, note the cached branch ignores col_index and would
        # need to return it instead of real_index.
        cached = None  # self._data_index.get(index)
        if cached is not None:
            model, col_index = cached
        else:
            model = None
            # Walk the contained models, subtracting each one's column count
            # until the index falls inside a model's range.
            for m in self.data:
                cols = m.get_column_count()
                if real_index < cols:
                    model = m
                    break
                else:
                    real_index -= cols
            self._data_index[index] = (model, real_index)

        return model, real_index

    def _data_changed(self):
        """ Called when the data trait is changed.

        Since this is called when our underlying models change, the cached
        results of the column lookups is wrong and needs to be invalidated.
        """
        self._data_index.clear()

    def _data_items_changed(self):
        """ Called when the members of the data trait have changed.

        Since this is called when our underlying models change, the cached
        results of the column lookups is wrong and needs to be invalidated.
        """
        self._data_index.clear()
class ScatterPlot(BaseXYPlot):
    """ Renders a scatter plot, given an index and value arrays. """

    # The CompiledPath to use if **marker** is set to "custom". This attribute
    # must be a compiled path for the Kiva context onto which this plot will
    # be rendered. Usually, importing kiva.GraphicsContext will do
    # the right thing.
    custom_symbol = Any

    #------------------------------------------------------------------------
    # Styles on a ScatterPlot
    #------------------------------------------------------------------------

    # The type of marker to use. This is a mapped trait using strings as the
    # keys.
    marker = MarkerTrait

    # The pixel size of the markers, not including the thickness of the
    # outline. Default value is 4.0.
    # TODO: for consistency, there should be a size data source and a mapper
    marker_size = Either(Float, Array)

    # The function which actually renders the markers
    render_markers_func = Callable(render_markers)

    # The thickness, in pixels, of the outline to draw around the marker. If
    # this is 0, no outline is drawn.
    line_width = Float(1.0)

    # The fill color of the marker.
    color = black_color_trait

    # The color of the outline to draw around the marker.
    outline_color = black_color_trait

    # The RGBA tuple for rendering lines. It is always a tuple of length 4.
    # It has the same RGB values as color_, and its alpha value is the alpha
    # value of self.color multiplied by self.alpha.
    effective_color = Property(Tuple, depends_on=['color', 'alpha'])

    # The RGBA tuple for rendering the fill. It is always a tuple of length 4.
    # It has the same RGB values as outline_color_, and its alpha value is the
    # alpha value of self.outline_color multiplied by self.alpha.
    effective_outline_color = Property(Tuple, depends_on=['outline_color',
                                                          'alpha'])

    # Traits UI View for customizing the plot.
    traits_view = ScatterPlotView()

    #------------------------------------------------------------------------
    # Selection and selection rendering
    # A selection on the plot is indicated by setting the index or value
    # datasource's 'selections' metadata item to a list of indices, or the
    # 'selection_mask' metadata to a boolean array of the same length as the
    # datasource.
    #------------------------------------------------------------------------

    # Whether selected points are rendered with the selection_* styles below.
    show_selection = Bool(True)

    # Marker style used for selected points.
    selection_marker = MarkerTrait

    # Pixel size of selected-point markers.
    selection_marker_size = Float(4.0)

    # Outline thickness, in pixels, for selected-point markers.
    selection_line_width = Float(1.0)

    # Fill color for selected-point markers.
    selection_color = ColorTrait("yellow")

    # Outline color for selected-point markers.
    selection_outline_color = black_color_trait

    #------------------------------------------------------------------------
    # Private traits
    #------------------------------------------------------------------------

    # Cached data-space coordinates of the selected points (None = invalid).
    _cached_selected_pts = Trait(None, None, Array)
    # Cached screen-space coordinates of the selected points.
    _cached_selected_screen_pts = Array
    # Boolean mask of points inside the visible data range.
    _cached_point_mask = Array
    # Boolean mask of which visible points are selected.
    _cached_selection_point_mask = Array
    # Whether the selection caches above are currently valid.
    _selection_cache_valid = Bool(False)

    #------------------------------------------------------------------------
    # Overridden PlotRenderer methods
    #------------------------------------------------------------------------

    def map_screen(self, data_array):
        """ Maps an array of data points into screen space and returns it as
        an array.

        Implements the AbstractPlotRenderer interface.
        """
        # data_array is Nx2 array
        if len(data_array) == 0:
            return []

        # XXX: For some reason, doing the tuple unpacking doesn't work:
        #        x_ary, y_ary = transpose(data_array)
        # There is a mysterious error "object of too small depth for
        # desired array". However, if you catch this exception and
        # try to execute the very same line of code again, it works
        # without any complaints.
        #
        # For now, we just use slicing to assign the X and Y arrays.
        data_array = asarray(data_array)
        if len(data_array.shape) == 1:
            # A single (x, y) point was passed rather than an Nx2 array.
            x_ary = data_array[0]
            y_ary = data_array[1]
        else:
            x_ary = data_array[:, 0]
            y_ary = data_array[:, 1]

        sx = self.index_mapper.map_screen(x_ary)
        sy = self.value_mapper.map_screen(y_ary)
        # For a vertical orientation the index axis maps to screen y.
        if self.orientation == "h":
            return transpose(array((sx, sy)))
        else:
            return transpose(array((sy, sx)))

    def map_data(self, screen_pt, all_values=True):
        """ Maps a screen space point into the "index" space of the plot.

        Overrides the BaseXYPlot implementation, and always returns an
        array of (index, value) tuples.
        """
        x, y = screen_pt
        if self.orientation == 'v':
            x, y = y, x
        return array(
            (self.index_mapper.map_data(x), self.value_mapper.map_data(y)))

    def map_index(self, screen_pt, threshold=0.0, outside_returns_none=True,
                  index_only=False):
        """ Maps a screen space point to an index into the plot's index
        array(s).

        Overrides the BaseXYPlot implementation..
        """
        index_data = self.index.get_data()
        value_data = self.value.get_data()

        if len(value_data) == 0 or len(index_data) == 0:
            return None

        if index_only and self.index.sort_order != "none":
            data_pt = self.map_data(screen_pt)[0]
            # The rest of this was copied out of BaseXYPlot.
            # We can't just used BaseXYPlot.map_index because
            # it expect map_data to return a value, not a pair.
            if ((data_pt < self.index_mapper.range.low) or
                    (data_pt > self.index_mapper.range.high)) and \
                    outside_returns_none:
                return None

            try:
                ndx = reverse_map_1d(index_data, data_pt,
                                     self.index.sort_order)
            except IndexError, e:
                # if reverse_map raises this exception, it means that data_pt
                # is outside the range of values in index_data.
                if outside_returns_none:
                    return None
                else:
                    # Clamp to the nearest end of the index array.
                    if data_pt < index_data[0]:
                        return 0
                    else:
                        return len(index_data) - 1

            if threshold == 0.0:
                # Don't do any threshold testing
                return ndx

            x = index_data[ndx]
            y = value_data[ndx]
            if isnan(x) or isnan(y):
                return None
            sx, sy = self.map_screen([x, y])
            # NOTE(review): only the x-distance is checked, and it is signed
            # (not abs()) -- confirm against BaseXYPlot before relying on it.
            if ((threshold == 0.0) or (screen_pt[0] - sx) < threshold):
                return ndx
            else:
                return None
        else:
            # NOTE(review): SOURCE is truncated here -- the branch handling
            # unsorted/point-wise lookup is missing from this chunk.
class KromatographyApp(TaskGuiApplication):
    """ An application to run CADET simulations and confront to experiments.
    """

    # -------------------------------------------------------------------------
    # TaskGuiApplication interface
    # -------------------------------------------------------------------------

    #: Application name in CamelCase. Used to set ETSConfig.application_data
    app_name = Str()

    # -------------------------------------------------------------------------
    # KromatographyApp interface
    # -------------------------------------------------------------------------

    #: Files to load at start-up.
    initial_files = Either(None, List(Str))

    #: Paths of recently opened project files (most recent first).
    recent_files = List

    #: Max number of project file paths to remember
    max_recent_files = Int

    #: Initial list of studies to launch on start
    initial_studies = List(Instance(Study))

    #: Source of default data objects for creating new studies.
    datasource = Instance(DataSource)

    #: Manager for running CADET jobs
    job_manager = Instance(JobManager)

    #: File path to the user datasource that the app is using if applicable.
    datasource_file = Str

    #: File path to the log file in use.
    log_filepath = Str

    #: Global schema additions.
    extra_actions = List(
        Instance('pyface.tasks.action.schema_addition.SchemaAddition'))

    #: Force show all logger calls in console? Read preferences otherwise.
    verbose = Bool(False)

    #: Ask confirmation every time one closes a window?
    confirm_on_window_close = Bool

    #: Issue warning dialog when opening a legacy-format project file?
    warn_if_old_file = Bool(True)

    #: Automatically close empty windows when opening a new one?
    auto_close_empty_windows_on_open = Bool

    # -------------------------------------------------------------------------
    # TaskGuiApplication interface methods
    # -------------------------------------------------------------------------

    def start(self):
        """ The application could be started with no argument, with files
        that should be open on start or with custom studies that should be
        opened on start.

        The application will build a task for any files or study passed at
        launch.
        """
        from kromatography.ui.tasks.kromatography_task import KROM_EXTENSION
        from kromatography.ui.branding import APP_TITLE

        starting = super(KromatographyApp, self).start()
        if not starting:
            return False

        self._log_application_start()

        register_all_data_views()

        if self.initial_files:
            # One window per requested file; extension decides the loader.
            for filepath in self.initial_files:
                ext = os.path.splitext(filepath)[1]
                if ext == KROM_EXTENSION:
                    self.open_project_from_file(filepath)
                elif ext == ".xlsx":
                    self.build_study_from_file(filepath)
                else:
                    msg = "{} was requested but {} is unable to load {} files."
                    msg = msg.format(filepath, APP_TITLE, ext)
                    # NOTE(review): logger.exception outside an except block
                    # logs no useful traceback; logger.error may be intended.
                    logger.exception(msg)
                    error(None, msg)
        elif self.initial_studies:
            for study in self.initial_studies:
                self.create_new_task_window(study=study)
        else:
            # Nothing was requested: open a new empty window
            self.create_new_task_window()

        self.job_manager.start()
        return True

    # -------------------------------------------------------------------------
    # Task creation methods
    # -------------------------------------------------------------------------

    def create_new_task_window(self, study=None):
        """ Create a new KromatographyProject, task and open a window with
        it.

        Parameters
        ----------
        study : Study or None
            Create the task and window for the study passed.

        Returns
        -------
        window : TaskWindow
            Window that was created, containing the newly created task.
        """
        traits = {}
        if study is not None:
            traits["study"] = study

        model = KromatographyProject(**traits)
        task = KromatographyTask(project=model)
        return self._finalize_task_and_open_task_window(task)

    def new_study_from_experimental_study_file(self):
        """ Create new study from experimental study file from disk,
        prompting user for the path.
        """
        from kromatography.utils.extra_file_dialogs import \
            study_file_requester

        path = study_file_requester()
        if path is not None:
            self.build_study_from_file(path)

    def new_blank_project(self):
        """ Set the current study to non-blank so that the view updates to
        the editing mode.
        """
        self.active_task.project.study.is_blank = False

    def build_study_from_file(self, filepath, allow_gui=True):
        """ Build a new task and window from loading an ExperimentalStudy
        file.

        Returns
        -------
        TaskWindow
            Returns the newly created TaskWindow around the provided study.
        """
        from kromatography.io.study import load_study_from_excel

        study = load_study_from_excel(filepath, datasource=self.datasource,
                                      allow_gui=allow_gui)
        window = self.create_new_task_window(study=study)
        if study.product_contains_strip:
            study.request_strip_fraction_tool()

        return window

    def request_project_from_file(self):
        """ Open a saved study from loading from file. """
        from kromatography.utils.extra_file_dialogs import \
            project_file_requester

        path = project_file_requester()
        if path is not None:
            self.open_project_from_file(path)

    def open_project_from_file(self, path):
        """ Open a saved task from a project file. """
        from kromatography.io.task import load_project

        path = os.path.abspath(path)
        self.add_to_recent_files(path)
        already_open = self.activate_window_if_already_open(path)
        if already_open:
            msg = "Project {} already loaded.".format(path)
            logger.info(msg)
        else:
            try:
                task, legacy_file = load_project(path)
            except Exception as e:
                msg = ("The object found in {} didn't load successfully. Error"
                       " was {}".format(path, e))
                logger.exception(msg)
                error(None, msg)
                raise IOError(msg)

            if not isinstance(task, KromatographyTask):
                msg = "The object found in {} is not a {} project but a {}"
                msg = msg.format(path, APP_TITLE, type(task))
                logger.exception(msg)
                error(None, msg)
                raise IOError(msg)

            self._finalize_task_and_open_task_window(task)

            if legacy_file and self.warn_if_old_file:
                from pyface.api import warning
                from ..ui.tasks.kromatography_task import KROM_EXTENSION

                msg = "The file {} doesn't use the newest {} format. It is " \
                      "recommended to re-save the project to ensure future " \
                      "readability."
                msg = msg.format(path, KROM_EXTENSION)
                warning(None, msg)

            if self.auto_close_empty_windows_on_open:
                self.close_empty_windows()

            # NOTE(review): `task` is only bound on this branch; if the two
            # statements above were at method level, the already-open path
            # would raise NameError on `return task` -- confirm indentation
            # against version control.
            return task

    def add_to_recent_files(self, path):
        """ Store the project files loaded. """
        # Avoid duplicates:
        if path in self.recent_files:
            self.recent_files.remove(path)

        self.recent_files.insert(0, path)

        # Truncate if too many recent files
        if len(self.recent_files) > self.max_recent_files:
            self.recent_files.pop(-1)

    def activate_window_if_already_open(self, path):
        """ Returns if a project file has been opened already. If so, make
        its window active.

        Parameters
        ----------
        path : str
            Absolute path to the project file we are testing.

        Returns
        -------
        bool
            Whether that path was already open.
        """
        window_to_activate = None
        for window in self.windows_created:
            task = window.active_task
            if task.project_filepath == path:
                window_to_activate = window
                # Bring TaskWindow in question to front:
                window.activate()
                break

        return window_to_activate is not None

    def open_about_dialog(self):
        """ Show the application's About dialog. """
        self.about_dialog.open()

    def open_bug_report(self):
        """ Show a dialog with the content needed to report a bug. """
        from kromatography.utils.app_utils import build_bug_report_content

        information(None, build_bug_report_content(self),
                    title="Report a bug / Send feedback")

    def open_documentation(self):
        """ Open the bundled HTML documentation. """
        doc_target = join(dirname(kromatography.__file__), "doc",
                          "index.html")
        open_file(doc_target)

    def open_tutorial_files(self):
        """ Open the folder containing the bundled tutorial data files. """
        tut_target = join(dirname(kromatography.__file__), "data",
                          "tutorial_data")
        open_file(tut_target)

    def open_preferences(self):
        """ Open the preference view on the stored preferences. """
        from kromatography.utils.app_utils import get_preferences
        from kromatography.tools.preferences_view import \
            RevealChromatographyPreferenceView

        prefs = get_preferences()
        view = RevealChromatographyPreferenceView(model=prefs)
        view.edit_traits(kind="livemodal")

    def open_software_updater(self):
        """ Open the software update tool, checking for updates first. """
        from kromatography.tools.update_downloader import UpdateDownloader

        tool = UpdateDownloader()
        # Trigger a check before opening the UI:
        tool.check_button = True
        tool.edit_traits(kind="modal")

    def open_recent_project(self):
        """ Prompt for one or more recent project files and open them. """
        from kromatography.ui.project_file_selector import ProjectFileSelector

        selector = ProjectFileSelector(path_list=self.recent_files)
        ui = selector.edit_traits(kind="livemodal")
        if ui.result:
            # A single selection may come back as a bare string.
            if isinstance(selector.selected, basestring):
                selector.selected = [selector.selected]

            for selected in selector.selected:
                self.open_project_from_file(selected)

    # -------------------------------------------------------------------------
    # Menu generation methods
    # -------------------------------------------------------------------------

    def close_empty_windows(self):
        """ Close every window whose task is blank, without confirmation. """
        from kromatography.ui.tasks.kromatography_task import is_task_blank

        for window in self.windows_created:
            task = window.active_task
            if is_task_blank(task):
                with self.skip_confirm_on_window_close():
                    window.close()

    def create_new_project_group(self):
        """ Build the File-menu group for creating/opening projects. """
        from kromatography.ui.menu_entry_names import (
            NEW_BLANK_PROJECT_MENU_NAME,
            NEW_PROJECT_FROM_EXPERIMENT_MENU_NAME, OPEN_PROJECT_MENU_NAME)

        return SGroup(
            Action(name=NEW_BLANK_PROJECT_MENU_NAME,
                   on_perform=self.new_blank_project,
                   image=ImageResource('document-new')),
            Action(name=NEW_PROJECT_FROM_EXPERIMENT_MENU_NAME,
                   accelerator='Ctrl+L',
                   on_perform=self.new_study_from_experimental_study_file,
                   image=ImageResource('applications-science')),
            Action(name=OPEN_PROJECT_MENU_NAME,
                   accelerator='Ctrl+O',
                   on_perform=self.request_project_from_file,
                   image=ImageResource('document-open')),
            id='NewStudyGroup', name='NewStudy',
        )

    def create_recent_project_group(self):
        """ Build the File-menu group listing recent projects. """
        return SGroup(
            Action(name="Recent Projects...",
                   on_perform=self.open_recent_project,
                   image=ImageResource('document-open-recent.png')),
            id='RecentProjectGroup', name='NewStudy',
        )

    def create_close_group(self):
        """ Build the platform-appropriate Quit/Exit menu group. """
        return SGroup(
            Action(name='Exit' if IS_WINDOWS else 'Quit',
                   accelerator='Alt+F4' if IS_WINDOWS else 'Ctrl+Q',
                   on_perform=self.exit,
                   image=ImageResource('system-shutdown')),
            id='QuitGroup', name='Quit',
        )

    def create_undo_group(self):
        """ Build the Edit-menu undo/redo group. """
        return SGroup(UndoAction(undo_manager=self.undo_manager,
                                 accelerator='Ctrl+Z'),
                      RedoAction(undo_manager=self.undo_manager,
                                 accelerator='Ctrl+Shift+Z'),
                      id='UndoGroup', name='Undo')

    def create_copy_group(self):
        """ Build the Edit-menu cut/copy/paste group. """
        return SGroup(Action(name='Cut', accelerator='Ctrl+X'),
                      Action(name='Copy', accelerator='Ctrl+C'),
                      Action(name='Paste', accelerator='Ctrl+V'),
                      id='CopyGroup', name='Copy')

    def create_preference_group(self):
        """ Build the Edit-menu preferences group. """
        from kromatography.ui.menu_entry_names import PREFERENCE_MENU_NAME

        return SGroup(
            Action(name=PREFERENCE_MENU_NAME,
                   accelerator='Ctrl+,',
                   on_perform=self.open_preferences,
                   image=ImageResource('preferences-system')),
            id='PreferencesGroup', name='Preferences',
        )

    def create_bug_report_group(self):
        """ Build the Help-menu group for bug reports and About. """
        from kromatography.ui.menu_entry_names import \
            REPORT_ISSUE_FEEDBACK_MENU_NAME

        group = SGroup(
            Action(name=REPORT_ISSUE_FEEDBACK_MENU_NAME,
                   on_perform=self.open_bug_report,
                   image=ImageResource('mail-mark-important')),
            Action(name='Info about {} {}'.format(APP_FAMILY, APP_TITLE),
                   on_perform=self.open_about_dialog,
                   image=ImageResource('system-help')),
            id='HelpGroup', name='HelpGroup',
        )
        return group

    def create_documentation_group(self):
        """ Build the Help-menu group for docs and sample files. """
        group = SGroup(
            Action(name='Show sample input files...',
                   on_perform=self.open_tutorial_files,
                   image=ImageResource('help-browser')),
            Action(name='Open documentation...',
                   on_perform=self.open_documentation,
                   image=ImageResource('help-contents')),
            id='DocsGroup', name='Documentation',
        )
        return group

    def create_update_group(self):
        """ Build the Help-menu group for the software updater. """
        group = SGroup(
            Action(name='Check for updates...',
                   on_perform=self.open_software_updater,
                   image=ImageResource('system-software-update')),
            id='UpdateGroup', name='App Updater',
        )
        return group

    # -------------------------------------------------------------------------
    # KromatographyApp interface
    # -------------------------------------------------------------------------

    # -------------------------------------------------------------------------
    # Private interface
    # -------------------------------------------------------------------------

    def _finalize_task_and_open_task_window(self, task, filename=""):
        """ Connect task to application, sync datasources and job manager
        and open task in a new window.
        """
        from kromatography.utils.datasource_utils import \
            prepare_datasource_catalog

        # Sync the datasources of the project and the study to the
        # application's datasource
        task.project.datasource = self.datasource
        task.project.job_manager = self.job_manager
        task.project.study.datasource = self.datasource
        # Add listeners for the project lists. Task will listen to this to
        # update the UI:
        task.project.add_object_deletion_listeners()

        task.filename = filename
        task.undo_manager = self.undo_manager
        task.extra_actions = self.extra_actions
        task._app = self

        window = self.create_task_window(task)
        # A task loaded from a file starts out clean (no unsaved changes).
        if filename:
            task.set_clean_state(True)

        # Add attributes to contribute new entries to the object_catalog
        prepare_datasource_catalog(self.datasource)
        return window

    def _log_application_start(self):
        """ Emit a banner with version/build/install info to the log. """
        krom_version = kromatography.__version__
        krom_build = kromatography.__build__
        install_location = abspath(dirname(kromatography.__file__))
        logger.info("")
        logger.info(
            " **********************************************************"
        )  # noqa
        logger.info(" * {} application launching... *".format(
            APP_TITLE))  # noqa
        logger.info(
            " * based on kromatography version {} build {} *".format(
                krom_version, krom_build))  # noqa
        logger.info(" * installed in {} *".format(install_location))
        logger.info(
            " **********************************************************"
        )  # noqa
        logger.info("")

    def _on_closing_window(self, window, trait, old, new):
        """ Ask before window closing: save the user DS? Save project? """
        from kromatography.ui.menu_entry_names import PREFERENCE_MENU_NAME

        # Only offer to save the user datasource when closing the last
        # window.
        if self.datasource.dirty and len(self.windows_created) == 1:
            title = "Save the User Datasource?"
            msg = ("Some changes have been made to the User Data. Do you want"
                   " to save it and make it the new default User Data?")
            res = confirm(None, msg, title=title)
            if res == YES:
                task = window.active_task
                task.save_user_ds()

        # Ask for all window in case people change their mind
        if self.confirm_on_window_close:
            msg = "Are you sure you want to close the project? All un-saved " \
                  "changes will be lost. <br><br>(You can suppress this" \
                  " confirmation in the Preference panel: <i>Edit > {}</i>.)"
            msg = msg.format(PREFERENCE_MENU_NAME)
            response = confirm(None, msg, title="Close the project?")
            if response == NO:
                # Vetoing the event keeps the window open.
                new.veto = True

    def _setup_logging(self):
        """ Initialize file/console logging, honoring the verbose flag or
        the stored preferences.
        """
        from kromatography.utils.app_utils import get_preferences, \
            initialize_logging

        if self.verbose:
            verbose = True
        else:
            preferences = get_preferences()
            verbose = preferences.app_preferences.console_logging_level <= 10

        self.log_filepath = initialize_logging(verbose=verbose)

    def _prepare_exit(self):
        """ Do any application-level state saving and clean-up. """
        from kromatography.utils.app_utils import empty_cadet_input_folder, \
            get_preferences

        if self.job_manager:
            self.job_manager.shutdown()
            self.job_manager = None

        # Lazily load preferences to give a chance to a user to change that
        # parameters after starting:
        preferences = get_preferences()
        if preferences.solver_preferences.auto_delete_solver_files_on_exit:
            empty_cadet_input_folder()

        # Remember the recent files
        preferences.file_preferences.recent_files = self.recent_files
        preferences.to_preference_file()

    # -------------------------------------------------------------------------
    # Traits initialization methods
    # -------------------------------------------------------------------------

    def _splash_screen_default(self):
        from kromatography.ui.image_resources import splash_screen

        splash_screen = SplashScreen(image=splash_screen)
        return splash_screen

    def _extra_actions_default(self):
        # Schema additions wiring the menu groups above into the menu bar.
        addition_list = [
            SchemaAddition(
                id='krom.new_study_group',
                path='MenuBar/File',
                absolute_position='first',
                after='RecentProjectGroup',
                factory=self.create_new_project_group,
            ),
            SchemaAddition(
                id='krom.recent_proj_group',
                path='MenuBar/File',
                before='SaveGroup',
                factory=self.create_recent_project_group,
            ),
            SchemaAddition(
                id='krom.close_group',
                path='MenuBar/File',
                absolute_position='last',
                factory=self.create_close_group,
            ),
            SchemaAddition(
                id='krom.undo_group',
                path='MenuBar/Edit',
                absolute_position='first',
                factory=self.create_undo_group,
            ),
            SchemaAddition(
                id='krom.copy_group',
                path='MenuBar/Edit',
                after='UndoGroup',
                factory=self.create_copy_group,
            ),
            SchemaAddition(
                id='krom.preferences_group',
                path='MenuBar/Edit',
                after='CopyGroup',
                factory=self.create_preference_group,
            ),
            SchemaAddition(
                id='krom.help_group',
                path='MenuBar/Help',
                after='DocsGroup',
                factory=self.create_bug_report_group,
            ),
            SchemaAddition(
                id='krom.help_docs_menu',
                path='MenuBar/Help',
                absolute_position='first',
                factory=self.create_documentation_group,
            ),
            SchemaAddition(
                id='krom.help_update',
                path='MenuBar/Help',
                absolute_position='last',
                factory=self.create_update_group,
            ),
        ]
        return addition_list

    def _datasource_default(self):
        """ Build a DS from the latest stored version if possible. Build a
        default one otherwise.
        """
        from kromatography.utils.app_utils import \
            load_default_user_datasource

        return load_default_user_datasource()[0]

    def _datasource_file_default(self):
        from kromatography.utils.app_utils import \
            load_default_user_datasource

        return load_default_user_datasource()[1]

    def _job_manager_default(self):
        from kromatography.model.factories.job_manager import \
            create_start_job_manager
        from kromatography.utils.app_utils import get_preferences

        preferences = get_preferences()
        job_manager = create_start_job_manager(
            max_workers=preferences.solver_preferences.executor_num_worker)
        return job_manager

    def _app_name_default(self):
        return APP_TITLE.title().replace(" ", "")

    def _window_size_default(self):
        from kromatography.utils.app_utils import get_preferences

        prefs = get_preferences()
        ui_prefs = prefs.ui_preferences
        return ui_prefs.app_width, ui_prefs.app_height

    def _about_dialog_default(self):
        from app_common.pyface.ui.about_dialog import AboutDialog
        from kromatography.ui.image_resources import reveal_chrom_logo

        about_msg = copy(ABOUT_MSG)
        return AboutDialog(
            parent=None, additions=about_msg, html_container=ABOUT_HTML,
            include_py_qt_versions=False,
            title='About {} {}'.format(APP_FAMILY, APP_TITLE),
            image=reveal_chrom_logo,
        )

    def _auto_close_empty_windows_on_open_default(self):
        from kromatography.utils.app_utils import get_preferences

        prefs = get_preferences()
        ui_prefs = prefs.ui_preferences
        return ui_prefs.auto_close_empty_windows_on_open

    def _confirm_on_window_close_default(self):
        from kromatography.utils.app_utils import get_preferences

        prefs = get_preferences()
        ui_prefs = prefs.ui_preferences
        return ui_prefs.confirm_on_window_close

    def _recent_files_default(self):
        from kromatography.utils.app_utils import get_preferences

        preferences = get_preferences()
        return preferences.file_preferences.recent_files

    def _max_recent_files_default(self):
        from kromatography.utils.app_utils import get_preferences

        preferences = get_preferences()
        return preferences.file_preferences.max_recent_files

    @contextmanager
    def skip_confirm_on_window_close(self):
        """ Utility to temporarily skip confirmation when closing a window.
        """
        # NOTE(review): if the wrapped block raises, the flag is not
        # restored; a try/finally around the yield would be safer.
        old = self.confirm_on_window_close
        self.confirm_on_window_close = False
        yield
        self.confirm_on_window_close = old
class ScrollArea(Container):
    """ A container presenting at most one child widget inside a
    scrollable viewport.

    The scrollbar policies and an optional preferred size control how the
    viewport is presented; the scroll area performs no layout of its own
    and delegates layout initialization to its child.
    """
    #: Policy controlling the horizontal scrollbar.
    horizontal_scrollbar_policy = ScrollbarPolicy

    #: Policy controlling the vertical scrollbar.
    vertical_scrollbar_policy = ScrollbarPolicy

    #: Preferred (width, height) of the scroll area.  A None entry falls
    #: back to the child widget's size hint for that dimension.  By default
    #: the height is pinned at 200 while the width follows the widget --
    #: the typical case of a lot of vertically laid-out information shown
    #: in a confined area.
    preferred_size = Tuple(Either(None, Int, default=None),
                           Either(Int, None, default=200))

    #: Layout manager for this component and its direct children.  A
    #: scroll area does not manage layout, so this is a null manager.
    layout = Instance(NullLayoutManager, ())

    #: Width hug override: scroll areas are free to expand horizontally.
    hug_width = 'ignore'

    #: Height hug override: scroll areas are free to expand vertically.
    hug_height = 'ignore'

    #: Overridden parent class trait. Only one child is allowed.
    children = List(Either(Instance(Control), Instance(Container)),
                    maxlen=1)

    def initialize_layout(self):
        """ Forward layout initialization to any child that supports it.

        The scroll area neither provides nor maintains a layout manager,
        so this call is simply delegated to its (at most one) child.
        """
        for item in self.children:
            initializer = getattr(item, 'initialize_layout', None)
            if initializer is not None:
                initializer()

    def size_hint(self):
        """ Combine ``preferred_size`` with the toolkit widget's hint.

        Any component of ``preferred_size`` that is None is filled in from
        the abstract widget's size hint.
        """
        pref_w, pref_h = self.preferred_size
        hint_w, hint_h = self.abstract_obj.size_hint()
        return (hint_w if pref_w is None else pref_w,
                hint_h if pref_h is None else pref_h)

    def _preferred_size_changed(self):
        """ Tell the layout system a relayout is required when the
        preferred size changes.
        """
        self.size_hint_updated = True
class StreamlineFactory(DataModuleFactory):
    """Applies the Streamline mayavi module to the given VTK data object."""

    # The Streamline module instance this factory configures.
    _target = Instance(modules.Streamline, ())

    linetype = Trait('line', 'ribbon', 'tube', adapts='streamline_type',
                     desc="""the type of line-like object used to display the streamline.""")

    # Maps the seed-widget name to its index in seed.widget_list; the
    # shadow value 'seedtype_' holds the mapped integer.
    seedtype = Trait('sphere',
                     {'sphere': 0, 'line': 1, 'plane': 2, 'point': 3},
                     desc="""the widget used as a seed for the streamlines.""")

    seed_visible = Bool(True,
                        adapts='seed.widget.enabled',
                        desc="Control the visibility of the seed.",
                        )

    seed_scale = CFloat(1.,
                        desc="Scales the seed around its default center",
                        )

    seed_resolution = Either(None, CInt,
                             desc='The resolution of the seed. Determines the number of '
                                  'seed points')

    integration_direction = Trait('forward', 'backward', 'both',
                                  adapts='stream_tracer.integration_direction',
                                  desc="The direction of the integration.",
                                  )

    def _seedtype_changed(self):
        # XXX: this also acts for seed_scale and seed_resolution, but no
        # need to define explicit callbacks, as all the callbacks are
        # being called anyhow.

        # Swap the live seed widget to the one matching the requested type
        # (seedtype_ is the integer shadow of the seedtype mapping).
        self._target.seed.widget = widget = \
            self._target.seed.widget_list[self.seedtype_]

        if not self.seed_scale == 1.:
            # The widget must be enabled for its geometry edits to stick;
            # visibility is restored below from seed_visible.
            widget.enabled = True
            if self.seedtype == 'line':
                # Scale both endpoints away from the segment's midpoint.
                p1 = widget.point1
                p2 = widget.point2
                center = (p1 + p2) / 2.
                widget.point1 = center + self.seed_scale * (p1 - center)
                widget.point2 = center + self.seed_scale * (p2 - center)
            elif self.seedtype == 'plane':
                # Scale the two corner points and the origin about the
                # midpoint of point1/point2.
                p1 = widget.point1
                p2 = widget.point2
                center = (p1 + p2) / 2.
                o = widget.origin
                widget.point1 = center + self.seed_scale * (p1 - center)
                widget.point2 = center + self.seed_scale * (p2 - center)
                widget.origin = center + self.seed_scale * (o - center)
            elif self.seedtype == 'sphere':
                widget.radius *= self.seed_scale

            # XXX: Very ugly, but this is only way I have found to
            # propagate changes.
            self._target.seed.stop()
            self._target.seed.start()
            widget.enabled = self.seed_visible

        if self.seed_resolution is not None:
            # Same enable/stop/start dance as above to push the new
            # resolution into the widget.
            widget.enabled = True
            if self.seedtype in ('plane', 'line'):
                widget.resolution = self.seed_resolution
            elif self.seedtype == 'sphere':
                widget.phi_resolution = widget.theta_resolution = \
                    self.seed_resolution

            # XXX: Very ugly, but this is only way I have found to
            # propagate changes.
            self._target.seed.stop()
            self._target.seed.start()
            widget.enabled = self.seed_visible
class Block(HasTraits):
    'A block of code that can be inspected, manipulated, and executed.'

    __this = Instance('Block')  # (traits.This scopes dynamically (#986))

    ###########################################################################
    # Block traits
    ###########################################################################

    ### Public traits ########################################################

    # Input and output parameters
    inputs = Property(Instance(set))
    _inputs = Instance(set)
    outputs = Property(Instance(set))
    _outputs = Instance(set)
    conditional_outputs = Property(Instance(set))
    _conditional_outputs = Instance(set)
    all_outputs = Property(Instance(set))
    _get_all_outputs = lambda self: self.outputs | self.conditional_outputs
    const_assign = Property(Instance(tuple))
    fromimports = Property(Instance(set))
    imports_ast = Property(Instance(AST))

    # The sequence of sub-blocks that make up this block, if any. If we don't
    # decompose into sub-blocks, 'sub_blocks' is None.
    sub_blocks = List(__this)

    # The AST that represents our behavior
    # AST replaces Node
    # Also note that this variable name is hard to choose.
    # It was originally called 'ast', but since this is the same name as
    # the module, it needed to be changed. Other possibilities considered
    # were: as_tree, Ast, a_s_tree, astree, a_s_t.
    ast_tree = Instance(AST)

    # The name of the file this block represents, if any. For blocks that
    # aren't files, the default value just shows repr(self). Annotates
    # tracebacks.
    filename = Instance(str)  # (Default 'None', allow 'None')

    # A unique identifier
    uuid = Instance(UUID)
    _uuid_default = lambda self: uuid4()

    # Enabling this makes tracebacks less useful but speeds up Block.execute.
    # The problem is that a code object has a unique filename ('co_filename'),
    # so we can compile all of our sub-blocks' ASTs into a single code object
    # only if we ignore the fact that they might each come from different
    # files.
    no_filenames_in_tracebacks = Bool(False)

    # Is this block the result of merging other blocks?
    grouped = Bool(False)

    ### Protected traits #####################################################

    # The dependency graph for 'sub_blocks', if they exist. If we don't
    # decompose into sub-blocks, '_dep_graph' is None. The '_dep_graph'
    # property uses '__dep_graph' as a cache, and '__dep_graph_is_valid =
    # False' invalidates the cache. ('__dep_graph = None' is valid, so we need
    # to track validity with another variable.)
    _dep_graph = Property(Either(Dict, None))
    __dep_graph = Either(Dict, None)
    __dep_graph_is_valid = Bool(False)

    # Bug fix: this previously depended on 'ast', the pre-rename name of the
    # 'ast_tree' trait, so the cached code object never invalidated when the
    # tree changed.
    _code = Property(depends_on='_code_invalidated, ast_tree')
    _code_invalidated = Event()

    # Flag to break call cycles when we update 'ast_tree' and 'sub_blocks'
    _updating_structure = Bool(False)

    codestring = Property
    _stored_string = Str('')

    # A cache for block restrictions. Invalidates when structure changes.
    __restrictions = Dict

    ###########################################################################
    # object interface
    ###########################################################################

    def __init__(self, x=(), file=None, grouped=False, **kw):
        super(Block, self).__init__(**kw)

        # fixme: Why not use static handlers for this?
        self.on_trait_change(self._structure_changed, 'sub_blocks')
        self.on_trait_change(self._structure_changed, 'sub_blocks_items')
        self.on_trait_change(self._structure_changed, 'ast_tree')

        # Interpret arguments:

        is_file_object = lambda o: hasattr(o, 'read') and hasattr(o, 'name')

        # 'file' -> 'x'
        if file is not None:
            # Turn 'file' into a file object and move to 'x'
            if isinstance(file, str):
                file = open(file)
            elif not is_file_object(file):
                raise ValueError("Expected 'file' to be a file or string, "
                                 "got %r" % file)
            x = file

        # 'x': file object -> string
        saved_filename = None
        if is_file_object(x):
            self.filename = x.name
            saved_filename = self.filename
            x = x.read()

        # 'x' -> 'self.ast_tree' or 'self.sub_blocks'
        if isinstance(x, str):
            self.ast_tree = ast.parse(x, mode='exec')
            # (BlockTransformer handles things like 'import *')
            self.ast_tree = BlockTransformer().visit(self.ast_tree)
            self._stored_string = x
        elif isinstance(x, AST):
            # push an exception handler onto the stack to ensure that the
            # calling function gets the error
            #push_exception_handler(handler = lambda o,t,ov,nv: None, reraise_exceptions=True)
            self._updating_structure = True
            self.ast_tree = x
            self._updating_structure = False
            #pop_exception_handler()
        elif is_sequence(x):
            if len(x) == 0:
                self.ast_tree = stmt([])
            sub_blocks = []
            for block in map(to_block, x):
                # Flatten: atomic blocks are appended, composites are
                # spliced in by their sub-blocks.
                if block.sub_blocks is None or len(block.sub_blocks) == 0:
                    sub_blocks.append(block)
                else:
                    sub_blocks += block.sub_blocks
            self.sub_blocks = sub_blocks
        else:
            raise ValueError('Expecting file, string, AST, or sequence. '
                             'Got %r' % x)

        # prepare the inputs and outputs
        self._clear_cache_inputs_and_outputs()

        # We really want to keep the filename for "pristine" blocks, and
        # _structure_changed nukes it most times
        self.filename = saved_filename

        # Set flag whether this is a grouped block or not
        self.grouped = grouped

    def __eq__(self, other):
        return type(self) == type(other) and self.uuid == other.uuid

    def __hash__(self):
        return hash((type(self), self.uuid))

    def __getstate__(self):
        # Evaluate attributes with non-deterministic default values to force
        # their initialization (see #1023)
        self.uuid

        # get the state but remove the code object cache
        state = super(Block, self).__getstate__()
        state['_Block_version'] = 1

        return state

    def __setstate__(self, state):
        # Migrate pickles from before the public/shadow trait split.
        version = state.pop('_Block_version', 0)
        if version < 1:
            if 'inputs' in state:
                state['_inputs'] = state.pop('inputs')
            if 'outputs' in state:
                state['_outputs'] = state.pop('outputs')
            if 'conditional_outputs' in state:
                state['_conditional_outputs'] = state.pop(
                    'conditional_outputs')

        super(Block, self).__setstate__(state)

        # reset the dynamic trait change handlers
        # fixme: Why not use static handlers for this?
        self.on_trait_change(self._structure_changed, 'sub_blocks')
        self.on_trait_change(self._structure_changed, 'sub_blocks_items')
        self.on_trait_change(self._structure_changed, 'ast_tree')

    def __repr__(self):
        return '%s(uuid=%s)' % (self.__class__.__name__, self.uuid)

    def __str__(self):
        return repr(self)  # TODO Unparse ast (2.5) (cf. #1167)

    def _print_debug_graph(self, graph):
        """ Only to be used for debugging- prints each node of the graph
        with its immediate dependents following
        """
        # NOTE(review): uses the old '.ast' attribute (now 'ast_tree');
        # debug-only helper, left as-is -- confirm before relying on it.
        print("--------------------------------------")
        for k in list(graph.keys()):
            if isinstance(k, Block):
                print(k.ast)
                for dep in graph[k]:
                    if isinstance(dep, Block):
                        print(" %s" % dep.ast)
                    else:
                        print(" '%s'" % dep[0])

    ###########################################################################
    # Block public interface
    ###########################################################################

    def remove_sub_block(self, uuid):
        """ Remove the first sub-block whose uuid matches; raise KeyError if
        this block has no sub-blocks at all.
        """
        if self.sub_blocks is None:
            raise KeyError()
        for sb in self.sub_blocks:
            if sb.uuid == uuid:
                self.sub_blocks.remove(sb)
                break

    def is_empty(self):
        """ Return true if 'block' has an empty AST. """
        # NOTE(review): 'self.ast' and 'Stmt' are leftovers of the old
        # (compiler-module) AST API; with the new 'ast' module this likely
        # needs 'self.ast_tree' and a Module/body check -- confirm before
        # changing, since other old-API call sites exist in this class.
        return isinstance(self.ast, Stmt) and len(self.ast.nodes) == 0

    def execute(self, local_context, global_context=None,
                continue_on_errors=False):
        """Execute the block in local_context, optionally specifying a global
        context. If continue_on_errors is specified, continue executing code
        after an exception is thrown and throw the exceptions at the end of
        execution. If more than one exception was thrown, combine them in a
        CompositeException.
        """
        # Bug fix: the default used to be a shared mutable dict ({}), so
        # names exec'd into it leaked between unrelated execute() calls.
        if global_context is None:
            global_context = {}

        # To get tracebacks to show the right filename for any line in any
        # sub-block, we need each sub-block to compile its own '_code' since a
        # code object only keeps one filename. This is slow, so we give the
        # user the option 'no_filenames_in_tracebacks' to gain speed but lose
        # readability of tracebacks.
        if (len(self.sub_blocks) == 0 or self.no_filenames_in_tracebacks) and \
                not continue_on_errors:
            if self.filename:
                local_context['__file__'] = self.filename
            exec(self._code, global_context, local_context)
        else:
            if continue_on_errors:
                exceptions = []
                for block in self.sub_blocks:
                    try:
                        block.execute(local_context, global_context)
                    except Exception as e:
                        # save the current traceback
                        e.traceback = format_exc()
                        exceptions.append(e)
                if exceptions:
                    if len(exceptions) > 1:
                        raise CompositeException(exceptions)
                    else:
                        raise exceptions[0]
            else:
                for block in self.sub_blocks:
                    block.execute(local_context, global_context)
        return

    def execute_impure(self, context, continue_on_errors=False,
                       clean_shadow=True):
        """Execution of a block that does not have all of its dependencies
        satisfied.

        Summary:
        Allows the execution of code blocks containing impure functions.

        Description:
        If the code block to be executed contains an impure function, then
        the use of global and local contexts becomes more complex.
        execute_impure helps you do this appropriately.

        The crucial restrictions are (background, optional read):

        * The passed-in global context must be an actual dictionary. It is
          visible throughout the code block.
        * The passed-in local context is not visible inside the function,
          unless it is identical to the passed-in global context.
        * Any names defined in the code block top level (outside the
          function), become part of the passed-in local context, not
          necessarily part of the passed-in global context.

        Therefore (read this!):

        * If the function needs access to names from a passed-in context,
          that context must be global and must be a dict.
        * If the function needs access to names defined in the code block's
          top level (outside the function), including imports or other
          function definitions, then the passed-in local and global contexts
          must be identical, and must be a dict.

        To meet these requirements, execute_impure copies a context into a
        dict, then executes the code block with this dict serving as both
        local and global context.

        execute_impure also uses a shadow dictionary to track any calls to
        the context dict's __setitem__, allowing you to keep track of changes
        made to the context by the code block.

        * If a name is defined in the top level of the code block, then it
          will be saved in the shadow dictionary.
        * If a value in the context is a mutable object, then both the
          original context and the shadow dict hold references to it, and
          any changes to it will automatically be reflected in the original
          context, not in the shadow dictionary.
        * Any global names defined inside a function in the code block (via
          the 'global' command) will not be reflected in the shadow
          dictionary, because the global context is always directly accessed
          by low-level c code (as of python versions through 3.2).

        Parameters:

        context: namespace in which to execute the code block(s)

        continue_on_errors: see method 'execute'

        clean_shadow: If True, then before returning the shadow dictionary,
        deletes from it all functions, methods, and items whose name begins
        with underscore.

        Returns the shadow dictionary, described above. A common usage would
        be to update the original context with the returned shadow. If the
        original context is a MultiContext, then by default the first
        subcontext would be updated with these shadowed names/values.
        """
        shadowed = ShadowDict(context)
        self.execute(shadowed, shadowed, continue_on_errors)
        shadow = shadowed.shadow
        if clean_shadow:
            # Fixme: clean_shadow should probably remove a few more types,
            # but these are the obvious ones.
            shadow = dict(
                (name, value) for (name, value) in shadow.items()
                if name[0] != '_' and
                type(value) not in (types.FunctionType, types.ModuleType))
        return shadow

    def invalidate_cache(self):
        """ Someone modified the block's internal ast. This method provides
        an explicit means to invalidating the cached _code object
        """
        self._code_invalidated = True

    def restrict(self, inputs=(), outputs=()):
        ''' The minimal sub-block that computes 'outputs' from 'inputs'.

        Consider the block:

            x = expensive()
            x = 2
            y = f(x, a)
            z = g(x, a)
            w = h(a)

        This block has inputs 'a' (and 'f', 'g', 'h', and 'expensive'), and
        outputs 'x', 'y', 'z', and 'w'. If one is only interested in
        computing 'z', then lines 1, 3, and 5 can be omitted. Similarly, if
        one is only interested in propagating the effects of changing 'a',
        then lines 1 and 2 can be omitted. And if one only wants to
        recompute 'z' after changing 'a', then all but line 4 can be
        omitted.

        In this fashion, 'restrict' computes the minimal sub-block that
        computes 'outputs' from 'inputs'. See the tests in
        'block_test_case.BlockRestrictionTestCase' for concrete examples.

        Assumes 'inputs' and 'outputs' are subsets of 'self.inputs' and
        'self.outputs', respectively.
        '''
        inputs = set(inputs)
        outputs = set(outputs)

        # Look for results in the cache
        cache_key = (frozenset(inputs), frozenset(outputs))
        if cache_key in self.__restrictions:
            return self.__restrictions[cache_key]

        # Validate the method arguments.
        #
        # 'inputs' are allowed to be in the block inputs or outputs to allow
        # for restricting intermidiate inputs.
        if not (inputs or outputs):
            raise ValueError('Must provide inputs or outputs')
        if not inputs.issubset(self.inputs | self.outputs | self.fromimports):
            raise ValueError(
                'Unknown inputs: %s' %
                (inputs - self.inputs - self.outputs - self.fromimports))
        if not outputs.issubset(self.all_outputs):
            raise ValueError('Unknown outputs: %s' %
                             (outputs - self.all_outputs))

        # Validate the block to make sure it is safe for restriction
        if self.validate_for_restriction() is not None:
            raise RuntimeError("Block failed to validate")

        # If we don't decompose, then we are already as restricted as possible
        if self.sub_blocks is None:
            return self

        # we must keep a list of import blocks. Imports are lost because
        # they are not reachable during the reversing of the graph
        import_sub_blocks = []
        for sub_block in self.sub_blocks:
            # NOTE(review): '.ast' and 'compiler.ast' are old (Python 2)
            # compiler-module API; with the new 'ast' module this would be
            # 'sub_block.ast_tree' and ast.Import / ast.ImportFrom --
            # confirm before changing.
            if isinstance(sub_block.ast, compiler.ast.Import) or \
                    isinstance(sub_block.ast, compiler.ast.From):
                import_sub_blocks.append(sub_block)

        # We use the mock constructors `In` and `Out` to separate input and
        # output names in the dep graph in order to avoid cyclic graphs (in
        # case input and output names overlap)
        in_, out = object(), object()  # (singletons)
        In = lambda x: (x, in_)
        Out = lambda x: (x, out)

        def wrap_names(wrap):
            "Wrap names and leave everything else alone"
            def g(x):
                if isinstance(x, str):
                    return wrap(x)
                else:
                    return x
            return g

        # Decorate input names with `In` and output names with `Out` so that
        # `g` isn't cyclic
        g = map_keys(
            wrap_names(Out),
            map_values(lambda l: list(map(wrap_names(In), l)),
                       self._dep_graph))

        # Find the subgraph reachable from inputs, and then find its subgraph
        # reachable from outputs. (We could also flip the order.)
        if inputs:
            # look in the outputs for intermediate inputs
            intermediates = list(map(Out, self.outputs.intersection(inputs)))
            inputs = inputs - self.outputs.intersection(inputs)

            # Find the intermediate's block node and replace it
            # with the intermediate value. This effectively cuts
            # the block's children off the tree. This means for
            # the code "c = a * b; d = c * 3", we are removing c's
            # dependency on "a" and "b"
            #
            # There is a special case which is handled here as well:
            # for the code "c = a * b; d = c * 3", is the user wants
            # to restrict the block with 'd' as an input, the result
            # is an empty sub-block
            for intermediate in intermediates:
                pruned_block = g[intermediate][0]

                # its possible someone tried to restrict an import,
                # which is not in the graph
                # fixme: uncommenting these breaks pruning intermediates,
                # why is this here?
                # if not g.has_key(pruned_block):
                #     intermediates.remove(intermediate)
                #     continue

                # if intermediate is not removed, the resulting graph will
                # be cyclic
                g.pop(intermediate)

                pure_output = True
                for v in list(g.values()):
                    if pruned_block in v:
                        pure_output = False
                        v.remove(pruned_block)
                        v.append(intermediate)
                if pure_output:
                    # pure outputs must be kept on the graph or else
                    # they will not be reachable
                    g[intermediate] = [Block("%s = %s" %
                                             (intermediate[0],
                                              intermediate[0]))]

            inputs = list(map(In, inputs)) + intermediates

            # if no inputs were valid, do not alter the graph
            if len(inputs) > 0:
                g = graph.reverse(
                    graph.reachable_graph(graph.reverse(g), inputs))
        if outputs:
            outputs = list(map(Out, outputs))
            g = graph.reachable_graph(
                g, set(outputs).intersection(list(g.keys())))

        # Create a new block from the remaining sub-blocks (ordered imports
        # first, then input to output, ignoring the variables at the ends)
        # and give it our filename
        remaining_sub_blocks = [
            node for node in reversed(graph.topological_sort(g))
            if isinstance(node, Block)
        ]

        # trim out redundant imports which can occur if a restricted output
        # is one of the imports.
        # Bug fix: this used to call remove() while iterating the same list,
        # which can skip adjacent duplicates; filter instead.
        remaining_sub_blocks = [sb for sb in remaining_sub_blocks
                                if sb not in import_sub_blocks]

        b = Block(import_sub_blocks + remaining_sub_blocks)
        b.filename = self.filename

        # Cache result
        self.__restrictions[cache_key] = b

        return b

    def get_function(self, inputs=[], outputs=[]):
        """Return a function which takes the list of input variables
        as arguments and returns the given output_list.

        These lists determine the calling order for the function.
        """
        # (inputs/outputs defaults are mutable but never mutated here --
        # they are only read or rebound.)
        if isinstance(outputs, str):
            outputs = [outputs]
        if isinstance(inputs, str):
            inputs = [inputs]
        block = self.restrict(inputs=inputs, outputs=outputs)
        # Speed up execution of the block
        block.no_filenames_in_tracebacks = True
        leni = len(inputs)
        leno = len(outputs)

        def simplefunc(*args):
            if len(args) != leni:
                raise ValueError("Must have %d inputs" % leni)
            namespace = {}
            for i, arg in enumerate(args):
                namespace[inputs[i]] = arg
            block.execute(namespace)
            if leno == 1:
                return namespace[outputs[0]]
            vals = []
            for name in outputs:
                vals.append(namespace[name])
            return tuple(vals)

        callstr = '(%s)' % ','.join(inputs)
        retstr = ','.join(outputs)
        simplefunc.__doc__ = "%s = <name>%s" % (retstr, callstr)
        simplefunc._block = block
        return simplefunc

    def validate_for_restriction(self):
        # Check to ensure that there is not sub_block that has the same
        # variable as an input and an output. Return the offending
        # sub_block if one exists, or return None if block is valid.
        if self.sub_blocks is None:
            if len(self.inputs.intersection(self.outputs)) > 0:
                return self
        else:
            for sb in self.sub_blocks:
                if len(sb.inputs.intersection(sb.outputs)) > 0:
                    return sb
        return None

    ###########################################################################
    # Block protected interface
    ###########################################################################

    # 'ast_tree' determines 'sub_blocks' and 'sub_blocks' determines
    # 'ast_tree', so when one changes we update the other. Both together
    # represent the main structure of the Block object and determine
    # 'inputs', 'outputs', 'conditional_outputs', '_dep_graph', and '_code'.
    #
    # We compute '_dep_graph' and '_code' only when necessary and avoid
    # redundant computation.

    # I don't think this is needed anymore since new AST uses Module([body])
    # not Module(Stmt([body])) like the old AST.
    # def _tidy_ast_tree(self):
    #     if isinstance(self.ast_tree, Module):
    #         self.ast_tree = self.ast_tree.body
    #     if isinstance(self.ast_tree, stmt) and \
    #             len(self.ast_tree._fields) == 1:
    #         self.ast_tree = \
    #             [node for node in self.ast_tree.iter_child_nodes()]

    def _structure_changed(self, name, new):
        if not self._updating_structure:
            try:
                self._updating_structure = True

                if name == 'ast_tree':
                    # Compute our new sub-blocks and give them our filename
                    sub_blocks = Block._decompose(self.ast_tree)
                    self.sub_blocks = sub_blocks
                    if self.sub_blocks is not None:
                        for b in self.sub_blocks:
                            b.filename = self.filename
                        # Policy: Only atomic blocks have filenames
                        self.filename = None
                elif name in ('sub_blocks', 'sub_blocks_items'):
                    self.ast_tree = Module(
                        [b.ast_tree for b in self.sub_blocks])
                else:
                    assert False

                # Invalidate caches
                self.__dep_graph_is_valid = False
                self.__restrictions.clear()
                self._stored_string = ''

                # update inputs and outputs
                self._clear_cache_inputs_and_outputs()
            finally:
                self._updating_structure = False

    def _clear_cache_inputs_and_outputs(self):
        # Reset the lazily-computed name sets; they are rebuilt on first
        # access by _set_inputs_and_outputs().
        self._inputs = None
        self._outputs = None
        self._conditional_outputs = None
        self._const_assign = None
        self._fromimports = None
        self._imports_ast = None

    def _set_inputs_and_outputs(self):
        # Walk the AST once and populate all the cached name sets.
        if self.ast_tree is None:
            return
        nf = NameFinder()
        nf.visit(self.ast_tree)
        self._inputs = set(nf.free)
        self._outputs = set(nf.locals)
        self._conditional_outputs = set(nf.conditional_locals)
        temp = [unparse(x).strip() for x in nf.constlist]
        temp2 = [x.split('=')[0].strip() for x in temp]
        self._const_assign = (set(temp2), temp)
        self._fromimports = set(nf.fromimports)
        self._outputs -= self.fromimports
        self._imports_ast = nf.imports

    def _get_inputs(self):
        if self._inputs is None:
            self._set_inputs_and_outputs()
        return self._inputs

    def _get_outputs(self):
        if self._outputs is None:
            self._set_inputs_and_outputs()
        return self._outputs

    def _get_conditional_outputs(self):
        if self._conditional_outputs is None:
            self._set_inputs_and_outputs()
        return self._conditional_outputs

    def _get_const_assign(self):
        if self._const_assign is None:
            self._set_inputs_and_outputs()
        return self._const_assign

    def _get_fromimports(self):
        if self._fromimports is None:
            self._set_inputs_and_outputs()
        return self._fromimports

    def _get_imports_ast(self):
        if self._imports_ast is None:
            self._set_inputs_and_outputs()
        return self._imports_ast

    def _get_codestring(self):
        # Prefer the original source text when we still have it; otherwise
        # reconstruct from the AST.
        if self._stored_string != '':
            return self._stored_string
        else:
            return unparse(self.ast_tree)

    @cached_property
    def _get__code(self):
        # Policy: our AST is either a Module or something that fits in a
        # Stmt. (Note that a Stmt fits into a Stmt.)
        ast_tree = self.ast_tree
        # Bug fix: this previously tested 'isinstance(ast, Module)' -- i.e.
        # the stdlib 'ast' *module*, which is never a Module node -- so every
        # tree, including real Modules, was re-wrapped. Test the tree itself.
        if not isinstance(ast_tree, Module):
            ast_tree = Module(None, stmt([ast_tree]))

        # Make a useful filename to display in tracebacks
        if not self.no_filenames_in_tracebacks:
            if self.filename is not None:
                filename = self.filename
            else:
                filename = '<%r>' % self
        else:
            filename = '(Block with filename suppressed)'

        return compile_ast(ast_tree, filename, 'exec')

    def _get__dep_graph(self):
        # Cache dep graphs
        if not self.__dep_graph_is_valid:
            inputs, outputs, conditional_outputs, self.__dep_graph = \
                Block._compute_dependencies(self.sub_blocks)
            self.__dep_graph_is_valid = True
        return self.__dep_graph

    ###########################################################################
    # Block class interface
    ###########################################################################

    @classmethod
    def from_file(cls, f):  # (XXX Deprecated)
        'Create a Block from a source file.'
        import warnings
        warnings.warn(DeprecationWarning("Use Block.__init__(file=...)"))
        return cls(file=f)

    @classmethod
    def from_string(cls, s):  # (XXX Deprecated)
        'Create a Block from a code string.'
        import warnings
        warnings.warn(DeprecationWarning("Use Block.__init__"))
        return cls(s)

    ###########################################################################
    # Block class protected interface
    ###########################################################################

    @classmethod
    def _decompose(cls, ast_tree):
        ''' Decompose an AST into a sequence of blocks, if possible.

        Returns 'None' on failure.
        '''
        assert isinstance(ast_tree, AST)

        # TODO Look within 'for', 'if', 'try', etc. (#1165)
        if isinstance(ast_tree, Module):
            result = [cls._decompose(node) for node in ast_tree.body]
        elif isinstance(ast_tree, stmt):
            if len(ast_tree._fields) == 0:
                result = [Block(ast_tree)]
            elif len(ast_tree._fields) == 1:
                # Treat 'Stmt([node])' the same as 'node'
                result = cls._decompose(getattr(ast_tree,
                                                ast_tree._fields[0]))
            else:
                # NOTE(review): unlike the other branches this returns a bare
                # Block, not a list -- confirm callers tolerate that.
                result = Block(ast_tree)
        else:
            result = [Block(ast_tree)]

        return result

    ###########################################################################
    # Block static protected interface
    ###########################################################################

    @staticmethod
    def _compute_dependencies(blocks):
        ''' Given a sequence of blocks, compute the aggregate inputs,
        outputs, and dependency graph.

        Parameters
        ----------
        blocks : List(Block)
          A list of blocks in order of execution to "tie-up" into a larger,
          single block.

        Returns
        -------
        inputs : Set(Str)
          The input parameters to the new block.

        outputs : Set(Str)
          The output parameters to the new block.

        conditional_outputs : Set(Str)
          The conditional output parameters to the new block, i.e. names
          that might or might not be defined by an arbitrary execution of
          the block.

        dep_graph : Dict(Either(Block, Str), List(Either(Block,Str)))
          The dependency graph (directed, acyclic) relating the blocks from
          the given sequence: block A depends on block B iff an output from
          B is used as an input to A. Additionally, the names of the inputs
          and outputs for the new block are included in the graph to capture
          their dependency relations to the contained blocks: name X depends
          on block A iff X is an output of A, and block A depends on name X
          iff X is an input to A.

        (Alternative: make each Block track its own dependencies)
        '''

        # An ad-hoc graph interface
        class Graph(dict):
            def link(self, k, v):
                return self.setdefault(k, set()).add(v)

            def unlink_from(self, k):
                if k in self:
                    del self[k]

        # Deferred computations
        deferred = set()

        # Build dep_graph: a not transitively closed dependency graph that
        # relates blocks to the blocks and inputs they depend on, and
        # outputs to the last block that modifies or creates them
        inputs, outputs, conditional_outputs = set(), set(), set()
        dep_graph, env = Graph(), {}
        for b in blocks:

            # 'b' depends on the provider for each of its inputs or, if none
            # exists, it depends on the inputs themselves (as inputs to the
            # aggregate block). If a name is provided only conditionally,
            # then 'b' depends on both the provider and the input.
            for i in b.inputs:
                # We need to make sure that dotted names are not included if
                # their parent module or object is already in env.
                # NOTE(review): the 'else' branch concatenates prefix parts
                # without re-inserting the '.' separator ('a.b' -> 'ab');
                # looks like a bug, but fixing it changes which env entries
                # match -- confirm against the dotted-name test cases first.
                process_i, prefix, suffix = True, '', i
                while '.' in suffix:
                    if prefix == '':
                        prefix = suffix[:suffix.find('.')]
                    else:
                        prefix += suffix[:suffix.find('.')]
                    if prefix in env:
                        dep_graph.link(b, env[prefix])
                        process_i = False
                        break
                    suffix = suffix[suffix.find('.') + 1:]
                if process_i:
                    if i in env:
                        dep_graph.link(b, env[i])
                    if i not in env or i in conditional_outputs:
                        inputs.add(i)
                        dep_graph.link(b, i)

            for c in b.conditional_outputs:

                # 'b's outputs depend only on 'b'
                dep_graph.unlink_from(c)
                dep_graph.link(c, b)

                # 'b' depends on the provider for each of its conditional
                # outputs or, if none exists and the end result has an input
                # of the same name, 'b' depends on that input. (We defer the
                # latter test to check against the final set of inputs rather
                # than just the inputs we've found so far.)
                if c in env:
                    dep_graph.link(b, env[c])
                else:
                    def f(b=b, c=c):
                        if c in inputs:
                            dep_graph.link(b, c)
                    deferred.add(f)

                # 'b' contributes conditional outputs to the aggregate block
                # unless they are already unconditional
                if c not in outputs:
                    conditional_outputs.add(c)

                # 'b' becomes the provider for its conditional outputs
                env[c] = b

            for o in b.outputs:

                # 'b's outputs depend only on 'b'
                dep_graph.unlink_from(o)
                dep_graph.link(o, b)

                # 'b' contributes its outputs to the aggregate block -- as
                # unconditional outputs
                outputs.add(o)
                conditional_outputs.discard(o)

                # 'b' becomes the provider for its outputs
                env[o] = b

        # Run deferred computations
        for f in deferred:
            f()

        return inputs, outputs, conditional_outputs, dep_graph
class ProcessValue(HasTraits):
    """ A named process measurement monitored by the dashboard, with
    change detection, attached conditionals, and a traits UI view.
    """
    # Human-readable name of the value.
    name = Str
    # Display units (appended to the display name when set).
    units = Str
    tag = Str
    func_name = Str
    # Minimum absolute change considered "different" by is_different().
    change_threshold = Float(1e-20)
    period = Either(Float, Str)  # "on_change" or number of seconds
    # Epoch seconds of the last measurement; 0/unset means "never".
    last_time = Float
    # Formatted timestamp derived from last_time.
    last_time_str = Property(depends_on='last_time')
    enabled = Bool
    last_value = Float
    timeout = Float
    plotid = Int
    conditionals = List(DashboardConditional)
    flag = Enum(NOERROR, WARNING, CRITICAL)
    path = Str
    record = Bool(False)
    display_name = Property

    def is_different(self, v):
        """ Return True if ``v`` differs from the last value by more than
        ``change_threshold``, or if the last measurement is older than one
        hour; otherwise return None (falsy).

        Side effect: updates ``last_value`` (but not ``last_time``) when a
        difference is detected.
        """
        ret = None
        ct = time.time()
        # max time (s) allowed without a measurement taken even if the
        # current value is the same as the last value
        tt = 60 * 60
        threshold = self.change_threshold
        if abs(self.last_value - v) > threshold or \
                (self.last_time and ct - self.last_time > tt):
            # a = abs(self.last_value - v) > threshold
            # b = (self.last_time and ct - self.last_time > tt)
            # self.debug('a={} {}-{}>{}, b={}'.format(a, self.last_value, v,threshold, b))
            self.last_value = v
            ret = True
        return ret

    def _get_display_name(self):
        # "name (units)" when units are set, plain name otherwise.
        n = self.name
        if self.units:
            n = '{} ({})'.format(n, self.units)
        return n

    def traits_view(self):
        """ Build the traits UI view for this process value. """
        v = View(VGroup(HGroup(UItem('enabled'), Readonly('name')),
                        VGroup(HGroup(Readonly('tag'), Readonly('period')),
                               HGroup(Readonly('last_time_str'),
                                      Readonly('last_value')),
                               VGroup(UItem('conditionals',
                                            editor=ListEditor(
                                                editor=InstanceEditor(),
                                                style='custom',
                                                mutable=False)),
                                      show_border=True,
                                      label='Conditionals'),
                               enabled_when='enabled')))
        return v

    def _get_last_time_str(self):
        # Empty string until a measurement has been recorded.
        r = ''
        if self.last_time:
            r = convert_timestamp(self.last_time)
        return r
class MassSpecExportSpec(Loggable):
    """Specification describing a single analysis for export to a
    Mass Spec database.

    Attributes are populated from an analysis record via ``load_record``;
    most of them mirror attributes of the source record, sometimes under a
    different name (see the mapping in ``load_record``).
    """
    runid = CStr
    labnumber = CStr
    aliquot = Either(CInt, Str)
    step = Str
    irradpos = CStr
    isotopes = Dict

    mass_spectrometer = Str
    extract_device = Str
    tray = Str
    position = Property(depends_on='_position')
    _position = Any

    timestamp = Float
    power_requested = Float(0)
    power_achieved = Float(0)
    extract_value = Float(0)
    duration = Float(0)
    cleanup = Float(0)
    duration_at_request = Float(0)
    first_stage_delay = CInt(0)

    runscript_name = Str
    runscript_text = Str
    comment = Str

    update_rundatetime = Bool
    is_peak_hop = Bool
    # Default detector used when peak hopping.
    peak_hop_detector = 'CDD'
    ic_factor_v = Float
    ic_factor_e = Float

    irradiation = Str
    level = Str
    irradiation_position = CInt
    production_ratios = Dict
    chron_dosages = List
    interference_corrections = Dict
    production_name = Str
    j = Any

    shared_logger = True

    @property
    def second_stage_delay(self):
        """Alias for ``cleanup``; Mass Spec calls this the second stage delay."""
        return self.cleanup

    def load_record(self, record):
        """Copy attributes from ``record`` onto this spec.

        ``attrs`` maps (spec attribute, record attribute); several spec
        attributes intentionally read the same record attribute (e.g. both
        ``power_requested`` and ``power_achieved`` come from
        ``extract_value``). Missing record attributes are skipped; trait
        validation errors are logged and skipped.
        """
        attrs = [('labnumber', 'labnumber'),
                 ('aliquot', 'aliquot'),
                 ('step', 'step'),
                 ('uuid', 'uuid'),
                 ('irradpos', 'labnumber'),
                 ('timestamp', 'timestamp'),
                 ('extract_device', 'extract_device'),
                 ('tray', 'tray'),
                 ('position', 'position'),
                 ('power_requested', 'extract_value'),
                 ('power_achieved', 'extract_value'),
                 ('extract_value', 'extract_value'),
                 ('duration', 'duration'),
                 ('duration_at_request', 'duration'),
                 ('first_stage_delay', 'duration'),
                 ('cleanup', 'cleanup'),
                 ('comment', 'comment'),
                 ('irradiation', 'irradiation'),
                 ('irradiation_position', 'irradiation_pos'),
                 ('irradiation_pos', 'irradiation_pos'),
                 ('level', 'irradiation_level'),
                 ('irradiation_level', 'irradiation_level'),
                 ('isotopes', 'isotopes'),
                 ('tag', 'tag'),
                 ('sample', 'sample'),
                 ('material', 'material'),
                 ('project', 'project'),
                 ('mass_spectrometer', 'mass_spectrometer'),
                 ('age', 'age'),
                 ('age_err', 'age_err'),
                 ('age_err_wo_j', 'age_err_wo_j'),
                 ('age_err_wo_j_irrad', 'age_err_wo_j_irrad'),
                 ('ar39decayfactor', 'ar39decayfactor'),
                 ('ar37decayfactor', 'ar37decayfactor')]

        for exp_attr, run_attr in attrs:
            if hasattr(record, run_attr):
                try:
                    v = getattr(record, run_attr)
                    self.debug('setting {} to {}'.format(exp_attr, v))
                    setattr(self, exp_attr, v)
                # BUGFIX: was the Python-2-only form ``except TraitError, e:``;
                # ``as`` works on Python 2.6+ and 3.
                except TraitError as e:
                    self.debug(e)

        # these attributes copy over under the same name, when present
        for a in ('chron_dosages',
                  'production_ratios',
                  'interference_corrections',
                  'production_name', 'j'):
            if hasattr(record, a):
                setattr(self, a, getattr(record, a))
            else:
                self.debug('no attribute {}'.format(a))
class ModelInfo(HasTraits):
    """Model-specific level of information about a specific model."""

    model_name = Str(desc=dedent("""
    The name of the model. (Automatically populated from module name).
    """))

    task_model = Bool(True, desc=dedent("""
    If True, model the task using a design file matching the model name.
    """))

    smooth_fwhm = Either(
        Float(2), None,
        desc=dedent("""
        The size of the Gaussian smoothing kernel for spatial filtering.
        """),
    )

    surface_smoothing = Bool(
        True,
        desc=dedent("""
        If True, filter cortical voxels using Gaussian weights computed
        along the surface mesh.
        """),
    )

    interpolate_noise = Bool(
        False,
        # BUGFIX: removed duplicated word ("replace replace") in the desc.
        desc=dedent("""
        If True, identify locally noisy voxels and replace their values
        using interpolation during spatial filtering.
        Warning: this option is still being refined.
        """),
    )

    hpf_cutoff = Either(
        Float(128), None, usedefault=True,
        desc=dedent("""
        The cutoff value (in seconds) for the temporal high-pass filter.
        """),
    )

    percent_change = Bool(
        False,
        desc=dedent("""
        If True, convert data to percent signal change units before model fit.
        """),
    )

    nuisance_components = Dict(
        Enum("wm", "csf", "edge", "noise"), Int,
        usedefault=True,
        desc=dedent("""
        Anatomical sources and number of components per source to include.
        """),
    )

    save_residuals = Bool(
        False,
        desc=dedent("""
        If True, write out an image with the residual time series in each
        voxel after model fitting.
        """),
    )

    hrf_derivative = Bool(
        True,
        desc=dedent("""
        If True, include the temporal derivative of the HRF model.
        """),
    )

    # TODO parameter names to filter the design and generate default contrasts?
    contrasts = List(
        Tuple(Str, List(Str), List(Float)),
        desc=dedent("""
        Definitions for model parameter contrasts. Each item in the list
        should be a tuple with the fields: (1) the name of the contrast,
        (2) the names of the parameters included in the contrast, and (3) the
        weights to apply to the parameters.
        """),
    )
class PychronUVLaserManager(PychronLaserManager):
    """Client-side manager for a UV laser, driven over a remote command
    protocol via ``self._ask``.
    """
    optics_client = Instance(
        'pychron.lasers.laser_managers.client.UVLaserOpticsClient')
    # BUGFIX: was pointing at UVLaserOpticsClient, but the default
    # (_controls_client_default) constructs a UVLaserControlsClient.
    controls_client = Instance(
        'pychron.lasers.laser_managers.client.UVLaserControlsClient')
    fire = Event
    stop = Event
    fire_mode = Enum('Burst', 'Continuous')
    nburst = Property(depends_on='_nburst')
    _nburst = Int
    mask = Property(String(enter_set=True, auto_set=False),
                    depends_on='_mask')
    _mask = Either(Str, Float)
    masks = Property
    attenuator = String(enter_set=True, auto_set=False)
    # attenuators = Property
    zoom = Range(0.0, 100.0)

    def set_reprate(self, v):
        """Set the laser repetition rate."""
        self._ask('SetReprate {}'.format(v))

    def extract(self, power, **kw):
        """Fire a burst of ``power`` shots and block until firing finishes."""
        self._set_nburst(power)

        time.sleep(0.25)
        self._ask('Fire burst')
        time.sleep(0.25)

        self._block('IsFiring', period=0.5)

    def end_extract(self):
        self._ask('Fire stop')

    def trace_path(self, value, name, kind):
        """Trace the named path and block until tracing completes."""
        if isinstance(name, list):
            name = name[0]

        # traces need to be prefixed with 'l'
        name = str(name)
        name = name.lower()

        cmd = 'TracePath {},{},{}'.format(value, name, kind)
        self.info('sending {}'.format(cmd))
        self._ask(cmd)
        return self._block(cmd='IsTracing')

    def drill_point(self, value, name):
        # TODO(review): the command is never sent -- this method is a stub.
        cmd = 'DrillPoint'

    # ===========================================================================
    # event handlers
    # ===========================================================================
    def _fire_fired(self):
        if self.fire_mode == 'Continuous':
            mode = 'continuous'
        else:
            mode = 'burst'
        self.firing = True

        self._ask('Fire {}'.format(mode))

    def _stop_fired(self):
        self.firing = False
        self._ask('Fire stop')

    @on_trait_change('mask, attenuator, zoom')
    def _motor_changed(self, name, new):
        # push motor moves onto a worker thread so the UI stays responsive
        if new is not None:
            t = Thread(target=self.set_motor, args=(name, new))
            t.start()

    # ===========================================================================
    # hooks
    # ===========================================================================
    def _opened_hook(self):
        # sync local state with the hardware on connection
        nb = self._ask('GetNBurst')
        self._nburst = self._get_int(nb)

        mb = self._ask('GetBurstMode')
        if mb is not None:
            self.fire_mode = 'Burst' if mb == '1' else 'Continuous'

        self._mask = 0

    def _move_to_position(self, pos, autocenter):
        cmd = 'GoToPoint'

        # named positions (transects, lines, drill points) use a different command
        if isinstance(pos, (str, unicode)):
            if not pos:
                return
            if pos[0].lower() in ['t', 'l', 'd']:
                cmd = 'GoToNamedPosition'

        if cmd:
            cmd = '{},{}'.format(cmd, pos)
            self.info('sending {}'.format(cmd))
            self._ask(cmd)
            time.sleep(0.5)
            r = self._block()
            self.update_position()
            return r

    # ===========================================================================
    # property get/set
    # ===========================================================================
    def _get_int(self, resp):
        """Parse ``resp`` as an int, returning 0 on any failure."""
        r = 0
        if resp is not None:
            try:
                r = int(resp)
            except (ValueError, TypeError):
                pass
        return r

    def _validate_nburst(self, v):
        try:
            return int(v)
        except (ValueError, TypeError):
            pass

    def _set_nburst(self, v):
        if v is not None:
            v = int(v)
            self._ask('SetNBurst {}'.format(v))
            self._nburst = v

    def _get_nburst(self):
        return self._nburst

    def _set_mask(self, m):
        self._mask = m

    def _get_mask(self):
        return self._mask

    def _validate_mask(self, m):
        # accept either a known mask name or a numeric mask value
        if m in self.masks:
            return m
        else:
            try:
                return float(m)
            except ValueError:
                pass

    @cached_property
    def _get_masks(self):
        return self._get_motor_values('mask_names')

    def _get_motor_values(self, name):
        """Read non-blank, non-comment lines from the named device file."""
        p = os.path.join(paths.device_dir, 'fusions_uv', '{}.txt'.format(name))
        values = []
        if os.path.isfile(p):
            with open(p, 'r') as rfile:
                for lin in rfile:
                    lin = lin.strip()
                    if not lin or lin.startswith('#'):
                        continue
                    values.append(lin)
        return values

    def _controls_client_default(self):
        from pychron.lasers.laser_managers.client import UVLaserControlsClient
        return UVLaserControlsClient(model=self)

    def _optics_client_default(self):
        from pychron.lasers.laser_managers.client import UVLaserOpticsClient
        return UVLaserOpticsClient(model=self)
import wx import wx.adv as wz from traits.api import Str, Either from .constants import DefaultTitle from .helper import restore_window, save_window, GroupEditor from .ui_panel import fill_panel_for_group # ------------------------------------------------------------------------- # Trait definitions: # ------------------------------------------------------------------------- # Trait that allows only None or a string value none_str_trait = Either(None, Str, default="") def ui_wizard(ui, parent): """ Creates a wizard-based wxPython user interface for a specified UI object. """ # Create the copy of the 'context' we will need while editing: ui._context = context = ui.context new_context = { name: None if value is None else value.clone_traits() for name, value in context.items() } ui.context = new_context # Now bind the context values to the 'info' object:
class TraitGridModel(GridModel):
    """A TraitGridModel builds a grid from a list of traits objects. Each row
    represents one object, each column one trait from those objects. All the
    objects must be of the same type. Optionally a user may pass in a list of
    trait names defining which traits will be shown in the columns and in
    which order. If this list is not passed in, then the first object is
    inspected and every trait from that object gets a column."""

    # A 2-dimensional list/array containing the grid data.
    data = List(Any)

    # The column definitions
    columns = Either(None, List(Either(None, Str, Instance(TraitGridColumn))))

    # The trait to look at to get the row name
    row_name_trait = Either(None, Str)

    # Allow column sorting?
    allow_column_sort = Bool(True)

    # A factory to generate new rows. If this is not None then it must
    # be a no-argument function.
    row_factory = Callable

    # ------------------------------------------------------------------------
    # 'object' interface.
    # ------------------------------------------------------------------------

    def __init__(self, **traits):
        """ Create a TraitGridModel object. """

        # Base class constructor
        super(TraitGridModel, self).__init__(**traits)

        # if no columns are passed in then create the list of names
        # from the first trait in the list. if the list is empty,
        # the columns should be an empty list as well.
        self._auto_columns = self.columns
        if self.columns is None or len(self.columns) == 0:
            if self.data is not None and len(self.data) > 0:
                self._auto_columns = []

                # we only add traits that aren't events, since events
                # are write-only
                for name, trait in self.data[0].traits().items():
                    if trait.type != "event":
                        self._auto_columns.append(TraitGridColumn(name=name))
            else:
                self._auto_columns = []

        # attach trait handlers to the list object
        self.observe(self._on_data_changed, "data")
        self.observe(self._on_data_items_changed, "data:items")

        # attach appropriate trait handlers to objects in the list
        self.__manage_data_listeners(self.data)

        # attach a listener to the column definitions so we refresh when
        # they change
        self.observe(self._on_columns_changed, "columns")
        self.observe(self._on_columns_items_changed, "columns:items")

        # attach listeners to the column definitions themselves
        self.__manage_column_listeners(self.columns)

        # attach a listener to the row_name_trait
        self.observe(self._on_row_name_trait_changed, "row_name_trait")

    # ------------------------------------------------------------------------
    # 'GridModel' interface.
    # ------------------------------------------------------------------------

    def get_column_count(self):
        """ Return the number of columns for this table. """
        return len(self._auto_columns)

    def get_column_name(self, index):
        """ Return the label of the column specified by the
        (zero-based) index. """
        try:
            name = col = self._auto_columns[index]
            if isinstance(col, TraitGridColumn):
                if col.label is not None:
                    name = col.label
                else:
                    name = col.name
        except IndexError:
            name = ""
        return name

    def get_column_size(self, index):
        """ Return the size in pixels of the column indexed by col.
        A value of -1 or None means use the default. """
        size = -1
        try:
            col = self._auto_columns[index]
            if isinstance(col, TraitGridColumn):
                size = col.size
        except IndexError:
            pass
        return size

    def get_cols_drag_value(self, cols):
        """ Return the value to use when the specified columns are dragged or
        copied and pasted. cols is a list of column indexes. """
        # iterate over every column, building a list of the values in that
        # column
        value = []
        for col in cols:
            value.append(self.__get_data_column(col))
        return value

    def get_cols_selection_value(self, cols):
        """ Returns a list of TraitGridSelection objects containing the
        object corresponding to the grid rows and the traits corresponding
        to the specified columns. """
        values = []
        for obj in self.data:
            for col in cols:
                values.append(
                    TraitGridSelection(
                        obj=obj, trait_name=self.__get_column_name(col)))
        return values

    def sort_by_column(self, col, reverse=False):
        """ Sort model data by the column indexed by col. """

        # first check to see if we allow sorts by column
        if not self.allow_column_sort:
            return

        # see if a sorter is specified for this column
        try:
            column = self._auto_columns[col]
            name = self.__get_column_name(col)
            # by default we sort on the trait values themselves
            key = None
            if (isinstance(column, TraitGridColumn)
                    and column.sorter is not None):
                key = cmp_to_key(column.sorter)
        except IndexError:
            return

        def key_function(a):
            trait = getattr(a, name, None)
            if key:
                return key(trait)
            # BUGFIX: previously this returned None when no sorter was
            # specified, so every key compared equal and the default
            # sort was a no-op.
            return trait

        self.data.sort(key=key_function, reverse=reverse)

        # now fire an event to tell the grid we're sorted
        self.column_sorted = GridSortEvent(index=col, reversed=reverse)

    def is_column_read_only(self, index):
        """ Return True if the column specified by the zero-based index
        is read-only. """
        return self.__get_column_readonly(index)

    def get_row_count(self):
        """ Return the number of rows for this table. """
        if self.data is not None:
            count = len(self.data)
        else:
            count = 0
        return count

    def get_row_name(self, index):
        """ Return the name of the row specified by the
        (zero-based) index. """
        if self.row_name_trait is not None:
            try:
                row = self._get_row(index)
                if hasattr(row, self.row_name_trait):
                    name = getattr(row, self.row_name_trait)
                else:
                    # BUGFIX: previously 'name' was left unbound on this
                    # path, raising UnboundLocalError; fall back to the
                    # numeric row name instead.
                    name = str(index + 1)
            except IndexError:
                name = str(index + 1)
        else:
            name = str(index + 1)

        return name

    def get_rows_drag_value(self, rows):
        """ Return the value to use when the specified rows are dragged or
        copied and pasted. rows is a list of row indexes. If there is only
        one row listed, return the corresponding trait object. If more than
        one row is listed then return a list of objects. """
        # return a list of objects
        value = []
        for index in rows:
            try:
                # note that we can't use get_value for this because it
                # sometimes returns strings instead of the actual value,
                # e.g. in cases where a float_format is specified
                value.append(self._get_row(index))
            except IndexError:
                value.append(None)

        return value

    def get_rows_selection_value(self, rows):
        """ Returns a list of TraitGridSelection objects containing the
        object corresponding to the selected rows. """
        values = []
        for row_index in rows:
            values.append(TraitGridSelection(obj=self.data[row_index]))
        return values

    def is_row_read_only(self, index):
        """ Return True if the row specified by the zero-based index
        is read-only. """
        return False

    def get_cell_editor(self, row, col):
        """ Return the editor for the specified cell. """
        obj = self.data[row]
        trait_name = self.__get_column_name(col)
        trait = obj.base_trait(trait_name)
        if trait is None:
            return None

        factory = trait.get_editor()
        return TraitGridCellAdapter(factory, obj, trait_name, "")

    def get_cell_drag_value(self, row, col):
        """ Return the value to use when the specified cell is dragged or
        copied and pasted. """
        # find the name of the column indexed by col
        # note that this code is the same as the get_value code but without
        # the potential string formatting
        column = self.__get_column(col)
        obj = self._get_row(row)

        value = self._get_data_from_row(obj, column)

        return value

    def get_cell_selection_value(self, row, col):
        """ Returns a TraitGridSelection object specifying the data stored
        in the table at (row, col). """
        obj = self.data[row]
        trait_name = self.__get_column_name(col)

        return TraitGridSelection(obj=obj, trait_name=trait_name)

    def resolve_selection(self, selection_list):
        """ Returns a list of (row, col) grid-cell coordinates that
        correspond to the objects in objlist. For each coordinate, if the
        row is -1 it indicates that the entire column is selected. Likewise
        coordinates with a column of -1 indicate an entire row that is
        selected. For the TraitGridModel, the objects in objlist must be
        TraitGridSelection objects. """
        cells = []
        for selection in selection_list:
            try:
                row = self.data.index(selection.obj)
            except ValueError:
                continue

            column = -1
            if selection.trait_name is not None:
                column = self._get_column_index_by_trait(selection.trait_name)
                if column is None:
                    continue

            cells.append((row, column))

        return cells

    def get_type(self, row, col):
        """ Return the value stored in the table at (row, col). """
        typename = self.__get_column_typename(col)
        return typename

    def get_value(self, row, col):
        """ Return the value stored in the table at (row, col). """
        value = self.get_cell_drag_value(row, col)
        formats = self.__get_column_formats(col)

        if (value is not None and formats is not None
                and type(value) in formats
                and formats[type(value)] is not None):
            try:
                format = formats[type(value)]
                if callable(format):
                    value = format(value)
                else:
                    value = format % value
            except TypeError:
                # not enough arguments? wrong kind of arguments?
                pass

        return value

    def is_cell_empty(self, row, col):
        """ Returns True if the cell at (row, col) has a None value,
        False otherwise."""
        value = self.get_value(row, col)
        return value is None

    def is_cell_editable(self, row, col):
        """ Returns True if the cell at (row, col) is editable,
        False otherwise. """
        return not self.is_column_read_only(col)

    # ------------------------------------------------------------------------
    # protected 'GridModel' interface.
    # ------------------------------------------------------------------------

    def _insert_rows(self, pos, num_rows):
        """ Inserts num_rows at pos and fires an event iff a factory method
        for new rows is defined. Otherwise returns 0. """
        count = 0
        if self.row_factory is not None:
            new_data = []
            for i in range(num_rows):
                new_data.append(self.row_factory())

            count = self._insert_rows_into_model(pos, new_data)
            self.rows_added = ("added", pos, new_data)

        return count

    def _delete_rows(self, pos, num_rows):
        """ Removes rows pos through pos + num_rows from the model. """
        if pos + num_rows >= self.get_row_count():
            # BUGFIX: was self.get_rows_count(), which does not exist.
            num_rows = self.get_row_count() - pos

        return self._delete_rows_from_model(pos, num_rows)

    def _set_value(self, row, col, value):
        """ Sets the value of the cell at (row, col) to value.

        Raises a ValueError if the value is vetoed or the cell at
        (row, col) does not exist. """
        new_rows = 0
        # find the column indexed by col
        column = self.__get_column(col)
        obj = self._get_row(row)
        success = False
        if obj is not None:
            success = self._set_data_on_row(obj, column, value)
        else:
            # Add a new row.
            new_rows = self._insert_rows(self.get_row_count(), 1)
            if new_rows > 0:
                # now set the value on the new object
                obj = self._get_row(self.get_row_count() - 1)
                success = self._set_data_on_row(obj, column, value)

        if not success:
            # fixme: what do we do in this case? veto the set somehow?
            # raise an exception?
            pass

        return new_rows

    # ------------------------------------------------------------------------
    # protected interface.
    # ------------------------------------------------------------------------

    def _get_row(self, index):
        """ Return the object that corresponds to the row at index.
        Override this to handle very large data sets. """
        return self.data[index]

    def _get_data_from_row(self, row, column):
        """ Retrieve the data specified by column for this row. Attribute
        can be either a member of the row object, or a no-argument method
        on that object. Override this method to provide alternative ways
        of accessing the data in the object. """
        value = None

        if row is not None and column is not None:
            if not isinstance(column, TraitGridColumn):
                # first handle the case where the column
                # definition might be just a string
                if hasattr(row, column):
                    value = getattr(row, column)
            elif column.name is not None and hasattr(row, column.name):
                # this is the case when the trait name is specified
                value = getattr(row, column.name)
            elif column.method is not None and hasattr(row, column.method):
                # this is the case when an object method is specified
                value = getattr(row, column.method)()

        if value is None:
            return None
        else:
            return str(value)

    def _set_data_on_row(self, row, column, value):
        """ Set the data specified by column for this row. Attribute can be
        either a member of the row object, or a no-argument method on that
        object. Override this method to provide alternative ways of
        accessing the data in the object. """
        success = False

        if row is not None and column is not None:
            if not isinstance(column, TraitGridColumn):
                if hasattr(row, column):
                    setattr(row, column, value)
                    success = True
            elif column.name is not None and hasattr(row, column.name):
                setattr(row, column.name, value)
                success = True

            # do nothing in the method case as we don't allow rows
            # defined to return a method value to set the value

        return success

    def _insert_rows_into_model(self, pos, new_data):
        """ Insert the given new rows into the model. Override this method
        to handle very large data sets. """
        for data in new_data:
            self.data.insert(pos, data)
            pos += 1
        # BUGFIX: previously returned None, so _insert_rows (and therefore
        # _set_value) always reported 0 rows added.
        return len(new_data)

    def _delete_rows_from_model(self, pos, num_rows):
        """ Delete the specified rows from the model. Override this method
        to handle very large data sets. """
        # BUGFIX: was ``del self.data[pos, pos + num_rows]`` -- a tuple
        # index, which raises TypeError; a slice is intended.
        del self.data[pos:pos + num_rows]

        return num_rows

    # ------------------------------------------------------------------------
    # trait handlers
    # ------------------------------------------------------------------------

    def _on_row_name_trait_changed(self, event):
        """ Force the grid to refresh when any underlying trait changes. """
        self.fire_content_changed()

    def _on_columns_changed(self, event):
        """ Force the grid to refresh when any underlying trait changes. """
        self.__manage_column_listeners(event.old, remove=True)
        self.__manage_column_listeners(self.columns)
        self._auto_columns = self.columns
        self.fire_structure_changed()

    def _on_columns_items_changed(self, event):
        """ Force the grid to refresh when any underlying trait changes. """
        self.__manage_column_listeners(event.removed, remove=True)
        self.__manage_column_listeners(event.added)
        self.fire_structure_changed()

    def _on_contained_trait_changed(self, event):
        """ Force the grid to refresh when any underlying trait changes. """
        self.fire_content_changed()

    def _on_data_changed(self, event):
        """ Force the grid to refresh when the underlying list changes. """
        self.__manage_data_listeners(event.old, remove=True)
        self.__manage_data_listeners(self.data)
        self.fire_structure_changed()

    def _on_data_items_changed(self, event):
        """ Force the grid to refresh when the underlying list changes. """
        # if an item was removed then remove that item's listener
        self.__manage_data_listeners(event.removed, remove=True)

        # if items were added then add trait change listeners on those items
        self.__manage_data_listeners(event.added)

        self.fire_content_changed()

    # ------------------------------------------------------------------------
    # private interface.
    # ------------------------------------------------------------------------

    def __get_data_column(self, col):
        """ Return a 1-d list of data from the column indexed by col. """
        row_count = self.get_row_count()

        coldata = []
        for row in range(row_count):
            try:
                val = self.get_value(row, col)
                if val is None:
                    coldata.append(None)
                else:
                    coldata.append(val)
            except IndexError:
                coldata.append(None)

        return coldata

    def __get_column(self, col):
        try:
            column = self._auto_columns[col]
        except IndexError:
            column = None

        return column

    def __get_column_name(self, col):
        name = column = self.__get_column(col)
        if isinstance(column, TraitGridColumn):
            name = column.name
        return name

    def __get_column_typename(self, col):
        name = column = self.__get_column(col)
        typename = None
        if isinstance(column, TraitGridColumn):
            typename = column.typename
        return typename

    def __get_column_readonly(self, col):
        read_only = False
        column = self.__get_column(col)
        if isinstance(column, TraitGridColumn):
            read_only = column.read_only
        return read_only

    def __get_column_formats(self, col):
        formats = None
        column = self.__get_column(col)
        if isinstance(column, TraitGridColumn):
            formats = column.formats
        return formats

    def _get_column_index_by_trait(self, trait_name):
        cols = self._auto_columns
        for i in range(len(cols)):
            col = cols[i]
            if isinstance(col, TraitGridColumn):
                col_name = col.name
            else:
                col_name = col

            if col_name == trait_name:
                return i
        return None

    def __manage_data_listeners(self, list, remove=False):
        # attach appropriate trait handlers to objects in the list
        if list is not None:
            for item in list:
                item.observe(self._on_contained_trait_changed, remove=remove)

    def __manage_column_listeners(self, collist, remove=False):
        if collist is not None:
            for col in collist:
                if isinstance(col, TraitGridColumn):
                    col.observe(self._on_columns_changed, remove=remove)
class NamedValue(HasTraits):
    """A simple named value: a display name paired with a value that may be
    a string, float, integer, or None."""
    # Display name for the value.
    name = Str
    # The value; validation is attempted in declaration order
    # (Str, then Float, then Int, then None).
    value = Either(Str, Float, Int, None)
class TableColumn(HasPrivateTraits): """ Represents a column in a table editor. """ #------------------------------------------------------------------------- # Trait definitions: #------------------------------------------------------------------------- # Column label to use for this column: label = Str(UndefinedLabel) # Type of data contained by the column: type = Enum('text') # Text color for this column: text_color = Color('black') # Text font for this column: text_font = Font # Cell background color for this column: cell_color = Either(Color('white'), None) # Cell background color for non-editable columns: read_only_cell_color = Either(Color(0xF4F3EE), None) # Cell graph color: graph_color = Color(0xDDD9CC) # Horizontal alignment of text in the column: horizontal_alignment = Enum('left', ['left', 'center', 'right']) # Vertical alignment of text in the column: vertical_alignment = Enum('center', ['top', 'center', 'bottom']) # Horizontal cell margin horizontal_margin = Int(4) # Vertical cell margin vertical_margin = Int(3) # The image to display in the cell: image = Image # Renderer used to render the contents of this column: renderer = Any # A toolkit specific renderer # Is the table column visible (i.e., viewable)? visible = Bool(True) # Is this column editable? editable = Bool(True) # Is the column automatically edited/viewed (i.e. should the column editor # or popup be activated automatically on mouse over)? auto_editable = Bool(False) # Should a checkbox be displayed instead of True/False? show_checkbox = Bool(True) # Can external objects be dropped on the column? 
droppable = Bool(False) # Context menu to display when this column is right-clicked: menu = Instance(Menu) # The tooltip to display when the mouse is over the column: tooltip = Str # The width of the column (< 0.0: Default, 0.0..1.0: fraction of total table # width, > 1.0: absolute width in pixels): width = Float(-1.0) # The width of the column while it is being edited (< 0.0: Default, # 0.0..1.0: fraction of total table width, > 1.0: absolute width in # pixels): edit_width = Float(-1.0) # The height of the column cell's row while it is being edited # (< 0.0: Default, 0.0..1.0: fraction of total table height, # > 1.0: absolute height in pixels): edit_height = Float(-1.0) # The resize mode for this column. This takes precedence over other settings # (like **width**, above). # "interactive": column can be resized by users or programmatically # "fixed": users cannot resize the column, but it can be set programmatically # "stretch": the column will be resized to fill the available space # "resize_to_contents": column will be sized to fit the contents, but then cannot be resized resize_mode = Enum("interactive", "fixed", "stretch", "resize_to_contents") # The view (if any) to display when clicking a non-editable cell: view = AView # Optional maximum value a numeric cell value can have: maximum = Float(trait_value=True) #------------------------------------------------------------------------- # Returns the actual object being edited: #------------------------------------------------------------------------- def get_object(self, object): """ Returns the actual object being edited. """ return object #------------------------------------------------------------------------- # Gets the label of the column: #------------------------------------------------------------------------- def get_label(self): """ Gets the label of the column. 
""" return self.label #------------------------------------------------------------------------- # Returns the width of the column: #------------------------------------------------------------------------- def get_width(self): """ Returns the width of the column. """ return self.width #------------------------------------------------------------------------- # Returns the edit width of the column: #------------------------------------------------------------------------- def get_edit_width(self, object): """ Returns the edit width of the column. """ return self.edit_width #------------------------------------------------------------------------- # Returns the height of the column cell's row while it is being edited: #------------------------------------------------------------------------- def get_edit_height(self, object): """ Returns the height of the column cell's row while it is being edited. """ return self.edit_height #------------------------------------------------------------------------- # Gets the type of data for the column for a specified object: #------------------------------------------------------------------------- def get_type(self, object): """ Gets the type of data for the column for a specified object. """ return self.type #------------------------------------------------------------------------- # Returns the text color for the column for a specified object: #------------------------------------------------------------------------- def get_text_color(self, object): """ Returns the text color for the column for a specified object. """ return self.text_color_ #------------------------------------------------------------------------- # Returns the text font for the column for a specified object: #------------------------------------------------------------------------- def get_text_font(self, object): """ Returns the text font for the column for a specified object. 
""" return self.text_font #------------------------------------------------------------------------- # Returns the cell background color for the column for a specified object: #------------------------------------------------------------------------- def get_cell_color(self, object): """ Returns the cell background color for the column for a specified object. """ if self.is_editable(object): return self.cell_color_ return self.read_only_cell_color_ #------------------------------------------------------------------------- # Returns the cell background graph color for the column for a specified # object: #------------------------------------------------------------------------- def get_graph_color(self, object): """ Returns the cell background graph color for the column for a specified object. """ return self.graph_color_ #------------------------------------------------------------------------- # Returns the horizontal alignment for the column for a specified object: #------------------------------------------------------------------------- def get_horizontal_alignment(self, object): """ Returns the horizontal alignment for the column for a specified object. """ return self.horizontal_alignment #------------------------------------------------------------------------- # Returns the vertical alignment for the column for a specified object: #------------------------------------------------------------------------- def get_vertical_alignment(self, object): """ Returns the vertical alignment for the column for a specified object. """ return self.vertical_alignment #------------------------------------------------------------------------- # Returns the image to display for the column for a specified object: #------------------------------------------------------------------------- def get_image(self, object): """ Returns the image to display for the column for a specified object. 
""" return self.image #------------------------------------------------------------------------- # Returns the renderer for the column of a specified object: #------------------------------------------------------------------------- def get_renderer(self, object): """ Returns the renderer for the column of a specified object. """ return self.renderer #------------------------------------------------------------------------- # Returns whether the column is editable for a specified object: #------------------------------------------------------------------------- def is_editable(self, object): """ Returns whether the column is editable for a specified object. """ return self.editable #------------------------------------------------------------------------- # Returns whether the column is autoamtically edited/viewed for a specified # object: #------------------------------------------------------------------------- def is_auto_editable(self, object): """ Returns whether the column is automatically edited/viewed for a specified object. """ return self.auto_editable #------------------------------------------------------------------------- # Returns whether a specified value is valid for dropping on the column # for a specified object: #------------------------------------------------------------------------- def is_droppable(self, object, value): """ Returns whether a specified value is valid for dropping on the column for a specified object. """ return self.droppable #------------------------------------------------------------------------- # Returns the context menu to display when the user right-clicks on the # column for a specified object: #------------------------------------------------------------------------- def get_menu(self, object): """ Returns the context menu to display when the user right-clicks on the column for a specified object. 
""" return self.menu #------------------------------------------------------------------------- # Returns the tooltip to display when the user mouses over the column for # a specified object: #------------------------------------------------------------------------- def get_tooltip(self, object): """ Returns the tooltip to display when the user mouses over the column for a specified object. """ return self.tooltip #------------------------------------------------------------------------- # Returns the view to display when clicking a non-editable cell: #------------------------------------------------------------------------- def get_view(self, object): """ Returns the view to display when clicking a non-editable cell. """ return self.view #------------------------------------------------------------------------- # Returns the maximum value a numeric column can have: #------------------------------------------------------------------------- def get_maximum(self, object): """ Returns the maximum value a numeric column can have. """ return self.maximum #------------------------------------------------------------------------- # Called when the user clicks on the column: #------------------------------------------------------------------------- def on_click(self, object): """ Called when the user clicks on the column. """ pass #------------------------------------------------------------------------- # Called when the user double-clicks on the column: #------------------------------------------------------------------------- def on_dclick(self, object): """ Called when the user clicks on the column. """ pass #------------------------------------------------------------------------- # Returns the result of comparing the column of two different objects: #------------------------------------------------------------------------- def cmp(self, object1, object2): """ Returns the result of comparing the column of two different objects. This is deprecated. 
""" return ((self.key(object1) > self.key(object2)) - (self.key(object1) < self.key(object2))) #------------------------------------------------------------------------- # Returns the string representation of the table column: #------------------------------------------------------------------------- def __str__(self): """ Returns the string representation of the table column. """ return self.get_label()
class ComputedValue(NamedValue): error = Either(Str, Float, Int) tag = Str display_value = Bool(True) sig_figs = Int(5) value_tag = Str
class BaseTimer(ABCHasTraits):
    """ Base class for timer classes.

    This class has a class variable which tracks active timers to prevent
    failures caused by garbage collection.  A timer is added to this tracker
    when it is started if the repeat value is not None.
    """

    # BaseTimer interface ----------------------------------------------------

    #: Class variable tracking all active timers.  Holding a strong reference
    #: here keeps repeating timers alive even if the caller drops theirs.
    _active_timers = set()

    # ITimer interface -------------------------------------------------------

    #: The interval at which to call the callback in seconds.
    interval = Range(low=0.0, value=0.05)

    #: The number of times to repeat the callback, or None if no limit.
    repeat = Either(None, Int)

    #: The maximum length of time to run in seconds, or None if no limit.
    expire = Either(None, Float)

    #: Property that controls the state of the timer.
    active = Property(Bool, observe="_active")

    # Private interface ------------------------------------------------------

    #: Whether or not the timer is currently running.
    _active = Bool()

    #: The most recent start time (perf_counter value), used with 'expire'.
    _start_time = Float()

    # -------------------------------------------------------------------------
    # ITimer interface
    # -------------------------------------------------------------------------

    @classmethod
    def timer(cls, **traits):
        """ Convenience method that creates and starts a timer. """
        timer = cls(**traits)
        timer.start()
        return timer

    @classmethod
    def single_shot(cls, **traits):
        """ Convenience method that creates and starts a one-shot timer. """
        timer = cls(repeat=1, **traits)
        timer.start()
        return timer

    def start(self):
        """ Start the timer. """
        if not self._active:
            # Repeating timers must be kept alive until they stop; see
            # the '_active_timers' class variable above.
            if self.repeat is not None:
                self._active_timers.add(self)
            if self.expire is not None:
                self._start_time = perf_counter()
            self._active = True
            self._start()

    def stop(self):
        """ Stop the timer. """
        if self._active:
            self._active_timers.discard(self)
            self._stop()
            self._active = False

    def perform(self):
        """ Perform the callback.

        The timer will stop if repeats is not None and less than 1, or if
        the `_perform` method raises StopIteration.
        """
        # Expiry check happens before the callback so an expired timer
        # does not fire one last time.
        if self.expire is not None:
            if perf_counter() - self._start_time > self.expire:
                self.stop()
                return

        if self.repeat is not None:
            self.repeat -= 1

        try:
            self._perform()
        except StopIteration:
            # A callback may raise StopIteration to end the timer cleanly.
            self.stop()
        except:
            # Stop on any other error, then let it propagate (bare except
            # is deliberate here: cleanup + re-raise, nothing is swallowed).
            self.stop()
            raise
        else:
            if self.repeat is not None and self.repeat <= 0:
                self.stop()
                self.repeat = 0

    # BaseTimer Protected methods

    def _start(self):
        """ Start the toolkit timer.

        Subclasses should override this method.
        """
        raise NotImplementedError()

    def _stop(self):
        """ Stop the toolkit timer.

        Subclasses should override this method.
        """
        raise NotImplementedError()

    @abstractmethod
    def _perform(self):
        """ Perform the appropriate action.

        Subclasses should override this method.
        """
        raise NotImplementedError()

    # -------------------------------------------------------------------------
    # Private interface
    # -------------------------------------------------------------------------

    # Trait property handlers ------------------------------------------------

    def _get_active(self):
        return self._active

    def _set_active(self, value):
        # Setting 'active' delegates to start()/stop() so the bookkeeping
        # above is never bypassed.
        if value:
            self.start()
        else:
            self.stop()
class VectorsFactory(DataModuleFactory):
    """Applies the Vectors mayavi module to the given data object source (Mayavi source, or VTK dataset).
    """

    _target = Instance(modules.Vectors, ())

    # Scaling applied to the glyphs; 'adapts' wires this trait through to
    # the underlying VTK glyph object.
    scale_factor = CFloat(1., adapts='glyph.glyph.scale_factor',
                          desc="""the scaling applied to the glyphs. The size of the glyph is by default in drawing units.""")

    # Mapped trait: the user-facing name maps to the VTK scale-mode string
    # (available via the shadow trait 'scale_mode_').
    scale_mode = Trait('vector',
                       {'none': 'data_scaling_off',
                        'scalar': 'scale_by_scalar',
                        'vector': 'scale_by_vector'},
                       help="""the scaling mode for the glyphs ('vector', 'scalar', or 'none').""")

    resolution = CInt(8, desc="The resolution of the glyph created. For "
                              "spheres, for instance, this is the number of "
                              "divisions along theta and phi.")

    mask_points = Either(None, CInt,
                         desc="If supplied, only one out of 'mask_points' "
                              "data point is displayed. This option is useful "
                              "to reduce the number of points displayed "
                              "on large datasets")

    def _resolution_changed(self):
        # Different glyph sources expose different resolution traits, so
        # probe for each known one and set whichever exists.
        glyph = self._target.glyph.glyph_source.glyph_source
        if hasattr(glyph, 'theta_resolution'):
            glyph.theta_resolution = self.resolution
        if hasattr(glyph, 'phi_resolution'):
            glyph.phi_resolution = self.resolution
        if hasattr(glyph, 'resolution'):
            glyph.resolution = self.resolution
        if hasattr(glyph, 'shaft_resolution'):
            glyph.shaft_resolution = self.resolution
        if hasattr(glyph, 'tip_resolution'):
            glyph.tip_resolution = self.resolution

    def _mask_points_changed(self):
        # NOTE(review): masking is only ever switched on; setting
        # mask_points back to None leaves mask_input_points True --
        # confirm whether that is intended upstream.
        if self.mask_points is not None:
            self._target.glyph.mask_input_points = True
            self._target.glyph.mask_points.on_ratio = self.mask_points

    def _scale_mode_changed(self):
        # 'scale_mode_' is the mapped (VTK string) form of 'scale_mode'.
        self._target.glyph.scale_mode = self.scale_mode_

    # Mapped trait: glyph name -> index into the glyph list (shadow 'mode_').
    mode = Trait('2darrow', glyph_mode_dict,
                 desc="""the mode of the glyphs.""")

    def _mode_changed(self):
        v = self._target
        # Workaround for different version of VTK:
        if hasattr(v.glyph.glyph_source, 'glyph_source'):
            g = v.glyph.glyph_source
        else:
            g = v.glyph
        if self.mode == 'point':
            g.glyph_source = tvtk.PointSource(radius=0, number_of_points=1)
        else:
            g.glyph_source = g.glyph_list[self.mode_]
        # Index 0 is the 2D glyph source; its sub-type is selected from the
        # mode name with the '2d' prefix stripped (e.g. '2darrow' -> 'arrow').
        if self.mode_ == 0:
            g.glyph_source.glyph_type = self.mode[2:]
class AttributeDragTool(ValueDragTool):
    """ Tool which modifies a model's attributes as it drags

    This is designed to cover the simplest of drag cases where the drag is
    modifying one or two numerical attributes on an underlying model.  To use,
    simply provide the model object and the attributes that you want to be
    changed by the drag.  If only one attribute is required, the other can be
    left as an empty string.
    """

    #: the model object which has the attributes we are modifying
    model = Any

    #: the name of the attributes that is modified by horizontal motion
    x_attr = Str

    #: the name of the attributes that is modified by vertical motion
    y_attr = Str

    #: max and min values for x value; each bound may be None (no bound),
    #: a number, or a string naming a model attribute holding the bound
    x_bounds = Tuple(Either(Float, Str, None), Either(Float, Str, None))

    #: max and min values for y value (same conventions as x_bounds)
    y_bounds = Tuple(Either(Float, Str, None), Either(Float, Str, None))

    x_name = Str

    y_name = Str

    # ValueDragTool API

    def get_value(self):
        """ Get the current value of the attributes

        Returns a 2-tuple of (x, y) values.  If either x_attr or y_attr is
        the empty string, then the corresponding component of the tuple is
        None.
        """
        x_value = None
        y_value = None
        if self.x_attr:
            x_value = getattr(self.model, self.x_attr)
        if self.y_attr:
            y_value = getattr(self.model, self.y_attr)
        return (x_value, y_value)

    def set_delta(self, value, delta_x, delta_y):
        """ Set the current value of the attributes

        Set the underlying attribute values based upon the starting value and
        the provided deltas.  The values are simply set to the sum of the
        appropriate coordinate and the delta. If either x_attr or y_attr is
        the empty string, then the corresponding component of is ignored.

        Note that setting x and y are two separate operations, and so will fire
        two trait notification events.
        """
        inspector_value = {}
        if self.x_attr:
            # Clamp to the configured (min, max) x bounds before setting.
            x_value = self._clamp_value(value[0] + delta_x, self.x_bounds)
            setattr(self.model, self.x_attr, x_value)
            inspector_value[self.x_name] = x_value
        if self.y_attr:
            # Clamp to the configured (min, max) y bounds before setting.
            y_value = self._clamp_value(value[1] + delta_y, self.y_bounds)
            setattr(self.model, self.y_attr, y_value)
            inspector_value[self.y_name] = y_value
        self.new_value = inspector_value

    # Private API

    def _clamp_value(self, new_value, bounds):
        """ Clamp a value to a (minimum, maximum) bounds tuple.

        Each bound may be None (unbounded), a number, or a string naming a
        model attribute from which the bound is read at drag time.  This
        consolidates the four copies of the clamping logic that previously
        lived inline in set_delta().
        """
        minimum, maximum = bounds
        if minimum is not None:
            if isinstance(minimum, six.string_types):
                minimum = getattr(self.model, minimum)
            new_value = max(new_value, minimum)
        if maximum is not None:
            if isinstance(maximum, six.string_types):
                maximum = getattr(self.model, maximum)
            new_value = min(new_value, maximum)
        return new_value

    def _x_name_default(self):
        return self.x_attr.replace('_', ' ').capitalize()

    def _y_name_default(self):
        return self.y_attr.replace('_', ' ').capitalize()
class MassSpecPersistenceSpec(Loggable):
    """ Value object describing a single analysis run for persistence to the
    Mass Spec database.  Attributes are populated from an automated-run
    record via load_record(); accessor methods expose isotope/baseline data
    in the form the persister expects.
    """
    runid = CStr
    labnumber = CStr
    # Aliquot may arrive as an int or a string identifier.
    aliquot = Either(CInt, Str)
    step = Str
    irradpos = CStr
    # Mapping of isotope name -> isotope object (xs/ys, baseline, blank, ...).
    isotopes = Dict

    mass_spectrometer = Str
    extract_device = Str
    tray = Str
    # 'position' is a property over '_position'; string values containing
    # commas are parsed into a list of ints (see _set_position below).
    position = Property(depends_on='_position')
    _position = Any

    timestamp = Float
    power_requested = Float(0)
    power_achieved = Float(0)
    extract_value = Float(0)
    duration = Float(0)
    cleanup = Float(0)
    duration_at_request = Float(0)
    first_stage_delay = CInt(0)

    runscript_name = Str
    runscript_text = Str
    comment = Str

    update_rundatetime = Bool
    is_peak_hop = Bool
    # Detector used for peak hopping.  Plain class attribute, not a trait.
    peak_hop_detector = 'CDD'

    irradiation = Str
    level = Str
    irradiation_position = CInt
    production_ratios = Dict
    chron_segments = List
    interference_corrections = Dict
    production_name = Str
    j = Any

    # Loggable flag: share a single logger across instances.
    shared_logger = True

    @property
    def second_stage_delay(self):
        # Second-stage delay is synonymous with cleanup time.
        return self.cleanup

    def load_record(self, record):
        """ Copy attributes from an automated-run record onto this spec.

        Each (spec_attr, record_attr) pair is copied when the record has the
        source attribute; TraitError on assignment is logged and skipped.
        Note some spec attrs deliberately share a record source (e.g. both
        'duration' and 'duration_at_request' read record.duration).
        """
        attrs = [('labnumber', 'labnumber'),
                 ('aliquot', 'aliquot'),
                 ('step', 'step'),
                 ('uuid', 'uuid'),
                 ('irradpos', 'labnumber'),
                 ('timestamp', 'timestamp'),
                 ('extract_device', 'extract_device'),
                 ('tray', 'tray'),
                 ('position', 'position'),
                 ('power_requested', 'extract_value'),
                 # ('power_achieved', 'extract_value'),
                 ('extract_value', 'extract_value'),
                 ('duration', 'duration'),
                 ('duration_at_request', 'duration'),
                 ('first_stage_delay', 'duration'),
                 ('cleanup', 'cleanup'),
                 ('comment', 'comment'),
                 ('irradiation', 'irradiation'),
                 ('irradiation_position', 'irradiation_pos'),
                 ('irradiation_pos', 'irradiation_pos'),
                 ('level', 'irradiation_level'),
                 ('irradiation_level', 'irradiation_level'),
                 ('isotopes', 'isotopes'),
                 ('tag', 'tag'),
                 ('sample', 'sample'),
                 ('material', 'material'),
                 ('project', 'project'),
                 ('mass_spectrometer', 'mass_spectrometer'),
                 ('age', 'age'),
                 ('age_err', 'age_err'),
                 ('age_err_wo_j', 'age_err_wo_j'),
                 ('age_err_wo_j_irrad', 'age_err_wo_j_irrad'),
                 ('ar39decayfactor', 'ar39decayfactor'),
                 ('ar37decayfactor', 'ar37decayfactor')]

        for exp_attr, run_attr in attrs:
            if hasattr(record, run_attr):
                try:
                    v = getattr(record, run_attr)
                    self.debug('setting {} to {}'.format(exp_attr, v))
                    setattr(self, exp_attr, v)
                except TraitError as e:
                    # Incompatible value for the trait; log and continue.
                    self.debug(e)

        # Irradiation/production metadata is copied verbatim when present.
        for a in ('chron_segments',
                  'production_ratios',
                  'interference_corrections',
                  'production_name', 'j'):
            if hasattr(record, a):
                setattr(self, a, getattr(record, a))
            else:
                self.debug('no attribute {}'.format(a))

    def get_detector_by_isotope(self, key):
        """ Return the detector name for an isotope, or '' if unknown. """
        try:
            return self.isotopes[key].detector
        except KeyError:
            return ''

    def iter_isotopes(self):
        """ Yield (isotope_name, detector_name) pairs. """
        return ((iso.name, iso.detector) for iso in self.isotopes.values())

    def get_ncounts(self, iso):
        """ Return the number of counts for an isotope (1 if unknown). """
        try:
            n = self.isotopes[iso].n
        except KeyError:
            n = 1
        return n

    def get_baseline_position(self, iso):
        # Fixed baseline mass position -- presumably amu; TODO confirm.
        return 39.5

    def get_blank_uvalue(self, iso):
        """ Return the baseline-corrected blank for an isotope as a ufloat
        (0 +/- 0 if no blank is available).
        """
        try:
            b = self.isotopes[iso].blank.get_baseline_corrected_value()
        except KeyError:
            self.debug('no blank for {} {}'.format(iso,
                                                   list(self.isotopes.keys())))
            b = ufloat(0, 0)

        return b

    def get_signal_uvalue(self, iso, det):
        """ Return the signal intercept for an isotope as a ufloat
        (0 +/- 0 if the isotope is unknown).
        """
        try:
            ps = self.isotopes[iso].uvalue
        except KeyError as e:
            self.debug('no key {} {}'.format(iso,
                                             list(self.isotopes.keys())))
            ps = ufloat(0, 0)

        return ps

    def get_signal_fit(self, iso):
        """ Return the fit name for an isotope's signal ('linear' default). """
        try:
            f = self.isotopes[iso].get_fit(-1)
        except KeyError:
            f = 'linear'
        return f

    def get_baseline_fit(self, det):
        # Baselines are always summarized as the average with SEM error.
        return 'average_SEM'

    def get_baseline_data(self, iso, det, **kw):
        """ Return (times, values) baseline data.

        det is the original detector not the mass spec fooling detector
        """
        self.debug('get baseline data {} {}'.format(iso, det))
        return self._get_data('baseline', iso, det)

    def get_signal_data(self, iso, det, **kw):
        """ Return (times, values) signal data for an isotope/detector. """
        self.debug('get signal data {} {}'.format(iso, det))
        return self._get_data('signal', iso, det, **kw)

    def get_filtered_baseline_uvalue(self, iso, nsigma=2, niter=1, error='sem'):
        """ Return (ufloat(mean, err), n) for an isotope's baseline after
        iterative outlier rejection.

        Filter parameters (iterations, std_devs) stored on the isotope's
        filter_outliers_dict override the arguments.  err is the SEM when
        error == 'sem', otherwise the standard deviation.
        """
        m, s, fncnts = 0, 0, 0
        if iso in self.isotopes:
            iso = self.isotopes[iso]
            xs, ys = iso.baseline.xs, iso.baseline.ys

            fod = iso.baseline.filter_outliers_dict
            niter = fod.get('iterations', niter)
            nsigma = fod.get('std_devs', nsigma)

            # Iteratively drop points more than nsigma std-devs from the mean.
            for i in range(niter):
                m, s = mean(ys), std(ys, ddof=1)
                res = abs(ys - m)
                outliers = where(res > (s * nsigma))[0]
                ys = delete(ys, outliers)

            m, s = mean(ys), std(ys, ddof=1)
            fncnts = ys.shape[0]

        if error == 'sem':
            # Guard against division by zero when all points were filtered.
            s = (s / fncnts ** 0.5) if fncnts else 0

        return ufloat(m, s), fncnts

    def get_baseline_uvalue(self, iso):
        """ Return the unfiltered baseline value for an isotope as a ufloat
        (0 +/- 0 if the isotope is unknown).
        """
        try:
            v = self.isotopes[iso].baseline.uvalue
        except KeyError:
            v = ufloat(0, 0)
        return v

    def _get_data(self, group, iso, det, verbose=True):
        """ Return (times, values) for an isotope's 'signal' or other group
        ('baseline', ...); ([0], [0]) if the isotope is unknown.
        """
        try:
            iso = self.isotopes[iso]
            # For non-signal groups, drill into the named sub-object
            # (e.g. iso.baseline).
            if group != 'signal':
                iso = getattr(iso, group)
            t, v = iso.xs, iso.ys
        except KeyError:
            t, v = [0, ], [0, ]

        self.debug('Get data {} {} len t={}'.format(group, iso, len(t)))
        return t, v

    def _set_position(self, pos):
        # A comma-separated string becomes a list of ints; anything else
        # (falsy values are ignored) is stored as-is.
        if pos:
            if ',' in pos:
                self._position = csv_to_ints(pos)
            else:
                self._position = pos

    def _get_position(self):
        return self._position
class ResultsTable(HasStrictTraits):
    """ Tabular view of the evaluation steps held by an AnalysisModel.

    Keeps the UI selection and the model's selected_step_indices in sync
    in both directions.
    """

    # -------------------
    # Required Attributes
    # -------------------

    #: The model for the result table
    analysis_model = Instance(AnalysisModel)

    #: Adapter initialised with dummy columns to circumvent
    #: issues raised when setting up the View with no columns
    tabular_adapter = Instance(TabularAdapter, ())

    # --------------------
    # Dependent Attributes
    # --------------------

    #: Selected evaluation steps in the table
    _selected_rows = Either(List(Tuple), None)

    #: When the selected row changes, this event will be triggered
    #: to return the index of that row, so that it can be scrolled to
    _scroll_to_row = Event(Int)

    # ----------
    # Properties
    # ----------

    #: Rows of the table_editor
    rows = Property(List(Tuple),
                    depends_on="analysis_model.evaluation_steps")

    #: Columns of the table_editor
    columns = Property(List(ListColumn), depends_on="analysis_model.header")

    def _get_rows(self):
        return self.analysis_model.evaluation_steps

    def _get_columns(self):
        # One column per header entry, keyed by positional index.
        return [
            ListColumn(label=name, index=index)
            for index, name in enumerate(self.analysis_model.header)
        ]

    # ----
    # View
    # ----

    def default_traits_view(self):
        editor = TabularEditor(
            adapter=self.tabular_adapter,
            show_titles=True,
            selected="_selected_rows",
            auto_update=False,
            multi_select=True,
            scroll_to_row="_scroll_to_row",
            scroll_to_row_hint="visible",
            editable=False,
        )
        return View(UItem("rows", editor=editor))

    # Response to model initialisation
    @on_trait_change("analysis_model.header")
    def _update_adapter(self):
        self.tabular_adapter.columns = [
            name for name in self.analysis_model.header
        ]

    # Response to model change
    @on_trait_change("analysis_model.selected_step_indices")
    def update_table(self):
        """ Updates the selected row in the table according to the model """
        if self.analysis_model.selected_step_indices is None:
            self._selected_rows = []
        else:
            self._selected_rows = [
                self.rows[ind]
                for ind in self.analysis_model.selected_step_indices
            ]

    # Response to new selection by user in UI
    @on_trait_change("_selected_rows[]")
    def update_model(self):
        """ Updates the model according to the selected row in the table """
        if not self._selected_rows:
            # BUG FIX: previously assigned to the private shadow name
            # '_selected_step_indices', which bypassed the public trait,
            # so clearing the UI selection never notified the model's
            # observers.  Assign the public trait instead, matching the
            # else-branch below.
            self.analysis_model.selected_step_indices = None
        else:
            self.analysis_model.selected_step_indices = [
                self.analysis_model.evaluation_steps.index(row)
                for row in self._selected_rows
            ]
            # Keep the first selected row visible.
            self._scroll_to_row = self.analysis_model.selected_step_indices[0]
class GridPlotContainer(BasePlotContainer):
    """ A GridPlotContainer consists of rows and columns in a tabular format.

    Each cell's width is the same as all other cells in its column, and each
    cell's height is the same as all other cells in its row.

    Although grid layout requires more layout information than a simple
    ordered list, this class keeps components as a simple list and exposes a
    **shape** trait.
    """

    draw_order = Instance(list, args=(DEFAULT_DRAWING_ORDER,))

    # The amount of space to put on either side of each component, expressed
    # as a tuple (h_spacing, v_spacing).
    spacing = Either(Tuple, List, Array)

    # The vertical alignment of objects that don't span the full height.
    valign = Enum("bottom", "top", "center")

    # The horizontal alignment of objects that don't span the full width.
    halign = Enum("left", "right", "center")

    # The shape of this container, i.e, (rows, columns).  The items in
    # **components** are shuffled appropriately to match this
    # specification.  If there are fewer components than cells, the remaining
    # cells are filled in with spaces.  If there are more components than
    # cells, the remainder wrap onto new rows as appropriate.
    shape = Trait((0,0), Either(Tuple, List, Array))

    # This property exposes the underlying grid structure of the container,
    # and is the preferred way of setting and reading its contents.
    # When read, this property returns a Numpy array with dtype=object; values
    # for setting it can be nested tuples, lists, or 2-D arrays.
    # The array is in row-major order, so that component_grid[0] is the first
    # row, and component_grid[:,0] is the first column.  The rows are ordered
    # from top to bottom.
    component_grid = Property

    # The internal component grid, in row-major order.  This gets updated
    # when any of the following traits change: shape, components,
    # grid_components.
    _grid = Array

    # Cached (width, height) computed by get_preferred_size(); None until
    # a layout pass has run.
    _cached_total_size = Any

    # Cached SizePrefs objects for the horizontal and vertical axes.
    _h_size_prefs = Any
    _v_size_prefs = Any

    class SizePrefs(object):
        """ Object to hold size preferences across spans in a particular
        dimension.  For instance, if SizePrefs is being used for the row
        axis, then each element in the arrays below express sizing information
        about the corresponding column.
        """

        # The maximum size of non-resizable elements in the span.  If an
        # element of this array is 0, then its corresponding span had no
        # non-resizable components.
        fixed_lengths = Array

        # The maximum preferred size of resizable elements in the span.
        # If an element of this array is 0, then its corresponding span
        # had no resizable components with a non-zero preferred size.
        resizable_lengths = Array

        # The direction of resizability associated with this SizePrefs
        # object.  If this SizePrefs is sizing along the X-axis, then
        # direction should be "h", and correspondingly for the Y-axis.
        direction = Enum("h", "v")

        # The index into a size tuple corresponding to our orientation
        # (0 for horizontal, 1 for vertical).  This is derived from
        # **direction** in the constructor.
        index = Int(0)

        def __init__(self, length, direction):
            """ Initializes this prefs object with empty arrays of the
            given length and with the given direction.
            """
            self.fixed_lengths = zeros(length)
            self.resizable_lengths = zeros(length)
            self.direction = direction
            if direction == "h":
                self.index = 0
            else:
                self.index = 1
            return

        def update_from_component(self, component, index):
            """ Given a component at a particular index along this SizePref's
            axis, integrates the component's resizability and sizing
            information into self.fixed_lengths and self.resizable_lengths.
            """
            resizable = self.direction in component.resizable
            pref_size = component.get_preferred_size()
            self.update_from_pref_size(pref_size[self.index], index, resizable)

        def update_from_pref_size(self, pref_length, index, resizable):
            # Track the running maximum for the appropriate category
            # (resizable vs fixed) at this span index.
            if resizable:
                if pref_length > self.resizable_lengths[index]:
                    self.resizable_lengths[index] = pref_length
            else:
                if pref_length > self.fixed_lengths[index]:
                    self.fixed_lengths[index] = pref_length
            return

        def get_preferred_size(self):
            # Element-wise max of the fixed and resizable preferred lengths.
            return amax((self.fixed_lengths, self.resizable_lengths), axis=0)

        def compute_size_array(self, size):
            """ Given a length along the axis corresponding to this SizePref,
            returns an array of lengths to assign each cell, taking into
            account resizability and preferred sizes.
            """
            # There are three basic cases for each column:
            #   1. size < total fixed size
            #   2. total fixed size < size < fixed size + resizable preferred size
            #   3. fixed size + resizable preferred size < size
            #
            # In all cases, non-resizable components get their full width.
            #
            # For resizable components with non-zero preferred size, the following
            # actions are taken depending on case:
            #   case 1: They get sized to 0.
            #   case 2: They get a fraction of their preferred size, scaled based on
            #           the amount of remaining space after non-resizable components
            #           get their full size.
            #   case 3: They get their full preferred size.
            #
            # For resizable components with no preferred size (indicated in our scheme
            # by having a preferred size of 0), the following actions are taken
            # depending on case:
            #   case 1: They get sized to 0.
            #   case 2: They get sized to 0.
            #   case 3: All resizable components with no preferred size split the
            #           remaining space evenly, after fixed width and resizable
            #           components with preferred size get their full size.

            fixed_lengths = self.fixed_lengths
            resizable_lengths = self.resizable_lengths
            return_lengths = zeros_like(fixed_lengths)

            fixed_size = sum(fixed_lengths)
            fixed_length_indices = fixed_lengths > resizable_lengths
            resizable_indices = resizable_lengths > fixed_lengths
            fully_resizable_indices = (resizable_lengths + fixed_lengths == 0)
            preferred_size = sum(fixed_lengths[fixed_length_indices]) + \
                             sum(resizable_lengths[~fixed_length_indices])

            # Regardless of the relationship between available space and
            # resizable preferred sizes, columns/rows where the non-resizable
            # component is largest will always get that amount of space.
            return_lengths[fixed_length_indices] = fixed_lengths[fixed_length_indices]

            if size <= fixed_size:
                # We don't use fixed_length_indices here because that mask is
                # just where non-resizable components were larger than resizable
                # ones.  If our allotted size is less than the total fixed size,
                # then we should give all non-resizable components their desired
                # size.
                indices = fixed_lengths > 0
                return_lengths[indices] = fixed_lengths[indices]
                return_lengths[~indices] = 0

            elif size > fixed_size and (fixed_lengths > resizable_lengths).all():
                # If we only have to consider non-resizable lengths, and we have
                # extra space available, then we need to give each column an
                # amount of extra space corresponding to its size.
                desired_space = sum(fixed_lengths)
                if desired_space > 0:
                    scale = size / desired_space
                    return_lengths = (fixed_lengths * scale).astype(int)

            elif size <= preferred_size or not fully_resizable_indices.any():
                # If we don't have enough room to give all the non-fully resizable
                # components their preferred size, or we have more than enough
                # room for them and no fully resizable components to take up
                # the extra space, then we just scale the resizable components
                # up or down based on the amount of extra space available.
                delta_lengths = resizable_lengths[resizable_indices] - \
                                fixed_lengths[resizable_indices]
                desired_space = sum(delta_lengths)
                if desired_space > 0:
                    avail_space = size - sum(fixed_lengths)  # [fixed_length_indices])
                    scale = avail_space / desired_space
                    return_lengths[resizable_indices] = (fixed_lengths[resizable_indices] + \
                                                         scale * delta_lengths).astype(int)

            elif fully_resizable_indices.any():
                # We have enough room to fit all the non-resizable components
                # as well as components with preferred sizes, and room left
                # over for the fully resizable components.  Give the resizable
                # components their desired amount of space, and then give the
                # remaining space to the fully resizable components.
                return_lengths[resizable_indices] = resizable_lengths[resizable_indices]

                avail_space = size - preferred_size
                count = sum(fully_resizable_indices)
                space = avail_space / count
                return_lengths[fully_resizable_indices] = space

            else:
                raise RuntimeError("Unhandled sizing case in GridContainer")

            return return_lengths

    def get_preferred_size(self, components=None):
        """ Returns the size (width,height) that is preferred for this
        component.

        Overrides PlotComponent.
        """
        if self.fixed_preferred_size is not None:
            return self.fixed_preferred_size

        if components is None:
            components = self.component_grid
        else:
            # Convert to array; hopefully it is a list or tuple of list/tuples
            components = array(components)

        # These arrays track the maximum widths in each column and maximum
        # height in each row.
        numrows, numcols = self.shape

        no_visible_components = True
        self._h_size_prefs = GridPlotContainer.SizePrefs(numcols, "h")
        self._v_size_prefs = GridPlotContainer.SizePrefs(numrows, "v")

        self._pref_size_cache = {}
        for i, row in enumerate(components):
            for j, component in enumerate(row):
                if not self._should_layout(component):
                    continue
                else:
                    no_visible_components = False
                self._h_size_prefs.update_from_component(component, j)
                self._v_size_prefs.update_from_component(component, i)

        total_width = sum(self._h_size_prefs.get_preferred_size()) + self.hpadding
        total_height = sum(self._v_size_prefs.get_preferred_size()) + self.vpadding
        total_size = array([total_width, total_height])

        # Account for spacing.  There are N+1 of spaces, where N is the size in
        # each dimension.
        if self.spacing is None:
            spacing = zeros(2)
        else:
            spacing = array(self.spacing)
        total_spacing = array(components.shape[::-1]) * spacing * 2 * (total_size>0)
        total_size += total_spacing

        for orientation, ndx in (("h", 0), ("v", 1)):
            if (orientation not in self.resizable) and \
                    (orientation not in self.fit_components):
                total_size[ndx] = self.outer_bounds[ndx]
            elif no_visible_components or (total_size[ndx] == 0):
                total_size[ndx] = self.default_size[ndx]

        self._cached_total_size = total_size
        if self.resizable == "":
            return self.outer_bounds
        else:
            return self._cached_total_size

    def _do_layout(self):
        # If we don't have cached size_prefs, then we need to call
        # get_preferred_size to build them.
        if self._cached_total_size is None:
            self.get_preferred_size()

        # If we need to fit our components, then rather than using our
        # currently assigned size to do layout, we use the preferred
        # size we computed from our components.
        size = array(self.bounds)
        if self.fit_components != "":
            self.get_preferred_size()
            if "h" in self.fit_components:
                size[0] = self._cached_total_size[0] - self.hpadding
            if "v" in self.fit_components:
                size[1] = self._cached_total_size[1] - self.vpadding

        # Compute total_spacing and spacing, which are used in computing
        # the bounds and positions of all the components.
        shape = array(self._grid.shape).transpose()
        if self.spacing is None:
            spacing = array([0,0])
        else:
            spacing = array(self.spacing)
        total_spacing = spacing * 2 * shape

        # Compute the total space used by non-resizable and resizable
        # components with non-zero preferred sizes.
        widths = self._h_size_prefs.compute_size_array(size[0] - total_spacing[0])
        heights = self._v_size_prefs.compute_size_array(size[1] - total_spacing[1])

        # Set the baseline h and v positions for each cell.  Resizable
        # components will get these as their position, but non-resizable
        # components will have to be aligned in H and V.
        summed_widths = cumsum(hstack(([0], widths[:-1])))
        summed_heights = cumsum(hstack(([0], heights[-1:0:-1])))
        h_positions = (2*(arange(self._grid.shape[1])+1) - 1) * spacing[0] + summed_widths
        v_positions = (2*(arange(self._grid.shape[0])+1) - 1) * spacing[1] + summed_heights
        # Rows are laid out top-to-bottom, but y increases upward.
        v_positions = v_positions[::-1]

        # Loop over all rows and columns, assigning position, setting bounds
        # for resizable components, and aligning non-resizable ones
        valign = self.valign
        halign = self.halign
        for j, row in enumerate(self._grid):
            for i, component in enumerate(row):
                if not self._should_layout(component):
                    continue

                r = component.resizable
                x = h_positions[i]
                y = v_positions[j]
                w = widths[i]
                h = heights[j]

                if "v" not in r:
                    # Component is not vertically resizable
                    if valign == "top":
                        y += h - component.outer_height
                    elif valign == "center":
                        y += (h - component.outer_height) / 2
                if "h" not in r:
                    # Component is not horizontally resizable
                    if halign == "right":
                        x += w - component.outer_width
                    elif halign == "center":
                        x += (w - component.outer_width) / 2

                component.outer_position = [x,y]
                bounds = list(component.outer_bounds)
                if "h" in r:
                    bounds[0] = w
                if "v" in r:
                    bounds[1] = h

                component.outer_bounds = bounds
                component.do_layout()
        return

    def _reflow_layout(self):
        """ Re-computes self._grid based on self.components and self.shape.
        Adjusts self.shape accordingly.
        """
        numcells = self.shape[0] * self.shape[1]
        if numcells < len(self.components):
            # NOTE(review): divmod returns (quotient, remainder); using the
            # remainder as the column count looks suspicious -- confirm the
            # intended wrap-to-new-rows behavior before relying on this path.
            numrows, numcols = divmod(len(self.components), self.shape[0])
            self.shape = (numrows, numcols)
        grid = array(self.components, dtype=object)
        grid.resize(self.shape)
        grid[grid==0] = None
        self._grid = grid
        self._layout_needed = True
        return

    def _shape_changed(self, old, new):
        self._reflow_layout()

    def __components_changed(self, old, new):
        self._reflow_layout()

    def __components_items_changed(self, event):
        self._reflow_layout()

    def _get_component_grid(self):
        return self._grid

    def _set_component_grid(self, val):
        grid = array(val)
        grid_set = set(grid.flatten())

        # Figure out which of the components in the component_grid are new,
        # and which have been removed.
        existing = set(array(self._grid).flatten())
        new = grid_set - existing
        removed = existing - grid_set

        for component in removed:
            if component is not None:
                component.container = None
        for component in new:
            if component is not None:
                if component.container is not None:
                    component.container.remove(component)
                component.container = self

        # Update shape without notification to avoid triggering a redundant
        # _reflow_layout before _components is set below.
        self.set(shape=grid.shape, trait_change_notify=False)

        self._components = list(grid.flatten())
        if self._should_compact():
            self.compact()
        self.invalidate_draw()
        return
class DecoratedScene(Scene):
    """A VTK interactor scene which provides a convenient toolbar that
    allows the user to set the camera view, turn on the axes indicator
    etc.
    """

    #######################################################################
    # Traits
    #######################################################################

    if hasattr(tvtk, 'OrientationMarkerWidget'):
        # The tvtk orientation marker widget. This only exists in VTK 5.x.
        marker = Instance(tvtk.OrientationMarkerWidget, ())

        # The tvtk axes that will be shown for the orientation.
        axes = Instance(tvtk.AxesActor, ())
    else:
        marker = None
        axes = None

    # Determine if the orientation axis is shown or not.
    show_axes = Bool(False)

    # The list of actions represented in the toolbar
    actions = List(Either(Action, Group))

    ##########################################################################
    # `object` interface
    ##########################################################################
    def __init__(self, parent, **traits):
        super(DecoratedScene, self).__init__(parent, **traits)
        self._setup_axes_marker()

    def __get_pure_state__(self):
        """Allows us to pickle the scene."""
        # The control attribute is not picklable since it is a VTK
        # object so we remove it.
        state = super(DecoratedScene, self).__get_pure_state__()
        for key in ('_content', '_panel', '_tool_bar', 'actions'):
            state.pop(key, None)
        return state

    ##########################################################################
    # Non-public interface.
    ##########################################################################
    def _create_control(self, parent):
        """ Create the toolkit-specific control that represents the widget.

        Overridden to wrap the Scene control within a panel that also
        contains a toolbar.
        """
        # A panel wrapping the scene toolkit control lets us attach
        # additional controls (i.e. the toolbar).
        self._panel = QtGui.QMainWindow()

        # Add our toolbar to the panel.
        manager = self._get_tool_bar_manager()
        self._tool_bar = manager.create_tool_bar(self._panel)
        self._panel.addToolBar(self._tool_bar)

        # Create the actual scene content
        self._content = super(DecoratedScene, self)._create_control(self._panel)
        self._panel.setCentralWidget(self._content)

        return self._panel

    def _setup_axes_marker(self):
        """Configure the axes actor and attach it to the marker widget."""
        axes = self.axes
        if axes is None:
            # For VTK versions < 5.0.
            return

        axes.trait_set(
            normalized_tip_length=(0.4, 0.4, 0.4),
            normalized_shaft_length=(0.6, 0.6, 0.6),
            shaft_type='cylinder',
        )

        # Share one caption text property across the three axis captions
        # so that color changes apply everywhere at once.
        prop = axes.x_axis_caption_actor2d.caption_text_property
        axes.y_axis_caption_actor2d.caption_text_property = prop
        axes.z_axis_caption_actor2d.caption_text_property = prop
        prop.trait_set(color=(1, 1, 1), shadow=False, italic=False)
        self._background_changed(self.background)

        self.marker.trait_set(key_press_activation=False)
        self.marker.orientation_marker = axes

    def _get_tool_bar_manager(self):
        """ Returns the tool_bar_manager for this scene. """
        return ToolBarManager(*self.actions)

    def _get_image_path(self):
        """Returns the directory which contains the images used by the
        toolbar."""
        # So that we can find the images.
        import tvtk.pyface.api

        return dirname(tvtk.pyface.api.__file__)

    def _toggle_projection(self):
        """ Toggle between perspective and parallel projection, this is
        used for the toolbar.
        """
        if self._panel is not None:
            self.parallel_projection = not self.parallel_projection

    def _toggle_axes(self, *args):
        """Used by the toolbar to turn on/off the axes indicator."""
        if self._panel is not None:
            self.show_axes = not self.show_axes

    def _save_snapshot(self):
        """Invoked by the toolbar menu to save a snapshot of the scene
        to an image.  Note that the extension of the filename determines
        what image type is saved.  The default is PNG.
        """
        if self._panel is not None:
            path = popup_save(self._panel)
            if len(path) > 0:
                # The extension of the path will determine the actual
                # image type saved.
                self.save(path)

    def _configure_scene(self):
        """Invoked when the toolbar icon for configuration is clicked."""
        self.edit_traits()

    ######################################################################
    # Trait handlers.
    ######################################################################
    def _show_axes_changed(self):
        marker = self.marker
        if (self._vtk_control is not None) and (marker is not None):
            if not self.show_axes:
                marker.interactor = None
                marker.enabled = False
            else:
                marker.interactor = self.interactor
                marker.enabled = True
            self.render()

    def _background_changed(self, value):
        # Depending on the background, this sets the axes text and
        # outline color to something that should be visible.
        axes = self.axes
        if (self._vtk_control is not None) and (axes is not None):
            prop = self.axes.x_axis_caption_actor2d.caption_text_property
            marker = self.marker
            # Light text on dark backgrounds, dark text on light ones.
            if (value[0] + value[1] + value[2]) <= 1.0:
                prop.color = (1, 1, 1)
            else:
                prop.color = (0, 0, 0)
            try:
                marker.outline_color = prop.color  # VTK 9+
            except TraitError:
                marker.set_outline_color(*prop.color)
            self.render()

    def _actions_default(self):
        search_path = [self._get_image_path()]

        def push_action(icon, tooltip, callback):
            # Plain push-button action using a toolbar icon.
            return Action(
                image=ImageResource(icon, search_path=search_path),
                tooltip=tooltip,
                on_perform=callback,
            )

        view_group = Group(
            push_action('16x16/x-axis', "View along the -X axis",
                        self.x_minus_view),
            push_action('16x16/x-axis', "View along the +X axis",
                        self.x_plus_view),
            push_action('16x16/y-axis', "View along the -Y axis",
                        self.y_minus_view),
            push_action('16x16/y-axis', "View along the +Y axis",
                        self.y_plus_view),
            push_action('16x16/z-axis', "View along the -Z axis",
                        self.z_minus_view),
            push_action('16x16/z-axis', "View along the +Z axis",
                        self.z_plus_view),
            push_action('16x16/isometric', "Obtain an isometric view",
                        self.isometric_view),
        )

        toggle_group = Group(
            Action(
                image=ImageResource('16x16/parallel', search_path=search_path),
                tooltip='Toggle parallel projection',
                style="toggle",
                on_perform=self._toggle_projection,
                checked=self.parallel_projection,
            ),
            Action(
                image=ImageResource('16x16/origin_glyph',
                                    search_path=search_path),
                tooltip='Toggle axes indicator',
                style="toggle",
                enabled=(self.marker is not None),
                on_perform=self._toggle_axes,
                checked=self.show_axes,
            ),
            Action(
                image=ImageResource('16x16/fullscreen',
                                    search_path=search_path),
                tooltip='Full Screen (press "q" or "e" or Esc to exit '
                        'fullscreen)',
                style="push",
                on_perform=self._full_screen_fired,
            ),
        )

        file_group = Group(
            Action(
                image=ImageResource('16x16/save', search_path=search_path),
                tooltip="Save a snapshot of this scene",
                on_perform=self._save_snapshot,
            ),
            Action(
                image=ImageResource('16x16/configure', search_path=search_path),
                tooltip='Configure the scene',
                style="push",
                on_perform=self._configure_scene,
            ),
        )

        return [view_group, toggle_group, file_group]
class FloatValidator(Validator):
    """ A concrete Validator which handles floating point input.

    This validator ensures that the text represents a floating point
    number within a specified range.
    """
    #: The minimum value allowed for the float, inclusive, or None if
    #: there is no lower bound.
    minimum = Either(None, Float)

    #: The maximum value allowed for the float, inclusive, or None if
    #: there is no upper bound.
    maximum = Either(None, Float)

    #: Whether or not to allow exponents like '1e6' in the input.
    allow_exponent = Bool(True)

    def convert(self, text):
        """ Converts the text to a floating point value.

        Parameters
        ----------
        text : unicode
            The unicode text to convert to a float.

        Returns
        -------
        result : float
            The floating point value for the converted text.

        Raises
        ------
        ValueError
            A ValueError will be raised if the conversion fails.
        """
        return float(text)

    def validate(self, text, component):
        """ Validates the given text matches the float range.

        Parameters
        ----------
        text : unicode
            The unicode text edited by the client widget.

        component : Declarative
            The declarative component currently making use of the
            validator.

        Returns
        -------
        result : (unicode, bool)
            A 2-tuple of (optionally modified) unicode text, and whether
            or not that text should be considered valid.
        """
        try:
            value = self.convert(text)
        except ValueError:
            return (text, False)
        # BUGFIX: float('nan') converts successfully, but NaN compares
        # False against any bound, so it used to slip through the range
        # checks below. Reject it explicitly (NaN != NaN).
        if value != value:
            return (text, False)
        minimum = self.minimum
        if minimum is not None and value < minimum:
            return (text, False)
        maximum = self.maximum
        if maximum is not None and value > maximum:
            return (text, False)
        if not self.allow_exponent and 'e' in text.lower():
            return (text, False)
        return (text, True)

    def client_validator(self):
        """ The client side float validator.

        Returns
        -------
        result : dict
            The dict representation of the client side float validator.
        """
        res = {}
        res['type'] = 'float'
        res['message'] = self.message
        res['arguments'] = {
            'minimum': self.minimum,
            'maximum': self.maximum,
            'allow_exponent': self.allow_exponent,
        }
        return res
finally: self._disable_update = False if trait_change_notify: self.update() return self ###################################################################### # Non-public interface. ###################################################################### def _m_data_changed(self, ds): if not hasattr(ds, 'mlab_source'): ds.add_trait('mlab_source', Instance(MlabSource)) ds.mlab_source = self ArrayOrNone = Either(None, CArray, comparison_mode=NO_COMPARE) ArrayNumberOrNone = Either(None, CArrayOrNumber, comparison_mode=NO_COMPARE) ############################################################################### # `MGlyphSource` class. ############################################################################### class MGlyphSource(MlabSource): """ This class represents a glyph data source for Mlab objects and allows the user to set the x, y, z, scalar/vector attributes. """ # The x, y, z and points of the glyphs. x = ArrayNumberOrNone y = ArrayNumberOrNone
class Value(HasTraits):
    """ A simple named numeric value. """

    # Display name of the value.
    name = Str

    # The numeric payload; accepts either a Float or an Int.
    value = Either(Float, Int)
class AnalysisModel(HasStrictTraits):
    """ Tabular storage for MCO results: a header plus one tuple of
    values (and optional metadata dict) per evaluation step. """

    #: Tuple of column names of the analysis model. The names are defined
    #: by the MCOStartEvent parsed in ``_server_event_mainthread()`` in
    #: :class:`WFManagerSetupTask
    #: <force_wfmanager.wfmanager_setup_task.WfManagerSetupTask>`.
    header = Tuple()

    #: The current row of the model. The received data is added to the
    #: `_row_data` first, before the whole row is added to the model table.
    _row_data = Dict(key_trait=Str)

    #: Storage for any metadata that is received along with the MCO data
    _row_metadata = Dict(key_trait=Str)

    #: Private trait of the `evaluation_steps` property
    _evaluation_steps = List(Tuple())

    #: Private trait of the `_step_metadata` property
    _step_metadata = List(Dict())

    #: Evaluation steps, each evaluation step is a tuple of parameter
    #: values, received from the a single Workflow execution. The order of
    #: the parameters in each evaluation step must match the order of
    #: value_names
    evaluation_steps = Property(
        List(Tuple()), depends_on="_evaluation_steps"
    )

    #: Metadata associated with each evaluation step
    step_metadata = Property(List(Dict()), depends_on="_step_metadata")

    #: Tracks whether the current state of AnalysisModel can be exported
    _export_enabled = Bool(False)

    #: If there are results, then they can be exported
    export_enabled = Property(Bool(), depends_on="_export_enabled")

    #: Selected step, used for highlighting in the table/plot.
    #: If selected, it must be in the allowed range of values.
    _selected_step_indices = Either(None, List(Int()))

    #: Property that informs about the currently selected step.
    selected_step_indices = Property(
        Either(None, List(Int)), depends_on="_selected_step_indices"
    )

    #: Indicates whether there is any data stored in the AnalysisModel
    is_empty = Property(Bool(), depends_on="_evaluation_steps")

    def _header_default(self):
        return ()

    def _row_data_default(self):
        # One entry per header column, all initialised to None.
        return dict.fromkeys(self.header)

    def _row_metadata_default(self):
        return {}

    def _get_export_enabled(self):
        return self._export_enabled

    def _get_step_metadata(self):
        return self._step_metadata

    def _get_evaluation_steps(self):
        return self._evaluation_steps

    def _get_selected_step_indices(self):
        return self._selected_step_indices

    def _set_selected_step_indices(self, values):
        """ Check the requested indices of selected rows, and use the
        requested values if they exist in the table, or are None.

        Raises
        ------
        ValueError
            If any index is outside [0, len(evaluation_steps) - 1].
        """
        if values is None:
            self._selected_step_indices = values
            return
        for value in values:
            # BUGFIX: was `value > len(...)`, which wrongly accepted
            # value == len(evaluation_steps) even though the message
            # (and valid row range) requires value <= len - 1.
            if value >= len(self.evaluation_steps) or value < 0:
                raise ValueError(
                    f"Invalid value for selection index {value}. "
                    "It must be a positive Int less or equal to "
                    f"{len(self.evaluation_steps)-1}"
                )
        self._selected_step_indices = values

    def _get_is_empty(self):
        return not bool(len(self.evaluation_steps))

    def notify(self, data, metadata=False):
        """ Public method to add `data` to the AnalysisModel.
        If no `header` is set up, the `data` is considered to be a
        `header`. If the metadata keyword is set to True the `data`
        is treated as metadata to be associated with the current row.
        Otherwise, the `data` is treated as to be added to the
        current row.
        """
        if not self.header:
            self._add_header(data)
        elif metadata:
            self._add_metadata(data)
        else:
            self._add_data(data)

    def _add_header(self, header):
        """ Creates the header of the AnalysisModel. Updates the current
        row to the default value with the header values as the keys."""
        try:
            self.header = header
        except TraitError as e:
            log.error(
                f"The Header of the AnalysisModel can't be defined by "
                f"the {header}. A list or tuple of strings is required."
            )
            raise e
        else:
            self._row_data = self._row_data_default()

    def _add_data(self, data):
        """ For a prepared AnalysisModel (when the header is already set
        up), adds `data` to the current row. The way the `data` is added
        depends on the format of the argument. If `data` is a dict-like
        object, `_add_cell` is called. Otherwise, `_add_cells` is
        called."""
        try:
            for key, value in data.items():
                self._add_cell(key, value)
        except AttributeError:
            # Not dict-like; treat as a positional sequence of values.
            self._add_cells(data)
        self._finalize_row()

    def _add_metadata(self, data):
        """ For each evaluation step, add optional metadata. Expects the
        `data` attribute to be a dictionary containing key value pairs to
        be associated with the current row.
        """
        try:
            self._row_metadata.update(data)
        except (TraitError, TypeError, ValueError):
            # Best-effort: silently ignore malformed metadata.
            pass

    def _add_cell(self, label, value):
        """ Inserts a `value` into the column of the current row
        (self.row_data) with the `label` label."""
        if label in self.header:
            self._row_data.update({label: value})
        else:
            # BUGFIX: added the missing space between the two sentences
            # of the warning message.
            log.warning(
                f"The AnalysisModel does not have the {label} column. "
                f"The value {value} has not been added to the table."
            )

    def _add_cells(self, data):
        """ Inserts the `data` to the `self.row_data`, starting from the
        first cell until every `data` element is inserted, or
        `self.row_data` is not completed.`"""
        for column, value in zip(self.header, data):
            self._row_data[column] = value

    def _finalize_row(self):
        """ Finalizes the `self.row_data` update: adds the row data to
        the table, and empties the row data for the next row."""
        row_data = tuple(
            self._row_data.get(label, None) for label in self.header
        )
        self._add_evaluation_step(row_data)
        self._step_metadata.append(self._row_metadata)
        self._row_data = self._row_data_default()
        self._row_metadata = self._row_metadata_default()

    @on_trait_change("header")
    def clear_steps(self):
        """ Removes all entries in the list :attr:`evaluation_steps` and
        sets :attr:`selected_step_indices` to None but does not clear
        :attr:`value_names`
        """
        self._evaluation_steps[:] = []
        self._step_metadata[:] = []
        self._selected_step_indices = None
        self._export_enabled = False

    def _add_evaluation_step(self, evaluation_step):
        """ Add the completed row data to the evaluation steps table.

        Parameters
        ---------
        evaluation_step: tuple

        Raises
        ------
        ValueError
            If the model has no header, or the step length does not
            match the header length.
        """
        if len(self.header) == 0:
            raise ValueError(
                "Cannot add evaluation step to an empty Analysis model"
            )
        if len(evaluation_step) != len(self.header):
            raise ValueError(
                "Size of evaluation step is incompatible with the length of "
                "the header."
            )
        self._evaluation_steps.append(evaluation_step)
        self._export_enabled = True

    def column(self, label):
        """ Returns a list of values from the column of the
        AnalysisModel. If `label` is a string, the corresponding column
        index is inferred from the AnalysisModel.header. If `label` is an
        int, it defines the column index."""
        column_error = ValueError(
            f"Column of the AnalysisModel with label {label}"
            " doesn't exist. The label must be a string or int."
        )
        if label in self.header:
            index = self.header.index(label)
        elif isinstance(label, int):
            index = label
        else:
            raise column_error
        if index >= len(self.header):
            raise column_error
        data = [step[index] for step in self.evaluation_steps]
        return data

    def clear(self):
        """ Sets :attr:`value_names` to be empty, removes all entries in
        the list :attr:`evaluation_steps` and sets
        :attr:`selected_step_indices` to None"""
        # Resetting the header triggers clear_steps() via the
        # on_trait_change handler above.
        self.header = self._header_default()

    def from_json(self, data):
        """ Delete all current data and load :attr:`value_names` and
        :attr:`evaluation_steps` from a dictionary.
        """
        self.clear()
        if not data:
            return

        try:
            header = data["header"]
        except KeyError:
            error = (
                "AnalysisModel can't be instantiated from a data dictionary"
                " that does not contain a header."
            )
            log.error(error)
            raise KeyError(error)
        else:
            self.notify(tuple(header))

        for index in range(1, len(data)):
            try:
                step = data[str(index)]
            except KeyError:
                log.warning(
                    f"Can't find a row with index {index}. This index will "
                    f"be skipped in the AnalysisModel."
                )
            else:
                # TODO: This format is now deprecated and should be removed
                # in version 0.7.0
                # https://github.com/force-h2020/force-wfmanager/issues/414
                if isinstance(step, list):
                    log.warning(
                        'Project file format is deprecated and will be removed'
                        ' in version 0.7.0')
                    self.notify(step)
                else:
                    self.notify(step['metadata'], metadata=True)
                    self.notify(step['data'])

    def to_json(self):
        """ Returns a dictionary representation with column names as keys
        and column values as values."""
        return self.__getstate__()

    def __getstate__(self):
        """ Returns a dictionary representation with column names as keys
        and column values as values.

        Returns
        -------
        data: a dictionary containing the column-wise representation
            of the `self`.
        """
        data = {"header": self.header}
        for index, row in enumerate(self.evaluation_steps, start=1):
            metadata = self.step_metadata[index - 1]
            data[index] = {'data': row, 'metadata': metadata}
        return data

    def write(self, filename, *, mode="w"):
        """ Dispatches to dump_csv or dump_json based on the extension.

        Raises
        ------
        IOError
            If the filename is neither .csv nor .json.
        """
        if filename.endswith(".csv"):
            self.dump_csv(filename, mode=mode)
        elif filename.endswith(".json"):
            self.dump_json(filename, mode=mode)
        else:
            raise IOError(
                "AnalysisModel can only write to .json or .csv formats."
            )

    def dump_json(self, filename, *, mode="w"):
        """ Writes the AnalysisModel to a `filename` file in json format,
        including both data and metadata values. Can be used to save the
        state of the analysis."""
        if not self.export_enabled:
            return False
        with open(filename, mode) as file:
            json.dump(self.__getstate__(), file, indent=4)
        return True

    def dump_csv(self, filename, *, mode="w"):
        """ Writes the AnalysisModel to a `filename` file in csv format,
        but does not include metadata values. Should be only be used to
        export MCO parameter and KPI data, rather than saving the state
        of the analysis.
        """
        if not self.export_enabled:
            return False
        with open(filename, mode) as file:
            writer = csv.writer(file)
            writer.writerow(self.header)
            for step in self.evaluation_steps:
                writer.writerow(step)
        return True
class DataFrameAdapter(TabularAdapter):
    """ Generic tabular adapter for data frames """

    #: The text to use for a generic entry.
    text = Property

    #: The alignment for each cell
    alignment = Property(Enum('left', 'center', 'right'))

    #: The text to use for a row index.
    index_text = Property

    #: The alignment to use for a row index.
    index_alignment = Property

    #: The font to use for each column
    font = Property

    #: The format to use for each column
    format = Property

    #: The format for each element, or a mapping column ID to format.
    _formats = Either(Str, Dict, default='%s')

    #: The font for each element, or a mapping column ID to font.
    _fonts = Either(Font, Dict, default='Courier 10')

    def _get_index_alignment(self):
        """ Right-align numeric indices, left-align everything else. """
        import numpy as np

        index = getattr(self.object, self.name).index
        if np.issubdtype(index.dtype, np.number):
            return 'right'
        else:
            return 'left'

    def _get_alignment(self):
        """ Right-align numeric columns, left-align everything else. """
        import numpy as np

        # Removed leftover commented-out debug prints.
        column = self.item[self.column_id]
        if np.issubdtype(column.dtype, np.number):
            return 'right'
        else:
            return 'left'

    def _get_font(self):
        """ Return the font for the column: either a single font for all
        columns, or a per-column mapping with a default. """
        if isinstance(self._fonts, toolkit_object('font_trait:TraitsFont')):
            return self._fonts
        else:
            return self._fonts.get(self.column_id, 'Courier 10')

    def _get_format(self):
        """ Return the %-style format for the column: a single format, or
        a per-column mapping with a '%s' default. """
        if isinstance(self._formats, str):
            return self._formats
        else:
            return self._formats.get(self.column_id, '%s')

    def _get_content(self):
        return self.item[self.column_id].iloc[0]

    def _get_text(self):
        # `fmt` instead of `format` to avoid shadowing the builtin.
        fmt = self.get_format(self.object, self.name, self.row, self.column)
        return fmt % self.get_content(
            self.object, self.name, self.row, self.column
        )

    def _set_text(self, value):
        """ Coerce the edited text to the column's dtype and store it. """
        column = self.item[self.column_id]
        dtype = column.dtype
        value = dtype.type(value)
        column.iloc[0] = value

    def _get_index_text(self):
        return str(self.item.index[0])

    def _set_index_text(self, value):
        """ Coerce the edited text to the index dtype and store it
        in place. """
        index = getattr(self.object, self.name).index
        dtype = index.dtype
        value = dtype.type(value)
        index.values[self.row] = value

    # ---- Adapter methods that are not sensitive to item type ----------------

    def get_item(self, object, trait, row):
        """ Override the base implementation to work with DataFrames

        This returns a dataframe with one row, rather than a series,
        since using a dataframe preserves dtypes.
        """
        return getattr(object, trait).iloc[row:row + 1]

    def delete(self, object, trait, row):
        """ Override the base implementation to work with DataFrames

        Unavoidably does a copy of the data, setting the trait with the
        new value.
        """
        import pandas as pd

        df = getattr(object, trait)
        if 0 < row < len(df) - 1:
            new_df = pd.concat([df.iloc[:row, :], df.iloc[row + 1:, :]])
        elif row == 0:
            new_df = df.iloc[row + 1:, :]
        else:
            new_df = df.iloc[:row, :]
        setattr(object, trait, new_df)

    def insert(self, object, trait, row, value):
        """ Override the base implementation to work with DataFrames

        Unavoidably does a copy of the data, setting the trait with the
        new value.
        """
        import pandas as pd

        df = getattr(object, trait)
        if 0 < row < len(df) - 1:
            new_df = pd.concat([df.iloc[:row, :], value, df.iloc[row:, :]])
        elif row == 0:
            new_df = pd.concat([value, df])
        else:
            new_df = pd.concat([df, value])
        setattr(object, trait, new_df)
class ImagePlot(Base2DPlot):
    """ A plot based on an image. """

    #------------------------------------------------------------------------
    # Data-related traits
    #------------------------------------------------------------------------

    # Overall alpha value of the image. Ranges from 0.0 for transparent to 1.0
    # for full intensity.
    alpha = Trait(1.0, Range(0.0, 1.0))

    # The interpolation method to use when rendering an image onto the GC.
    interpolation = Enum("nearest", "bilinear", "bicubic")

    #------------------------------------------------------------------------
    # Private traits
    #------------------------------------------------------------------------

    # Are the cache traits valid? If False, new ones need to be computed.
    _image_cache_valid = Bool(False)

    # Cached image of the bmp data (not the bmp data in self.data.value).
    _cached_image = Instance(GraphicsContextArray)

    # Tuple-defined rectangle (x, y, dx, dy) in screen space in which the
    # **_cached_image** is to be drawn.
    _cached_dest_rect = Either(Tuple, List)

    #------------------------------------------------------------------------
    # Base2DPlot interface
    #------------------------------------------------------------------------

    def _render(self, gc):
        """ Actually draws the plot.

        Implements the Base2DPlot interface.
        """
        if not self._image_cache_valid:
            self._compute_cached_image()

        if "bottom" in self.origin:
            sy = -1
        else:
            sy = 1
        if "left" in self.origin:
            sx = 1
        else:
            sx = -1
        # If the orientation is flipped, the BR and TL cases are swapped
        if self.orientation == "v" and sx == sy:
            sx, sy = -sx, -sy

        with gc:
            gc.clip_to_rect(self.x, self.y, self.width, self.height)
            gc.set_alpha(self.alpha)

            # Kiva image interpolation note:
            # Kiva's Agg backend uses the interpolation setting of the
            # *source* image to determine the type of interpolation to use
            # when drawing the image. The mac backend uses the interpolation
            # setting on the destination GC.
            old_interp = self._cached_image.get_image_interpolation()
            if hasattr(gc, "set_interpolation_quality"):
                from kiva.quartz.ABCGI import InterpolationQuality
                interp_quality_dict = {
                    "nearest": InterpolationQuality.none,
                    "bilinear": InterpolationQuality.low,
                    "bicubic": InterpolationQuality.high,
                }
                gc.set_interpolation_quality(
                    interp_quality_dict[self.interpolation])
            elif hasattr(gc, "set_image_interpolation"):
                self._cached_image.set_image_interpolation(self.interpolation)

            x, y, w, h = self._cached_dest_rect
            if self.orientation == "h":
                # for horizontal orientation:
                gc.translate_ctm(x + w / 2, y + h / 2)  # translate back normally
            else:
                # for vertical orientation:
                gc.translate_ctm(y + h / 2, x + w / 2)  # translate back with dx,dy swap
            gc.scale_ctm(sx, sy)                        # flip axes as appropriate
            if self.orientation == "v":
                # for vertical orientation:
                gc.scale_ctm(1, -1)                     # restore origin to lower left
                gc.rotate_ctm(pi / 2)                   # rotate 1/4 turn clockwise
            gc.translate_ctm(-x - w / 2, -y - h / 2)    # translate image center to origin
            gc.draw_image(self._cached_image, self._cached_dest_rect)
            self._cached_image.set_image_interpolation(old_interp)

    def map_index(self, screen_pt, threshold=0.0, outside_returns_none=True,
                  index_only=False):
        """ Maps a screen space point to an index into the plot's index
        array(s).

        Implements the AbstractPlotRenderer interface. Uses 0.0 for
        *threshold*, regardless of the passed value.
        """
        # For image plots, treat hittesting threshold as 0.0, because it's
        # the only thing that really makes sense.
        return Base2DPlot.map_index(self, screen_pt, 0.0,
                                    outside_returns_none, index_only)

    #------------------------------------------------------------------------
    # Private methods
    #------------------------------------------------------------------------

    def _compute_cached_image(self, data=None):
        """ Computes the correct sub-image coordinates and renders an image
        into self._cached_image.

        The parameter *data* is for subclasses that might not store an
        RGB(A) image as the value, but need to compute one to display
        (colormaps, etc.).
        """
        if data is None:
            data = self.value.data

        (lpt, upt) = self.index.get_bounds()
        ll_x, ll_y = self.map_screen([lpt])[0]
        ur_x, ur_y = self.map_screen([upt])[0]
        if "right" in self.origin:
            ll_x, ur_x = ur_x, ll_x
        if "top" in self.origin:
            ll_y, ur_y = ur_y, ll_y
        virtual_width = ur_x - ll_x
        virtual_height = ur_y - ll_y

        args = self.position \
            + self.bounds \
            + [ll_x, ll_y, virtual_width, virtual_height]
        img_pixels, gc_rect = self._calc_zoom_coords(*args)

        # Grab the appropriate sub-image, if necessary
        if img_pixels is not None:
            i1, j1, i2, j2 = img_pixels
            if "top" in self.origin:
                y_length = self.value.get_array_bounds()[1][1]
                j1 = y_length - j1
                j2 = y_length - j2
                # swap so that j1 < j2
                j1, j2 = j2, j1
            if "right" in self.origin:
                x_length = self.value.get_array_bounds()[0][1]
                i1 = x_length - i1
                i2 = x_length - i2
                # swap so that i1 < i2
                i1, i2 = i2, i1
            # Since data is row-major, j1 and j2 go first
            data = data[j1:j2, i1:i2]

        # Furthermore, the data presented to the GraphicsContextArray needs
        # to be contiguous. If it is not, we need to make a copy.
        if not data.flags['C_CONTIGUOUS']:
            data = data.copy()

        if data.shape[2] == 3:
            kiva_depth = "rgb24"
        elif data.shape[2] == 4:
            kiva_depth = "rgba32"
        else:
            # BUGFIX: was py2 `raise RuntimeError, ...` (SyntaxError on
            # py3) and formatted a non-existent `data.value_depth`
            # attribute; report the actual offending channel count.
            raise RuntimeError(
                "Unknown colormap depth value: %i" % data.shape[2]
            )

        self._cached_image = GraphicsContextArray(data, pix_format=kiva_depth)
        if gc_rect is not None:
            self._cached_dest_rect = gc_rect
        else:
            self._cached_dest_rect = (ll_x, ll_y,
                                      virtual_width, virtual_height)

        self._image_cache_valid = True

    def _calc_zoom_coords(self, px, py, plot_width, plot_height,
                          ix, iy, image_width, image_height):
        """ Calculates the coordinates of a zoomed sub-image.

        Because of floating point limitations, it is not advisable to
        request a extreme level of zoom, e.g., idx or idy > 10^10.

        Parameters
        ----------
        px : number
            X-coordinate of plot pixel bounds
        py : number
            Y-coordinate of plot pixel bounds
        plot_width : number
            Width of plot pixel bounds
        plot_height : number
            Height of plot pixel bounds
        ix : number
            X-coordinate of image pixel bounds
        iy : number
            Y-coordinate of image pixel bounds
        image_width : number
            Width of image pixel bounds
        image_height : number
            Height of image pixel bounds

        Returns
        -------
        ((i1, j1, i2, j2), (x, y, dx, dy))
            Lower left and upper right indices of the sub-image to be
            extracted, and graphics context origin and extents to draw
            the sub-image into.
        (None, None)
            No image extraction is necessary.
        """
        if (image_width < 1.5 * plot_width) and \
                (image_height < 1.5 * plot_height):
            return (None, None)

        if 0 in (plot_width, plot_height, image_width, image_height):
            return (None, None)

        # We figure out the subimage coordinates using a two-step process:
        # 1. convert the plot boundaries from screen space into pixel offsets
        #    in the virtual image
        # 2. convert the coordinates in the virtual image into indices
        #    into the image data array
        # 3. from the data array indices, compute the screen coordinates of
        #    the corners of the data array sub-indices
        # in all the cases below, x1,y1 refers to the lower-left corner, and
        # x2,y2 refers to the upper-right corner.

        # 1. screen space -> pixel offsets
        if self.orientation == "h":
            x1 = px - ix
            x2 = (px + plot_width) - ix
            y1 = py - iy
            y2 = (py + plot_height) - iy
        else:
            x1 = px - ix
            x2 = (px + plot_height) - ix
            y1 = py - iy
            y2 = (py + plot_width) - iy

        # 2. pixel offsets -> data array indices
        # X and Y are transposed because for image plot data
        pixel_bounds = self.value.get_array_bounds()
        xpixels = pixel_bounds[0][1] - pixel_bounds[0][0]
        ypixels = pixel_bounds[1][1] - pixel_bounds[1][0]
        i1 = max(floor(float(x1) / image_width * xpixels), 0)
        i2 = min(ceil(float(x2) / image_width * xpixels), xpixels)
        j1 = max(floor(float(y1) / image_height * ypixels), 0)
        j2 = min(ceil(float(y2) / image_height * ypixels), ypixels)

        # 3. array indices -> new screen space coordinates
        x1 = float(i1) / xpixels * image_width + ix
        x2 = float(i2) / xpixels * image_width + ix
        y1 = float(j1) / ypixels * image_height + iy
        y2 = float(j2) / ypixels * image_height + iy

        # Handle really, really, subpixel cases
        subimage_index = [i1, j1, i2, j2]
        subimage_coords = [x1, y1, x2 - x1, y2 - y1]
        plot_dimensions = (px, py, plot_width, plot_height)
        xparams = (0, 2)
        yparams = (1, 3)
        for pos_index, size_index in (xparams, yparams):
            if subimage_index[pos_index] == subimage_index[pos_index + 2] - 1:
                # xcoords lie inside the same pixel, so set the subimage
                # coords to be the width of the image
                subimage_coords[pos_index] = plot_dimensions[pos_index]
                subimage_coords[size_index] = plot_dimensions[size_index]
            elif subimage_index[pos_index] == \
                    subimage_index[pos_index + 2] - 2:
                # coords span across a pixel boundary. Find the scaling
                # factor of the virtual (and potentially large) subimage
                # size to the image size, and scale it down. We can do
                # this without distortion b/c we are straddling only one
                # pixel boundary.
                #
                # If we scale down the extent to twice the screen size, we
                # can be sure that no matter what the offset, we will cover
                # the entire screen, since we are only straddling one pixel
                # boundary. The formula for calculating the new origin can
                # be worked out on paper.
                extent = subimage_coords[size_index]
                pixel_extent = extent / 2  # we are indexed into two pixels
                origin = subimage_coords[pos_index]
                scale = float(2 * plot_dimensions[size_index] / extent)
                subimage_coords[size_index] *= scale
                subimage_coords[pos_index] = \
                    origin + (1 - scale) * pixel_extent

        # BUGFIX: was `map(int, ...)`, which on py3 returns a lazy,
        # single-use iterator; the caller unpacks and may re-index the
        # result, so materialize a concrete list of ints.
        subimage_index = [int(v) for v in subimage_index]
        return [subimage_index, subimage_coords]

    #------------------------------------------------------------------------
    # Event handlers
    #------------------------------------------------------------------------

    def _index_data_changed_fired(self):
        self._image_cache_valid = False
        self.request_redraw()

    def _index_mapper_changed_fired(self):
        self._image_cache_valid = False
        self.request_redraw()

    def _value_data_changed_fired(self):
        self._image_cache_valid = False
        self.request_redraw()