Ejemplo n.º 1
0
class TextHolder(Atom):
    """Minimal Atom model holding a single string value."""

    # The held text; defaults to the empty string (Atom Str()).
    text = Str()
Ejemplo n.º 2
0
class ReceiverChannel(PhysicalChannel):
    '''
    A trigger input on a receiver.
    '''
    #: String identifying the trigger input channel on the receiver.
    channel = Str()
Ejemplo n.º 3
0
class ConfigureExecuteTask(InstrumentTask):
    """Configures the QM, executes the QUA program and fetches the results

    This task supports parameters in both the configuration and the
    QUA program.

    The program and config files are regular python files that should
    contain at least two top-level functions:

    - get_parameters() that should return the parameters dictionary
    of the file used to parametrize the config/program.

    - get_config(parameters)/get_prog(parameters) for the
    configuration file and the program file respectively. The
    parameters argument is a dictionary containing the values entered
    by the users and should be converted to the appropriate python type
    before using it.

    The two files can be merged into one if wanted.

    """

    #: Path to the python configuration file
    path_to_config_file = Str().tag(pref=True)

    #: Path to the python program file
    path_to_program_file = Str().tag(pref=True)

    #: Path to the folder where the config and program files are saved
    path_to_save = Str(default="{default_path}/configs_and_progs").tag(
        pref=True)

    #: Prefix used when saving the configuration and program files
    save_prefix = Str(default="{meas_id}").tag(pref=True)

    #: Parameters entered by the user for the program and config
    parameters = Typed(dict).tag(pref=True)

    #: Comments associated with the parameters
    comments = Typed(dict).tag(pref=True)

    #: Duration of the simulation in ns
    simulation_duration = Str(default="1000").tag(pref=True)

    #: Doesn't wait for the program to end if this is on
    pause_mode = Bool(False).tag(pref=True)

    #: Create the entry which contains all the data returned by the OPX
    #: in a recarray
    database_entries = set_default({'Results': {}})

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self._config_module = None
        self._program_module = None
        self.parameters = {}
        self.comments = {}

    def check(self, *args, **kwargs):
        """Check that both modules are loaded and parameters evaluate.

        Returns the (test, traceback) pair expected by the framework.

        """
        test, traceback = super().check(*args, **kwargs)

        if not test:
            return test, traceback

        if self._config_module is None or self._program_module is None:
            msg = 'Config or program missing'
            traceback[self.get_error_path() + '-trace'] = msg

        # NOTE(review): every failing parameter writes to the same traceback
        # key, so only the last failure is visible to the user.
        for key, value in self.parameters.items():
            try:
                self.format_and_eval_string(value)
            except Exception as e:
                msg = ("Couldn't evaluate {} : {}")
                traceback[self.get_error_path() + '-trace'] = msg.format(
                    value, e)

        return test, traceback

    def perform(self):
        """Configure the QM, execute the program and fetch the results.

        """
        self._update_parameters()

        config_to_set, program_to_execute = self._evaluate_modules()

        # Best effort: a failure to save the sources never aborts the run.
        self._save_sources()

        self.driver.clear_all_job_results()
        self.driver.set_config(config_to_set)
        self.driver.execute_program(program_to_execute)

        if not self.pause_mode:
            self._fetch_results()

    def refresh_config(self):
        self._post_setattr_path_to_config_file(self.path_to_config_file,
                                               self.path_to_config_file)

    def refresh_program(self):
        self._post_setattr_path_to_program_file(self.path_to_program_file,
                                                self.path_to_program_file)

    def simulate(self):
        """Simulate the program using the OPX

        Is always executed outside of a measurement, during editing
        """
        self._update_parameters()

        config_to_set, program_to_execute = self._evaluate_modules()

        with self.test_driver() as driver:
            driver.set_config(config_to_set)
            # The OPX simulation duration is expressed in 4 ns clock cycles.
            driver.simulate_program(program_to_execute,
                                    duration=int(self.simulation_duration) //
                                    4)

    #--------------------------Private API------------------------------#

    #: Module containing the configuration file
    _config_module = Value()

    #: Module containing the program file
    _program_module = Value()

    def _evaluate_modules(self):
        """Evaluate the user parameters and build the config and program.

        Returns the (config, program) pair produced by the user modules.

        """
        evaluated_parameters = {}
        for key, value in self.parameters.items():
            evaluated_parameters[key] = self.format_and_eval_string(value)

        config = self._config_module.get_config(evaluated_parameters)
        program = self._program_module.get_prog(evaluated_parameters)
        return config, program

    def _save_sources(self):
        """Copy the config and program files to the save folder, if any.

        Failures to save are logged but deliberately swallowed so they
        never abort a measurement.

        """
        if self.path_to_save == "":
            return

        try:
            root_path = Path(self.format_string(self.path_to_save))
            if not root_path.is_dir():
                if root_path.exists():
                    logger.warning(
                        f"Couldn't save the config and program "
                        f"to {root_path} because {root_path} is "
                        f"not a directory")
                    return
                root_path.mkdir(parents=True)

            save_prefix = self.format_string(self.save_prefix)

            config_path = root_path / f"{save_prefix}_config.py"
            program_path = root_path / f"{save_prefix}_program.py"

            shutil.copyfile(self.path_to_config_file, config_path)
            shutil.copyfile(self.path_to_program_file, program_path)

        except NotADirectoryError:
            # A component of the path is a regular file: give up silently,
            # matching the original best-effort behaviour.
            pass

    def _fetch_results(self):
        """Wait for the job to complete and write results to the database.

        """
        self.driver.wait_for_all_results()
        results = self.driver.get_results()

        dt_array = []
        all_data = []
        for (name, handle) in results:
            # Strip the raw-ADC suffix so both inputs share one name.
            if name.endswith('_input1') or name.endswith('_input2'):
                name = name[:-7]
            all_data.append(handle.fetch_all(flat_struct=True))
            dt_array += [(name, all_data[-1].dtype, all_data[-1].shape)]
            self.write_in_database(f"variable_{name}", all_data[-1])
            if handle.has_dataloss():
                logger.warning(f"{name} might have data loss")

        results_recarray = np.array([tuple(all_data)], dtype=dt_array)
        self.write_in_database('Results', results_recarray)

    def _import_python_file(self, path):
        """Import a python file as an anonymous module.

        Returns the module, or None if the import failed for any reason
        (the failure is logged).

        """
        importlib.invalidate_caches()
        try:
            spec = importlib.util.spec_from_file_location("", path)
            module = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(module)
        except FileNotFoundError:
            logger.error(f"File {path} not found")
        except AttributeError:
            logger.error(f"File {path} is not a python file")
        except Exception as e:
            logger.error(f"An exception occurred when trying to import "
                         f"{path}")
            logger.error(e)
        else:
            return module
        return None

    def _post_setattr_path_to_program_file(self, old, new):
        """Reload the program module whenever its path changes."""
        self._program_module = None

        if new:
            self._program_module = self._import_python_file(new)

        self._update_parameters()
        self._find_variables()

    def _post_setattr_path_to_config_file(self, old, new):
        """Reload the config module whenever its path changes."""
        self._config_module = None

        if new:
            self._config_module = self._import_python_file(new)

        self._update_parameters()

    def _update_parameters(self):
        """Updates the parameters and attributes

        """
        params_config, params_program = {}, {}
        comments_config, comments_program = {}, {}

        if self._config_module:
            try:
                params_config, comments_config = self._parse_parameters(
                    self._config_module.get_parameters())
            except AttributeError:
                logger.error(f"{self.path_to_config_file} needs to "
                             f"have a get_parameters function "
                             f"with no arguments.")
            except Exception as e:
                logger.error(f"An exception occurred when trying to get the "
                             f"parameters from {self.path_to_config_file}")
                logger.error(e)

        if self._program_module:
            try:
                params_program, comments_program = self._parse_parameters(
                    self._program_module.get_parameters())
            except AttributeError:
                logger.error(f"{self.path_to_program_file} needs "
                             f"to have a get_parameters function")
            except Exception as e:
                logger.error(f"An exception occurred when trying to get the "
                             f"parameters from {self.path_to_program_file}")
                logger.error(e)

        # Program entries take precedence over config entries on clash.
        comments_config.update(comments_program)
        self.comments = comments_config

        params_config.update(params_program)
        self.parameters = params_config

    def _parse_parameters(self, params_in):
        """Parses the parameters dictionary entered in the file

        Returns the parameters and comments dictionaries

        """
        tmp_parameters = {}
        tmp_comments = {}

        for key, value in params_in.items():
            if isinstance(value, tuple) and len(value) == 2:
                # Avoid updating parameters if they already exist
                if not self.parameters or key not in self.parameters:
                    tmp_parameters[key] = str(value[0])
                else:
                    tmp_parameters[key] = self.parameters[key]
                tmp_comments[key] = str(value[1])
            else:
                if not self.parameters or key not in self.parameters:
                    tmp_parameters[key] = str(value)
                else:
                    tmp_parameters[key] = self.parameters[key]
                tmp_comments[key] = ''

        return tmp_parameters, tmp_comments

    def _find_variables(self):
        """Attempts to find the variables saved in a QUA program

        There are 2 types are variables: scalars and raw ADC data.
        Scalars have to be explicitly saved with a call to the save
        function whereas raw ADC data can be saved by using a string
        instead of None as the third argument of the measure function.

        New in v4: there are now streams that can be declared in the QUA
        program and saved in a special stream_processing section of the
        QUA program.

        The strategy employed here to find the name of the variables
        is

        1) Find the get_prog() function

        2) Find the name of the variable returned

        3) Find a with statement that defines that variable (with the
        program() context manager)

        4) Find all instances of save(), save_all() and measure() inside
        the with statement.

        In the end, we are (almost) guaranteed to find a superset of
        all variables that will be returned by the OPX.

        """

        saved_vars = set()
        get_results_fun, prog_name, program_node = None, None, None

        # Make sure the program is somewhat valid before parsing it
        try:
            if self._program_module:
                with open(self.path_to_program_file) as f:
                    try:
                        root = ast.parse(f.read())
                    except Exception as e:
                        logger.error(f"An error occurred when parsing "
                                     f"{self.path_to_program_file}")
                        logger.error(e)
                        raise ParseError

                for i in ast.iter_child_nodes(root):
                    if isinstance(i, ast.FunctionDef) and i.name == 'get_prog':
                        get_results_fun = i
                        break

                if not get_results_fun:
                    logger.error("Unable to find the get_prog function "
                                 "in the program file")
                    raise ParseError

                for i in ast.iter_child_nodes(get_results_fun):
                    # Only `return <name>` counts; anything else (e.g. a
                    # call) has no .id and would previously crash.
                    if (isinstance(i, ast.Return)
                            and isinstance(i.value, ast.Name)):
                        prog_name = i.value.id
                        break

                if not prog_name:
                    logger.error("Unable to find the name of the QUA program "
                                 "in the get_prog function")
                    raise ParseError

                for i in ast.iter_child_nodes(get_results_fun):
                    if (isinstance(i, ast.With) and i.items[0].optional_vars
                            and isinstance(i.items[0].optional_vars, ast.Name)
                            and i.items[0].optional_vars.id == prog_name):
                        program_node = i
                        break

                if not program_node:
                    logger.error("Unable to find the QUA program definition "
                                 "in the get_prog function")
                    raise ParseError

                # NOTE: ast.Str is deprecated since python 3.8 in favor of
                # ast.Constant; kept here for older-interpreter support.
                for i in ast.walk(program_node):
                    if isinstance(i, ast.Call) and isinstance(
                            i.func, ast.Name):
                        if (i.func.id == 'save' and len(i.args) > 1
                                and isinstance(i.args[1], ast.Str)):
                            saved_vars.add(i.args[1].s)
                        elif (i.func.id == 'measure' and len(i.args) > 2
                              and isinstance(i.args[2], ast.Str)):
                            saved_vars.add(i.args[2].s)
                    elif (isinstance(i, ast.Call)
                          and isinstance(i.func, ast.Attribute)
                          and i.func.attr in ('save', 'save_all')
                          and i.args and isinstance(i.args[0], ast.Str)):
                        saved_vars.add(i.args[0].s)

        except ParseError:
            logger.error("Unable to parse the program file to find "
                         "the variable names")

        # Update the database
        de = self.database_entries.copy()
        for k in self.database_entries:
            if k.startswith('variable'):
                del de[k]

        for i in saved_vars:
            de['variable_' + i] = [0.0]

        self.database_entries = de
Ejemplo n.º 4
0
class DeclarativeNode(CompilerNode):
    """ A compiler node which represents a declarative declaration.

    The compiler generates instances of this class; each instance
    carries everything needed to build its portion of the declarative
    hierarchy at runtime.

    """
    #: The declarative type object to instantiate.
    klass = Typed(type)

    #: The local identifier to associate with the instance.
    identifier = Str()

    #: Whether or not the node should store the locals in the map.
    store_locals = Bool(False)

    #: Whether or not the instance intercepts the child nodes.
    child_intercept = Bool(False)

    #: The expression engine to associate with the instance.
    engine = Typed(ExpressionEngine)

    #: The set of scope keys for the closure scopes. This will be None
    #: if the node does not require any closure scopes.
    closure_keys = Typed(set)

    #: The superclass nodes of this node. This will be None if the
    #: node represents a raw declarative object vs an enamldef.
    super_node = ForwardTyped(lambda: EnamlDefNode)

    def __call__(self, parent):
        """ Instantiate the type hierarchy.

        This is invoked by a parent compiler node when the declarative
        hierarchy is being instantiated.

        Parameters
        ----------
        parent : Declarative or None
            The parent declarative object for the hierarchy.

        Returns
        -------
        result : Declarative
            The declarative instance created by the node.

        """
        cls = self.klass
        # Allocate first, populate, then run __init__ so the instance
        # is fully wired up before user initialization code executes.
        obj = cls.__new__(cls)
        self.populate(obj)
        obj.__init__(parent)
        return obj

    def populate(self, instance):
        """ Populate an instance generated for the node.

        Parameters
        ----------
        instance : Declarative
            The declarative instance for this node.

        """
        if self.super_node is not None:
            self.super_node(instance)
        scope = peek_scope()
        key = self.scope_key
        if self.identifier:
            scope[self.identifier] = instance
        if self.store_locals:
            instance._d_storage[key] = scope
        if self.engine is not None:
            instance._d_engine = self.engine
        if self.closure_keys is not None:
            for closure_key in self.closure_keys:
                instance._d_storage[closure_key] = fetch_scope(closure_key)
        if self.child_intercept:
            # Hand the instance a copy so it may mutate the list freely.
            instance.child_node_intercept(list(self.children), key, scope)
        else:
            for child in self.children:
                child(instance)

    def size(self):
        """ Return the size of the instantiated node.

        """
        return 1

    def update_id_nodes(self, mapping):
        """ Update the id nodes for this node.

        Parameters
        ----------
        mapping : sortedmap
            The mapping to fill with the identifier information.

        """
        identifier = self.identifier
        if identifier:
            mapping[identifier] = self
        super(DeclarativeNode, self).update_id_nodes(mapping)

    def copy(self):
        """ Create a copy of this portion of the node hierarchy.

        Returns
        -------
        result : DeclarativeNode
            A copy of the node hierarchy from this node down.

        """
        node = super(DeclarativeNode, self).copy()
        node.klass = self.klass
        node.identifier = self.identifier
        node.store_locals = self.store_locals
        node.child_intercept = self.child_intercept
        engine = self.engine
        if engine is not None:
            node.engine = engine.copy()
        super_node = self.super_node
        if super_node is not None:
            node.super_node = super_node.copy()
        closure_keys = self.closure_keys
        if closure_keys is not None:
            node.closure_keys = closure_keys.copy()
        return node
Ejemplo n.º 5
0
class ToolkitDialog(ToolkitObject):
    """ A base class for defining toolkit dialogs.

    A toolkit dialog is a dialog where the content is defined by the
    toolkit rather than the user. Customary examples would be a file
    dialog or a color selection dialog, where the implementation can
    often be a native operating system dialog.

    """
    #: The title of the dialog window.
    title = d_(Str())

    #: An optional callback which will be invoked when the dialog is
    #: closed. This is a convenience to make it easier to handle a
    #: dialog opened in non-blocking mode. The callback must accept
    #: a single argument, which will be the dialog instance.
    callback = d_(Callable())

    #: Whether to destroy the dialog widget on close. The default is
    #: True since dialogs are typically used in a transitory fashion.
    destroy_on_close = d_(Bool(True))

    #: An event fired if the dialog is accepted. It has no payload.
    accepted = d_(Event(), writable=False)

    #: An event fired when the dialog is rejected. It has no payload.
    rejected = d_(Event(), writable=False)

    #: An event fired when the dialog is finished. The payload is the
    #: boolean result of the dialog.
    finished = d_(Event(bool), writable=False)

    #: Whether or not the dialog was accepted by the user. It will be
    #: updated when the dialog is closed. This value is output only.
    result = Bool(False)

    #: A reference to the ProxyToolkitDialog object.
    proxy = Typed(ProxyToolkitDialog)

    def show(self):
        """ Open the dialog as a non modal dialog.

        """
        self._setup_and_prepare()
        self.proxy.show()

    def open(self):
        """ Open the dialog as a window modal dialog.

        """
        self._setup_and_prepare()
        self.proxy.open()

    def exec_(self):
        """ Open the dialog as an application modal dialog.

        Returns
        -------
        result : bool
            Whether or not the dialog was accepted.

        """
        self._setup_and_prepare()
        self.proxy.exec_()
        return self.result

    def accept(self):
        """ Accept the current state and close the dialog.

        """
        if self.proxy_is_active:
            self.proxy.accept()

    def reject(self):
        """ Reject the current state and close the dialog.

        """
        if self.proxy_is_active:
            self.proxy.reject()

    #--------------------------------------------------------------------------
    # Observers
    #--------------------------------------------------------------------------
    @observe('title')
    def _update_proxy(self, change):
        """ An observer which updates the proxy when the data changes.

        """
        # The superclass implementation is sufficient.
        super(ToolkitDialog, self)._update_proxy(change)

    #--------------------------------------------------------------------------
    # Utility Methods
    #--------------------------------------------------------------------------
    def _setup_and_prepare(self):
        """ Ensure the proxy is initialized and active, then prepare.

        This factors out the sequence shared by show(), open() and
        exec_().

        """
        if not self.is_initialized:
            self.initialize()
        if not self.proxy_is_active:
            self.activate_proxy()
        self._prepare()

    def _proxy_finished(self, result):
        """ Called by the proxy object when the dialog is finished.

        Parameters
        ----------
        result : bool
            Whether or not the dialog was accepted.

        """
        self.result = result
        self.finished(result)
        if result:
            self.accepted()
        else:
            self.rejected()
        if self.callback:
            self.callback(self)
        if self.destroy_on_close:
            deferred_call(self.destroy)

    def _prepare(self):
        """ Prepare the dialog to be shown.

        This method can be reimplemented by subclasses.

        """
        self.result = False
Ejemplo n.º 6
0
class LinePlotModel(Atom):
    """
    This class performs all the required line plots.

    Attributes
    ----------
    data : array
        Experimental data
    _fit : class object
        Figure object from matplotlib
    _ax : class object
        Axis object from matplotlib
    _canvas : class object
        Canvas object from matplotlib
    element_id : int
        Index of element
    parameters : `atom.List`
        A list of `Parameter` objects, subclassed from the `Atom` base class.
        These `Parameter` objects hold all relevant xrf information
    elist : list
        Emission energy and intensity for given element
    plot_opt : int
        Linear or log plot
    total_y : dict
        Results for k lines
    total_l : dict
        Results for l and m lines
    prefit_x : array
        X axis with limited range
    plot_title : str
        Title for plotting
    fit_x : array
        x value for fitting
    fit_y : array
        fitted data
    plot_type : list
        linear or log plot
    max_v : float
        max value of data array
    incident_energy : float
        in KeV
    """
    data = Typed(object) #Typed(np.ndarray)
    exp_data_label = Str('experiment')

    _fig = Typed(Figure)
    _ax = Typed(Axes)
    _canvas = Typed(object)
    element_id = Int(0)
    parameters = Dict()
    elist = List()
    scale_opt = Int(0)
    #total_y = Dict()
    #total_l = Dict()
    #total_m = Dict()
    #total_pileup = Dict()

    prefit_x = Typed(object)
    plot_title = Str()
    #fit_x = Typed(np.ndarray)
    #fit_y = Typed(np.ndarray)
    #residual = Typed(np.ndarray)

    plot_type = List()
    max_v = Float()
    incident_energy = Float(30.0)

    eline_obj = List()

    plot_exp_opt = Bool(False)
    plot_exp_obj = Typed(Line2D)
    show_exp_opt = Bool(False)

    plot_exp_list = List()
    data_sets = Typed(OrderedDict)

    auto_fit_obj = List()
    show_autofit_opt = Bool()

    plot_fit_obj = List() #Typed(Line2D)
    show_fit_opt = Bool(False)
    #fit_all = Typed(object)

    plot_style = Dict()

    roi_plot_dict = Dict()
    roi_dict = Typed(object) #OrderedDict()

    log_range = List()
    linear_range = List()
    plot_escape_line = Int(0)
    emission_line_window = Bool(True)
    det_materials = Int(0)
    escape_e = Float(1.73998)
    limit_cut = Int()
    #prefix_name_roi = Str()
    #element_for_roi = Str()
    #element_list_roi = List()
    #roi_dict = Typed(object) #OrderedDict()

    #data_dict = Dict()
    #roi_result = Dict()

    def __init__(self):
        """Create the figure/axes and initialize the default plot state."""
        self._fig = plt.figure(figsize=(4, 4))

        self._ax = self._fig.add_subplot(111)
        # Axes.set_axis_bgcolor was deprecated in matplotlib 2.0 and
        # removed in 2.2; prefer set_facecolor when available.
        if hasattr(self._ax, 'set_facecolor'):
            self._ax.set_facecolor('lightgrey')
        else:
            self._ax.set_axis_bgcolor('lightgrey')

        self._ax.set_xlabel('Energy [keV]')
        self._ax.set_ylabel('Counts')
        self._ax.set_yscale('log')
        self.plot_type = ['LinLog', 'Linear']

        self._ax.autoscale_view(tight=True)
        self._ax.legend(loc=2)

        self._color_config()
        self._fig.tight_layout(pad=0.5)
        self.max_v = 1.0
        # when we calculate max value, data smaller than 500, 0.5 Kev, can be ignored.
        # And the last point of data is also huge, and should be cut off.
        self.limit_cut = 100
        #self._ax.margins(x=0.0, y=0.10)

    def _color_config(self):
        """Build the matplotlib style table used for every plotted curve."""
        styles = {}
        styles['experiment'] = {'color': 'blue', 'linestyle': '',
                                'marker': '.', 'label': self.exp_data_label}
        styles['background'] = {'color': 'indigo', 'marker': '+',
                                'markersize': 1, 'label': 'background'}
        styles['emission_line'] = {'color': 'red', 'linewidth': 2}
        styles['roi_line'] = {'color': 'black', 'linewidth': 2}
        styles['k_line'] = {'color': 'green', 'label': 'k lines'}
        styles['l_line'] = {'color': 'magenta', 'label': 'l lines'}
        styles['m_line'] = {'color': 'brown', 'label': 'm lines'}
        styles['compton'] = {'color': 'darkcyan', 'linewidth': 1.5,
                             'label': 'compton'}
        styles['elastic'] = {'color': 'purple', 'label': 'elastic'}
        styles['escape'] = {'color': 'darkblue', 'label': 'escape'}
        styles['pileup'] = {'color': 'orange', 'label': 'pileup'}
        styles['fit'] = {'color': 'red', 'label': 'fit', 'linewidth': 2.5}
        styles['residual'] = {'color': 'black', 'label': 'residual',
                              'linewidth': 2.0}
        self.plot_style = styles

    def plot_exp_data_update(self, change):
        """
        Observer function to be connected to the fileio model
        in the top-level gui.py startup

        Resets the display toggles when the file data changes; their
        observers then hide the corresponding stale curves.

        Parameters
        ----------
        change : dict
            This is the dictionary that gets passed to a function
            with the @observe decorator
        """
        self.plot_exp_opt = False   # exp data for fitting
        self.show_exp_opt = False   # all exp data from different channels
        self.show_fit_opt = False

    def _update_canvas(self):
        """Refresh the legend, layout and canvas after any plot change."""
        self._ax.legend(loc=2)
        try:
            # Legend.draggable() is unavailable on some matplotlib
            # versions; fall back to a plain legend in that case.
            self._ax.legend(framealpha=0.2).draggable()
        except AttributeError:
            self._ax.legend(framealpha=0.2)
        self._fig.tight_layout(pad=0.5)
        #self._ax.margins(x=0.0, y=0.10)

        # when we click the home button on matplotlib gui,
        # relim will remember the previously defined x range
        self._ax.relim(visible_only=True)
        self._fig.canvas.draw()

    def _update_ylimit(self):
        """Recompute y-axis limits from the data maximum (empirical margins)."""
        top = self.max_v
        self.log_range = [top * 1e-5, top * 2]
        self.linear_range = [-0.3 * top, top * 1.2]

    def exp_label_update(self, change):
        """
        Observer function to be connected to the fileio model
        in the top-level gui.py startup

        Parameters
        ----------
        change : dict
            This is the dictionary that gets passed to a function
            with the @observe decorator
        """
        new_label = change['value']
        self.exp_data_label = new_label
        self.plot_style['experiment']['label'] = new_label

    # @observe('exp_data_label')
    # def _change_exp_label(self, change):
    #     if change['type'] == 'create':
    #         return
    #     self.plot_style['experiment']['label'] = change['value']

    @observe('parameters')
    def _update_energy(self, change):
        """Sync `incident_energy` with the 'coherent_sct_energy' parameter."""
        self.incident_energy = self.parameters['coherent_sct_energy']['value']

    @observe('scale_opt')
    def _new_opt(self, change):
        """Redraw using the newly selected y-axis scale (LinLog/Linear)."""
        self.log_linear_plot()
        self._update_canvas()

    def log_linear_plot(self):
        """Apply the currently selected y-axis scale and its limits."""
        use_log = self.plot_type[self.scale_opt] == 'LinLog'
        if use_log:
            self._ax.set_yscale('log')
            self._ax.set_ylim(self.log_range)
        else:
            self._ax.set_yscale('linear')
            self._ax.set_ylim(self.linear_range)

    def exp_data_update(self, change):
        """
        Observer function to be connected to the fileio model
        in the top-level gui.py startup

        Parameters
        ----------
        change : dict
            This is the dictionary that gets passed to a function
            with the @observe decorator
        """
        self.data = change['value']
        # Ignore both noisy ends of the spectrum when finding the maximum.
        trimmed = self.data[self.limit_cut:-self.limit_cut]
        self.max_v = np.max(trimmed)
        self._update_ylimit()
        self.log_linear_plot()
        self._update_canvas()

    @observe('plot_exp_opt')
    def _new_exp_plot_opt(self, change):
        """Show or hide the experimental data line when the option toggles.

        A leading underscore in the label hides the line from the legend.
        """
        if change['type'] == 'create':
            return

        line = self.plot_exp_obj
        label = line.get_label()
        if change['value']:
            line.set_visible(True)
            line.set_label(label.strip('_'))
        else:
            line.set_visible(False)
            line.set_label('_' + label)

        self._update_canvas()

    def plot_experiment(self):
        """
        PLot raw experiment data for fitting.
        """
        try:
            self.plot_exp_obj.remove()
            logger.debug('Previous experimental data is removed.')
        except AttributeError:
            logger.debug('No need to remove experimental data.')

        data_arr = np.asarray(self.data)
        self.exp_data_update({'value': data_arr})

        # Convert channel indices to energy with the quadratic calibration.
        params = self.parameters
        channels = np.arange(len(data_arr))
        x_v = (params['e_offset']['value']
               + channels * params['e_linear']['value']
               + channels ** 2 * params['e_quadratic']['value'])

        style = self.plot_style['experiment']
        self.plot_exp_obj, = self._ax.plot(x_v, data_arr,
                                           linestyle=style['linestyle'],
                                           color=style['color'],
                                           marker=style['marker'],
                                           label=style['label'])

    def plot_multi_exp_data(self):
        """Plot every loaded data set whose ``plot_index`` flag is set."""
        # Drop curves drawn on a previous call.
        while self.plot_exp_list:
            self.plot_exp_list.pop().remove()

        color_n = get_color_name()

        self.max_v = 1.0
        m = 0
        for (k, v) in six.iteritems(self.data_sets):
            if not v.plot_index:
                continue

            data_arr = np.asarray(v.data)
            # Track the global maximum over the interior of each spectrum.
            interior = data_arr[self.limit_cut:-self.limit_cut]
            self.max_v = np.max([self.max_v, np.max(interior)])

            # Energy axis: E(ch) = e_offset + e_linear*ch + e_quadratic*ch**2
            channels = np.arange(len(data_arr))
            x_v = (self.parameters['e_offset']['value'] +
                   channels * self.parameters['e_linear']['value'] +
                   channels**2 * self.parameters['e_quadratic']['value'])

            curve, = self._ax.plot(x_v, data_arr,
                                   color=color_n[m],
                                   label=v.filename.split('.')[0],
                                   linestyle=self.plot_style['experiment']['linestyle'],
                                   marker=self.plot_style['experiment']['marker'])
            self.plot_exp_list.append(curve)
            m += 1

        self._update_ylimit()
        self.log_linear_plot()
        self._update_canvas()

    @observe('show_exp_opt')
    def _update_exp(self, change):
        """Show or hide all multi-experiment curves and their legend entries."""
        if change['type'] == 'create':
            return
        show = bool(change['value'])
        for artist in self.plot_exp_list:
            artist.set_visible(show)
            label = artist.get_label()
            if label != '_nolegend_':
                # A leading '_' hides the entry from the matplotlib legend.
                artist.set_label(label.strip('_') if show else '_' + label)
        self._update_canvas()

    def plot_emission_line(self):
        """
        Plot emission lines and escape peaks associated with given lines.

        The value of ``self.max_v`` is needed in this function in order to
        plot the relative height of each emission line.
        """
        # Remove lines drawn on a previous call.
        while len(self.eline_obj):
            self.eline_obj.pop().remove()

        escape_e = self.escape_e

        if len(self.elist):
            # NOTE: the former ``self._ax.hold(True)`` call was removed:
            # Axes.hold was deprecated in matplotlib 2.0 and removed in 3.0,
            # and repeated plot() calls accumulate by default anyway.
            for i in range(len(self.elist)):
                # elist entries are (energy, relative intensity).
                eline, = self._ax.plot([self.elist[i][0], self.elist[i][0]],
                                       [0, self.elist[i][1]*self.max_v],
                                       color=self.plot_style['emission_line']['color'],
                                       linewidth=self.plot_style['emission_line']['linewidth'])
                self.eline_obj.append(eline)
                # Escape peak: same intensity, shifted down by the detector
                # escape energy; only shown when it falls on the axis.
                if self.plot_escape_line and self.elist[i][0] > escape_e:
                    eline, = self._ax.plot([self.elist[i][0]-escape_e,
                                            self.elist[i][0]-escape_e],
                                           [0, self.elist[i][1]*self.max_v],
                                           color=self.plot_style['escape']['color'],
                                           linewidth=self.plot_style['emission_line']['linewidth'])
                    self.eline_obj.append(eline)

    @observe('element_id')
    def set_element(self, change):
        # element_id == 0 means "no element selected": clear every plotted
        # emission line and redraw immediately.
        if change['value'] == 0:
            while(len(self.eline_obj)):
                self.eline_obj.pop().remove()
            self.elist = []
            self._fig.canvas.draw()
            return

        incident_energy = self.incident_energy

        self.elist = []
        # element_id is a 1-based index into the concatenated K/L/M name list.
        total_list = K_LINE + L_LINE + M_LINE
        logger.debug('Plot emission line for element: '
                     '{} with incident energy {}'.format(self.element_id,
                                                         incident_energy))
        ename = total_list[self.element_id-1]

        # Collect (line energy, intensity relative to the strongest line of
        # the group).  NOTE(review): the hard-coded index ranges below assume
        # a fixed ordering of ``Element.emission_line.all`` (0-3 = K lines,
        # 4-16 = L lines, 17-20 = M lines) — confirm against the library.
        if '_K' in ename:
            e = Element(ename[:-2])
            if e.cs(incident_energy)['ka1'] != 0:
                for i in range(4):
                    self.elist.append((e.emission_line.all[i][1],
                                       e.cs(incident_energy).all[i][1]
                                       / e.cs(incident_energy).all[0][1]))

        elif '_L' in ename:
            e = Element(ename[:-2])
            if e.cs(incident_energy)['la1'] != 0:
                for i in range(4, 17):
                    self.elist.append((e.emission_line.all[i][1],
                                       e.cs(incident_energy).all[i][1]
                                       / e.cs(incident_energy).all[4][1]))

        else:
            e = Element(ename[:-2])
            if e.cs(incident_energy)['ma1'] != 0:
                for i in range(17, 21):
                    self.elist.append((e.emission_line.all[i][1],
                                       e.cs(incident_energy).all[i][1]
                                       / e.cs(incident_energy).all[17][1]))
        self.plot_emission_line()
        self._update_canvas()

    @observe('det_materials')
    def _update_det_materials(self, change):
        """Select the detector escape-peak energy for the chosen material.

        Value 0 selects 1.73998 keV, anything else 9.88640 keV
        (presumably Si vs. Ge detectors — confirm with the UI choices).
        """
        self.escape_e = 1.73998 if change['value'] == 0 else 9.88640

    def plot_roi_bound(self):
        """
        Plot each ROI as three vertical markers: low, center and high value.
        """
        # Clear any ROI markers drawn previously.
        for artists in self.roi_plot_dict.values():
            for artist in artists:
                artist.remove()
        self.roi_plot_dict.clear()

        for name, roi in six.iteritems(self.roi_dict):
            markers = []
            # ROI values are divided by 1000 to match the plot axis
            # (presumably eV stored vs. keV plotted — confirm).
            for x_pos in np.array([roi.left_val, roi.line_val, roi.right_val])/1000.:
                marker, = self._ax.plot([x_pos, x_pos],
                                        [0, 1*self.max_v],
                                        color=self.plot_style['roi_line']['color'],
                                        linewidth=self.plot_style['roi_line']['linewidth'])
                marker.set_visible(bool(roi.show_plot))
                markers.append(marker)
            self.roi_plot_dict[name] = markers

        self._update_canvas()

    @observe('roi_dict')
    def show_roi_bound(self, change):
        """Redraw ROI markers whenever the ROI dictionary changes."""
        logger.debug('roi dict changed {}'.format(change['value']))
        self.plot_roi_bound()

        # Re-apply per-ROI visibility to the freshly drawn markers.
        for name, roi in six.iteritems(self.roi_dict):
            visible = bool(roi.show_plot)
            for marker in self.roi_plot_dict[name]:
                marker.set_visible(visible)
        self._update_canvas()

    # def plot_autofit(self):
    #     sum_y = 0
    #     while(len(self.auto_fit_obj)):
    #         self.auto_fit_obj.pop().remove()
    #
    #     k_auto = 0
    #
    #     # K lines
    #     if len(self.total_y):
    #         self._ax.hold(True)
    #         for k, v in six.iteritems(self.total_y):
    #             if k == 'background':
    #                 ln, = self._ax.plot(self.prefit_x, v,
    #                                     color=self.plot_style['background']['color'],
    #                                     #marker=self.plot_style['background']['marker'],
    #                                     #markersize=self.plot_style['background']['markersize'],
    #                                     label=self.plot_style['background']['label'])
    #             elif k == 'compton':
    #                 ln, = self._ax.plot(self.prefit_x, v,
    #                                     color=self.plot_style['compton']['color'],
    #                                     linewidth=self.plot_style['compton']['linewidth'],
    #                                     label=self.plot_style['compton']['label'])
    #             elif k == 'elastic':
    #                 ln, = self._ax.plot(self.prefit_x, v,
    #                                     color=self.plot_style['elastic']['color'],
    #                                     label=self.plot_style['elastic']['label'])
    #             elif k == 'escape':
    #                 ln, = self._ax.plot(self.prefit_x, v,
    #                                     color=self.plot_style['escape']['color'],
    #                                     label=self.plot_style['escape']['label'])
    #             else:
    #                 # only the first one has label
    #                 if k_auto == 0:
    #                     ln, = self._ax.plot(self.prefit_x, v,
    #                                         color=self.plot_style['k_line']['color'],
    #                                         label=self.plot_style['k_line']['label'])
    #                 else:
    #                     ln, = self._ax.plot(self.prefit_x, v,
    #                                         color=self.plot_style['k_line']['color'],
    #                                         label='_nolegend_')
    #                 k_auto += 1
    #             self.auto_fit_obj.append(ln)
    #             sum_y += v
    #
    #     # L lines
    #     if len(self.total_l):
    #         self._ax.hold(True)
    #         for i, (k, v) in enumerate(six.iteritems(self.total_l)):
    #             # only the first one has label
    #             if i == 0:
    #                 ln, = self._ax.plot(self.prefit_x, v,
    #                                     color=self.plot_style['l_line']['color'],
    #                                     label=self.plot_style['l_line']['label'])
    #             else:
    #                 ln, = self._ax.plot(self.prefit_x, v,
    #                                     color=self.plot_style['l_line']['color'],
    #                                     label='_nolegend_')
    #             self.auto_fit_obj.append(ln)
    #             sum_y += v
    #
    #     # M lines
    #     if len(self.total_m):
    #         self._ax.hold(True)
    #         for i, (k, v) in enumerate(six.iteritems(self.total_m)):
    #             # only the first one has label
    #             if i == 0:
    #                 ln, = self._ax.plot(self.prefit_x, v,
    #                                     color=self.plot_style['m_line']['color'],
    #                                     label=self.plot_style['m_line']['label'])
    #             else:
    #                 ln, = self._ax.plot(self.prefit_x, v,
    #                                     color=self.plot_style['m_line']['color'],
    #                                     label='_nolegend_')
    #             self.auto_fit_obj.append(ln)
    #             sum_y += v
    #
    #     # pileup
    #     if len(self.total_pileup):
    #         self._ax.hold(True)
    #         for i, (k, v) in enumerate(six.iteritems(self.total_pileup)):
    #             # only the first one has label
    #             if i == 0:
    #                 ln, = self._ax.plot(self.prefit_x, v,
    #                                     color=self.plot_style['pileup']['color'],
    #                                     label=self.plot_style['pileup']['label'])
    #             else:
    #                 ln, = self._ax.plot(self.prefit_x, v,
    #                                     color=self.plot_style['pileup']['color'],
    #                                     label='_nolegend_')
    #             self.auto_fit_obj.append(ln)
    #             sum_y += v
    #
    #     if len(self.total_y) or len(self.total_l) or len(self.total_m):
    #         self._ax.hold(True)
    #         ln, = self._ax.plot(self.prefit_x, sum_y,
    #                             color=self.plot_style['auto_fit']['color'],
    #                             label=self.plot_style['auto_fit']['label'],
    #                             linewidth=self.plot_style['auto_fit']['linewidth'])
    #         self.auto_fit_obj.append(ln)

    # @observe('show_autofit_opt')
    # def update_auto_fit(self, change):
    #     if change['value']:
    #         if len(self.auto_fit_obj):
    #             for v in self.auto_fit_obj:
    #                 v.set_visible(True)
    #                 lab = v.get_label()
    #                 if lab != '_nolegend_':
    #                     v.set_label(lab.strip('_'))
    #     else:
    #         if len(self.auto_fit_obj):
    #             for v in self.auto_fit_obj:
    #                 v.set_visible(False)
    #                 lab = v.get_label()
    #                 if lab != '_nolegend_':
    #                     v.set_label('_' + lab)
    #     self._update_canvas()

    # def set_prefit_data_and_plot(self, prefit_x,
    #                              total_y, total_l,
    #                              total_m, total_pileup):
    #     """
    #     Parameters
    #     ----------
    #     prefit_x : array
    #         X axis with limited range
    #     total_y : dict
    #         Results for k lines, bg, and others
    #     total_l : dict
    #         Results for l lines
    #     total_m : dict
    #         Results for m lines
    #     total_pileup : dict
    #         Results for pileups
    #     """
    #     self.prefit_x = prefit_x
    #     # k lines
    #     self.total_y = total_y
    #     # l lines
    #     self.total_l = total_l
    #     # m lines
    #     self.total_m = total_m
    #     # pileup
    #     self.total_pileup = total_pileup
    #
    #     #self._ax.set_xlim([self.prefit_x[0], self.prefit_x[-1]])
    #     self.plot_autofit()
    #     #self.log_linear_plot()
    #     self._update_canvas()

    def plot_fit(self, fit_x, fit_y, fit_all, residual=None):
        """
        Plot the total fit, the residual and every individual component.

        Parameters
        ----------
        fit_x : array
            energy axis
        fit_y : array
            fitted spectrum
        fit_all : dict
            dict of individual components, keyed by name: 'background',
            'compton', 'elastic', 'escape', element lines such as 'Fe_K',
            and pileup entries containing '-' such as 'Si_K-Si_K'
        residual : array, optional
            residual between fit and exp
        """
        # Remove all curves drawn by a previous fit.
        while len(self.plot_fit_obj):
            self.plot_fit_obj.pop().remove()

        ln, = self._ax.plot(fit_x, fit_y,
                            color=self.plot_style['fit']['color'],
                            label=self.plot_style['fit']['label'],
                            linewidth=self.plot_style['fit']['linewidth'])
        self.plot_fit_obj.append(ln)

        if residual is not None:
            # Shift the residual below the data so the curves don't overlap.
            # (Removed the unused local ``shiftv``; the offset is 0.15*max_v.)
            ln, = self._ax.plot(fit_x,
                                residual - 0.15*self.max_v,
                                label=self.plot_style['residual']['label'],
                                color=self.plot_style['residual']['color'])
            self.plot_fit_obj.append(ln)

        # Only the first curve of each group (pileup/K/L/M) carries a legend
        # label; the rest use '_nolegend_' to keep the legend compact.
        k_num = 0
        l_num = 0
        m_num = 0
        p_num = 0
        for k, v in six.iteritems(fit_all):
            if k == 'background':
                ln, = self._ax.plot(fit_x, v,
                                    color=self.plot_style['background']['color'],
                                    label=self.plot_style['background']['label'])
                self.plot_fit_obj.append(ln)
            elif k == 'compton':
                ln, = self._ax.plot(fit_x, v,
                                    color=self.plot_style['compton']['color'],
                                    linewidth=self.plot_style['compton']['linewidth'],
                                    label=self.plot_style['compton']['label'])
                self.plot_fit_obj.append(ln)
            elif k == 'elastic':
                ln, = self._ax.plot(fit_x, v,
                                    color=self.plot_style['elastic']['color'],
                                    label=self.plot_style['elastic']['label'])
                self.plot_fit_obj.append(ln)
            elif k == 'escape':
                ln, = self._ax.plot(fit_x, v,
                                    color=self.plot_style['escape']['color'],
                                    label=self.plot_style['escape']['label'])
                self.plot_fit_obj.append(ln)

            elif '-' in k:  # pileup lines, e.g. 'Si_K-Si_K'
                if p_num == 0:
                    ln, = self._ax.plot(fit_x, v,
                                        color=self.plot_style['pileup']['color'],
                                        label=self.plot_style['pileup']['label'])
                else:
                    ln, = self._ax.plot(fit_x, v,
                                        color=self.plot_style['pileup']['color'],
                                        label='_nolegend_')
                self.plot_fit_obj.append(ln)
                p_num += 1

            elif ('_K' in k.upper()) and (len(k) <= 4):
                if k_num == 0:
                    ln, = self._ax.plot(fit_x, v,
                                        color=self.plot_style['k_line']['color'],
                                        label=self.plot_style['k_line']['label'])
                else:
                    ln, = self._ax.plot(fit_x, v,
                                        color=self.plot_style['k_line']['color'],
                                        label='_nolegend_')
                self.plot_fit_obj.append(ln)
                k_num += 1

            elif ('_L' in k.upper()) and (len(k) <= 4):
                if l_num == 0:
                    ln, = self._ax.plot(fit_x, v,
                                        color=self.plot_style['l_line']['color'],
                                        label=self.plot_style['l_line']['label'])
                else:
                    ln, = self._ax.plot(fit_x, v,
                                        color=self.plot_style['l_line']['color'],
                                        label='_nolegend_')
                self.plot_fit_obj.append(ln)
                l_num += 1

            elif ('_M' in k.upper()) and (len(k) <= 4):
                if m_num == 0:
                    ln, = self._ax.plot(fit_x, v,
                                        color=self.plot_style['m_line']['color'],
                                        label=self.plot_style['m_line']['label'])
                else:
                    ln, = self._ax.plot(fit_x, v,
                                        color=self.plot_style['m_line']['color'],
                                        label='_nolegend_')
                self.plot_fit_obj.append(ln)
                m_num += 1

            else:
                # Unknown component names are ignored.
                pass

    @observe('show_fit_opt')
    def _update_fit(self, change):
        """Show or hide every fitted curve and its legend entry."""
        if change['type'] == 'create':
            return
        show = bool(change['value'])
        for artist in self.plot_fit_obj:
            artist.set_visible(show)
            label = artist.get_label()
            if label != '_nolegend_':
                # A leading '_' hides the entry from the matplotlib legend.
                artist.set_label(label.strip('_') if show else '_' + label)
        self._update_canvas()
Ejemplo n.º 7
0
class DataflowConfiguration(UTQLSchemaEntity):
    #: Class name of the configured dataflow component.
    class_name = Str()

    #: Configuration attributes (element type not constrained here).
    attributes = List()
Ejemplo n.º 8
0
class DeclaracadPlugin(Plugin):
    """Core declaracad plugin.

    Collects DockItem and SettingsPage contributions from other plugins'
    extension points and builds the main dock-area layout.
    """
    #: Project site (fixed malformed scheme: was "https;//")
    wiki_page = Str("https://declaracad.com/")

    #: Dock items to add
    dock_items = List(DockItem)

    #: Generated layout of the dock items
    dock_layout = Instance(AreaLayout)

    #: Style of the dock area (persisted via config)
    dock_style = Enum(*reversed(ALL_STYLES)).tag(config=True)

    #: Settings pages to add
    settings_pages = List(extensions.SettingsPage)

    #: Current settings page
    settings_page = Instance(extensions.SettingsPage)

    #: Internal settings models
    settings_model = Instance(Atom)

    def start(self):
        """ Load all the plugins declaracad is dependent on """
        # (Removed an unused local alias of self.workbench.)
        super(DeclaracadPlugin, self).start()
        self._refresh_dock_items()
        self._refresh_settings_pages()

    def _bind_observers(self):
        """ Setup the observers for the plugin.
        """
        super(DeclaracadPlugin, self)._bind_observers()
        workbench = self.workbench
        point = workbench.get_extension_point(extensions.DOCK_ITEM_POINT)
        point.observe('extensions', self._refresh_dock_items)

        point = workbench.get_extension_point(extensions.SETTINGS_PAGE_POINT)
        point.observe('extensions', self._refresh_settings_pages)

    def _unbind_observers(self):
        """ Remove the observers for the plugin.
        """
        super(DeclaracadPlugin, self)._unbind_observers()
        workbench = self.workbench
        point = workbench.get_extension_point(extensions.DOCK_ITEM_POINT)
        point.unobserve('extensions', self._refresh_dock_items)

        point = workbench.get_extension_point(extensions.SETTINGS_PAGE_POINT)
        point.unobserve('extensions', self._refresh_settings_pages)

    # -------------------------------------------------------------------------
    # Dock API
    # -------------------------------------------------------------------------
    def create_new_area(self):
        """ Create the dock area
        """
        # Imported lazily inside enaml.imports() because .dock is an
        # enaml module, not a plain python one.
        with enaml.imports():
            from .dock import DockView
        area = DockView(workbench=self.workbench, plugin=self)
        return area

    def get_dock_area(self):
        """ Get the dock area

        Returns
        -------
            area: DockArea
        """
        ui = self.workbench.get_plugin('enaml.workbench.ui')
        # Make sure the declaracad workspace is active before looking up
        # its content.
        if not ui.workspace or not ui.workspace.content:
            ui.select_workspace('declaracad.workspace')
        return ui.workspace.content.find('dock_area')

    def _refresh_dock_items(self, change=None):
        """ Reload all DockItems registered by any Plugins

        Any plugin can add to this list by providing a DockItem
        extension in their PluginManifest.

        """
        workbench = self.workbench
        point = workbench.get_extension_point(extensions.DOCK_ITEM_POINT)

        #: Layout spec: one bucket per declared layout position
        layout = {name: [] for name in extensions.DockItem.layout.items}

        dock_items = []
        for extension in sorted(point.extensions, key=lambda ext: ext.rank):
            for declaration in extension.get_children(extensions.DockItem):
                # Create the item
                DockItem = declaration.factory()
                plugin = workbench.get_plugin(declaration.plugin_id)
                item = DockItem(plugin=plugin)

                # Add to our layout
                layout[declaration.layout].append(item.name)

                # Save it
                dock_items.append(item)

        # Update items
        log.debug("Updating dock items: {}".format(dock_items))
        self.dock_items = dock_items
        self._refresh_layout(layout)

    def _refresh_layout(self, layout):
        """ Create the layout for all the plugins

        Builds a main tab area with optional bottom/left splits, plus
        dock bars for the remaining positions, and assigns the result to
        ``self.dock_layout``.
        """
        if not self.dock_items:
            # NOTE(review): this return value is ignored by the caller and
            # self.dock_layout is NOT updated on this path — confirm whether
            # assignment was intended.
            return AreaLayout()
        items = layout.pop('main')
        if not items:
            raise EnvironmentError("At least one main layout item must be "
                                   "defined!")

        left_items = layout.pop('main-left', [])
        bottom_items = layout.pop('main-bottom', [])

        main = TabLayout(*items)

        if bottom_items:
            main = VSplitLayout(main, *bottom_items)
        if left_items:
            main = HSplitLayout(*left_items, main)

        # Remaining positions become dock bars on their respective sides.
        dockbars = [
            DockBarLayout(*items, position=side)
            for side, items in layout.items() if items
        ]

        #: Update layout
        self.dock_layout = AreaLayout(main, dock_bars=dockbars)

    # -------------------------------------------------------------------------
    # Settings API
    # -------------------------------------------------------------------------
    def _default_settings_page(self):
        # First page (sorted by name) is selected by default.
        return self.settings_pages[0]

    def _observe_settings_page(self, change):
        log.debug("Settings page: {}".format(change))

    def _refresh_settings_pages(self, change=None):
        """ Reload all SettingsPages registered by any Plugins

        Any plugin can add to this list by providing a SettingsPage
        extension in their PluginManifest.

        """
        workbench = self.workbench
        point = workbench.get_extension_point(extensions.SETTINGS_PAGE_POINT)

        settings_pages = []
        for extension in sorted(point.extensions, key=lambda ext: ext.rank):
            for d in extension.get_children(extensions.SettingsPage):
                settings_pages.append(d)

        #: Update items
        settings_pages.sort(key=lambda p: p.name)
        log.debug("Updating settings pages: {}".format(settings_pages))
        self.settings_pages = settings_pages
Ejemplo n.º 9
0
 (Long(strict=True), [long(1)], [long(1)], [1.0, 1] if sys.version_info <
  (3, ) else [0.1]),
 (Long(strict=False), [1, 1.0, int(1)], 3 * [1], ['a']),
 (Range(0, 2), [0, 2], [0, 2], [-1, 3]),
 (Range(2, 0), [0, 2], [0, 2], [-1, 3]),
 (Range(0), [0, 3], [0, 3], [-1]),
 (Range(high=2), [-1, 2], [-1, 2], [3]),
 (Float(), [1, int(1), 1.1], [1.0, 1.0, 1.1], ['']),
 (Float(strict=True), [1.1], [1.1], [1]),
 (FloatRange(0.0, 0.5), [0.0, 0.5], [0.0, 0.5], [-0.1, 0.6]),
 (FloatRange(0.5, 0.0), [0.0, 0.5], [0.0, 0.5], [-0.1, 0.6]),
 (FloatRange(0.0), [0.0, 0.6], [0.0, 0.6], [-0.1]),
 (FloatRange(high=0.5), [-0.3, 0.5], [-0.3, 0.5], [0.6]),
 (Bytes(), [b'a', u'a'], [b'a'] * 2, [1]),
 (Bytes(strict=True), [b'a'], [b'a'], [u'a']),
 (Str(), [b'a', u'a'], ['a'] * 2, [1]),
 (Str(strict=True), [b'a'] if sys.version_info <
  (3, ) else [u'a'], ['a'], [u'a'] if sys.version_info < (3, ) else [b'a']),
 (Unicode(), [b'a', u'a'], [u'a'] * 2, [1]),
 (Unicode(strict=True), [u'a'], [u'a'], [b'a']),
 (Enum(1, 2, 'a'), [1, 2, 'a'], [1, 2, 'a'], [3]),
 (Callable(), [int], [int], [1]),
 (Coerced(set), [{1}, [1], (1, )], [{1}] * 3, [1]),
 (Coerced(int, coercer=lambda x: int(str(x), 2)), ['101'], [5], []),
 (Tuple(), [(1, )], [(1, )], [[1]]),
 (Tuple(Int()), [(1, )], [(1, )], [(1.0, )]),
 (Tuple(int), [(1, )], [(1, )], [(1.0, )]),
 (List(), [[1]], [[1]], [(1, )]),
 (List(Int()), [[1]], [[1]], [[1.0]]),
 (List(float), [[1.0]], [[1.0]], [[1]]),
 (List((int, float)), [[1, 1.0]], [[1, 1.0]], [['']]),
Ejemplo n.º 10
0
class NIDAQCounterChannel(NIDAQGeneralMixin, CounterChannel):

    #: Sample counts for the counter output; presumably the high/low
    #: durations of the generated pulse train — confirm against the
    #: NI-DAQ task setup.  All three members are tagged as metadata.
    high_samples = d_(Int().tag(metadata=True))
    low_samples = d_(Int().tag(metadata=True))
    #: Terminal name used as the counter source.
    source_terminal = d_(Str().tag(metadata=True))
Ejemplo n.º 11
0
Archivo: widget.py Proyecto: ylwb/enaml
class Widget(ToolkitObject, Stylable):
    """ The base class of visible widgets in Enaml.

    """
    #: Whether or not the widget is enabled.
    enabled = d_(Bool(True))

    #: Whether or not the widget is visible.
    visible = d_(Bool(True))

    #: The background color of the widget.
    background = d_(ColorMember())

    #: The foreground color of the widget.
    foreground = d_(ColorMember())

    #: The font used for the widget.
    font = d_(FontMember())

    #: The minimum size for the widget. The default means that the
    #: client should determine an intelligent minimum size.
    minimum_size = d_(Coerced(Size, (-1, -1)))

    #: The maximum size for the widget. The default means that the
    #: client should determine an intelligent maximum size.
    maximum_size = d_(Coerced(Size, (-1, -1)))

    #: The tool tip to show when the user hovers over the widget.
    tool_tip = d_(Str())

    #: The status tip to show when the user hovers over the widget.
    status_tip = d_(Str())

    #: Set the extra features to enable for this widget. This value must
    #: be provided when the widget is instantiated. Runtime changes to
    #: this value are ignored.
    features = d_(Coerced(Feature.Flags))

    #: A reference to the ProxyWidget object.
    proxy = Typed(ProxyWidget)

    #--------------------------------------------------------------------------
    # Observers
    #--------------------------------------------------------------------------
    @observe('enabled', 'visible', 'background', 'foreground', 'font',
             'minimum_size', 'maximum_size', 'tool_tip', 'status_tip')
    def _update_proxy(self, change):
        """ Update the proxy widget when the Widget data changes.

        This method only updates the proxy when an attribute is updated;
        not when it is created or deleted.

        """
        # The superclass implementation is sufficient.
        super(Widget, self)._update_proxy(change)

    #--------------------------------------------------------------------------
    # Reimplementations
    #--------------------------------------------------------------------------
    def restyle(self):
        """ Restyle the toolkit widget.

        This method is invoked by the Stylable class when the style
        dependencies have changed for the widget. This will trigger a
        proxy restyle if necessary. This method should not typically be
        called directly by user code.

        """
        if self.proxy_is_active:
            self.proxy.restyle()

    #--------------------------------------------------------------------------
    # Public API
    #--------------------------------------------------------------------------
    def show(self):
        """ Ensure the widget is shown.

        Calling this method will also set the widget visibility to True.

        """
        self.visible = True
        if self.proxy_is_active:
            self.proxy.ensure_visible()

    def hide(self):
        """ Ensure the widget is hidden.

        Calling this method will also set the widget visibility to False.

        """
        self.visible = False
        if self.proxy_is_active:
            self.proxy.ensure_hidden()

    def set_focus(self):
        """ Set the keyboard input focus to this widget.

        FOR ADVANCED USE CASES ONLY: DO NOT ABUSE THIS!

        """
        if self.proxy_is_active:
            self.proxy.set_focus()

    def clear_focus(self):
        """ Clear the keyboard input focus from this widget.

        FOR ADVANCED USE CASES ONLY: DO NOT ABUSE THIS!

        """
        if self.proxy_is_active:
            self.proxy.clear_focus()

    def has_focus(self):
        """ Test whether this widget has input focus.

        FOR ADVANCED USE CASES ONLY: DO NOT ABUSE THIS!

        Returns
        -------
        result : bool
            True if this widget has input focus, False otherwise.

        """
        if self.proxy_is_active:
            return self.proxy.has_focus()
        return False

    def focus_next_child(self):
        """ Give focus to the next widget in the focus chain.

        FOR ADVANCED USE CASES ONLY: DO NOT ABUSE THIS!

        """
        if self.proxy_is_active:
            self.proxy.focus_next_child()

    def focus_previous_child(self):
        """ Give focus to the previous widget in the focus chain.

        FOR ADVANCED USE CASES ONLY: DO NOT ABUSE THIS!

        """
        if self.proxy_is_active:
            self.proxy.focus_previous_child()

    @d_func
    def next_focus_child(self, current):
        """ Compute the next widget which should gain focus.

        When the FocusTraversal feature of the widget is enabled, this
        method will be invoked as a result of a Tab key press or from
        a call to the 'focus_next_child' method on a descendant of the
        owner widget. It should be reimplemented in order to provide
        custom logic for computing the next focus widget.

        ** The FocusTraversal feature must be enabled for the widget in
        order for this method to be called. **

        Parameters
        ----------
        current : Widget or None
            The current widget with input focus, or None if no widget
            has focus or if the toolkit widget with focus does not
            correspond to an Enaml widget.

        Returns
        -------
        result : Widget or None
            The next widget which should gain focus, or None to follow
            the default toolkit behavior.

        """
        return None

    @d_func
    def previous_focus_child(self, current):
        """ Compute the previous widget which should gain focus.

        When the FocusTraversal feature of the widget is enabled, this
        method will be invoked as a result of a Shift+Tab key press or
        from a call to the 'focus_previous_child' method on a descendant
        of the owner widget. It should be reimplemented in order to
        provide custom logic for computing the previous focus widget.

        ** The FocusTraversal feature must be enabled for the widget in
        order for this method to be called. **

        Parameters
        ----------
        current : Widget or None
            The current widget with input focus, or None if no widget
            has focus or if the toolkit widget with focus does not
            correspond to an Enaml widget.

        Returns
        -------
        result : Widget or None
            The previous widget which should gain focus, or None to
            follow the default toolkit behavior.

        """
        return None

    @d_func
    def focus_gained(self):
        """ A method invoked when the widget gains input focus.

        ** The FocusEvents feature must be enabled for the widget in
        order for this method to be called. **

        """
        pass

    @d_func
    def focus_lost(self):
        """ A method invoked when the widget loses input focus.

        ** The FocusEvents feature must be enabled for the widget in
        order for this method to be called. **

        """
        pass

    @d_func
    def drag_start(self):
        """ A method called at the start of a drag-drop operation.

        This method is called when the user starts a drag operation
        by dragging the widget with the left mouse button. It returns
        the drag data for the drag operation.

        ** The DragEnabled feature must be enabled for the widget in
        order for this method to be called. **

        Returns
        -------
        result : DragData
            An Enaml DragData object which holds the drag data. If
            this is not provided, no drag operation will occur.

        """
        return None

    @d_func
    def drag_end(self, drag_data, result):
        """ A method called at the end of a drag-drop operation.

        This method is called after the user has completed the drop
        operation by releasing the left mouse button. It is passed
        the original drag data object along with the resulting drop
        action of the operation.

        ** The DragEnabled feature must be enabled for the widget in
        order for this method to be called. **

        Parameters
        ----------
        drag_data : DragData
            The drag data created by the `drag_start` method.

        result : DropAction
            The requested drop action when the drop completed.

        """
        pass

    @d_func
    def drag_enter(self, event):
        """ A method invoked when a drag operation enters the widget.

        The widget should inspect the mime data of the event and
        accept the event if it can handle the drop action. The event
        must be accepted in order to receive further drag-drop events.

        ** The DropEnabled feature must be enabled for the widget in
        order for this method to be called. **

        Parameters
        ----------
        event : DropEvent
            The event representing the drag-drop operation.

        """
        pass

    @d_func
    def drag_move(self, event):
        """ A method invoked when a drag operation moves in the widget.

        This method will not normally be implemented, but it can be
        useful for supporting advanced drag-drop interactions.

        ** The DropEnabled feature must be enabled for the widget in
        order for this method to be called. **

        Parameters
        ----------
        event : DropEvent
            The event representing the drag-drop operation.

        """
        pass

    @d_func
    def drag_leave(self):
        """ A method invoked when a drag operation leaves the widget.

        ** The DropEnabled feature must be enabled for the widget in
        order for this method to be called. **

        """
        pass

    @d_func
    def drop(self, event):
        """ A method invoked when the user drops the data on the widget.

        The widget should either accept the proposed action, or set
        the drop action to an appropriate action before accepting the
        event, or set the drop action to DropAction.Ignore and then
        ignore the event.

        ** The DropEnabled feature must be enabled for the widget in
        order for this method to be called. **

        Parameters
        ----------
        event : DropEvent
            The event representing the drag-drop operation.

        """
        pass
Ejemplo n.º 12
0
class ViewerPlugin(Plugin):
    """ Plugin holding the 3D viewer's persisted display settings and the
    commands that operate on the open viewer dock items (run, export,
    screenshot).

    Members tagged with ``viewer=...`` metadata are pushed to the viewer
    (see `get_viewer_members`); members tagged with ``config=True`` are
    persisted as plugin configuration.
    """
    # -------------------------------------------------------------------------
    # Default viewer settings
    # -------------------------------------------------------------------------
    background_mode = Enum('gradient', 'solid').tag(config=True,
                                                    viewer='background')
    background_top = ColorMember('lightgrey').tag(config=True,
                                                  viewer='background')
    background_bottom = ColorMember('grey').tag(config=True,
                                                viewer='background')
    background_fill_method = Enum(
        'corner3',
        'corner1',
        'corner2',
        'corner4',
        'ver',
        'hor',
        'diag1',
        'diag2',
    ).tag(config=True, viewer='background')
    trihedron_mode = Str('right-lower').tag(config=True, viewer=True)

    #: Defaults
    shape_color = ColorMember('steelblue').tag(config=True, viewer=True)

    #: Grid options
    grid_mode = Str().tag(config=True, viewer=True)
    grid_major_color = ColorMember('#444').tag(config=True,
                                               viewer='grid_colors')
    grid_minor_color = ColorMember('#888').tag(config=True,
                                               viewer='grid_colors')

    #: Rendering options
    antialiasing = Bool(True).tag(config=True, viewer=True)
    raytracing = Bool(True).tag(config=True, viewer=True)
    draw_boundaries = Bool(True).tag(config=True, viewer=True)
    shadows = Bool(True).tag(config=True, viewer=True)
    reflections = Bool(True).tag(config=True, viewer=True)
    chordial_deviation = Float(0.001).tag(config=True, viewer=True)

    # -------------------------------------------------------------------------
    # Plugin members
    # -------------------------------------------------------------------------
    #: Default dir for screenshots
    screenshot_dir = Str().tag(config=True)

    #: Exporters
    exporters = ContainerList()

    def get_viewer_members(self):
        """ Yield the atom members that carry ``viewer`` metadata.

        These are the settings that should be forwarded to the viewer.
        """
        for m in self.members().values():
            meta = m.metadata
            if not meta:
                continue
            if meta.get('viewer'):
                yield m

    def get_viewers(self):
        """ Yield every viewer dock item currently present in the dock area.

        """
        ViewerDockItem = viewer_factory()
        dock = self.workbench.get_plugin('declaracad.ui').get_dock_area()
        for item in dock.dock_items():
            if isinstance(item, ViewerDockItem):
                yield item

    def fit_all(self, event=None):
        """ Fit the rendered model to the viewer.

        NOTE(review): this command is currently disabled. The previous
        implementation (``viewer.proxy.display.FitAll()``) was dead code
        left unreachable behind a bare ``return`` and has been removed;
        re-implement here when fit-all is supported again.
        """
        return

    def run(self, event=None):
        """ Render the active editor document in the first open viewer.

        Does nothing if no viewer or no editor is currently open.
        """
        viewer = self.get_viewer()
        editor = self.workbench.get_plugin('declaracad.editor').get_editor()
        if viewer is None or editor is None:
            return  # Nothing to render into / render from
        doc = editor.doc
        viewer.renderer.set_source(editor.get_text())
        # Bumping the version triggers observers to reload the document
        doc.version += 1

    def get_viewer(self, name=None):
        """ Return the viewer with the given name, or the first viewer
        when no name is given. Returns None if no viewer matches.
        """
        for viewer in self.get_viewers():
            if name is None:
                return viewer
            elif viewer.name == name:
                return viewer

    def _default_exporters(self):
        """ TODO: push to an ExtensionPoint """
        from .exporters.stl.exporter import StlExporter
        from .exporters.step.exporter import StepExporter
        return [StlExporter, StepExporter]

    # -------------------------------------------------------------------------
    # Plugin commands
    # -------------------------------------------------------------------------

    def export(self, event):
        """ Export the current model to stl """
        options = event.parameters.get('options')
        if not options:
            raise ValueError("An export `options` parameter is required")

        # Pickle the configured exporter and send it over
        cmd = [sys.executable]
        if not sys.executable.endswith('declaracad'):
            cmd.extend(['-m', 'declaracad'])

        data = jsonpickle.dumps(options)
        assert data != 'null', f"Exporter failed to serialize: {options}"
        cmd.extend(['export', data])
        log.debug(" ".join(cmd))
        protocol = ProcessLineReceiver()
        loop = asyncio.get_event_loop()
        # Run the export in a subprocess; the protocol collects its output
        deferred_call(loop.subprocess_exec, lambda: protocol, *cmd)
        return protocol

    def screenshot(self, event):
        """ Export the views as a screenshot.

        When no ``options`` parameter is given, a default one is built
        from the active document name and the last screenshot directory.
        Returns the list of per-viewer screenshot results.
        """
        if 'options' not in event.parameters:
            editor = self.workbench.get_plugin('declaracad.editor')
            filename = editor.active_document.name
            options = ScreenshotOptions(filename=filename,
                                        default_dir=self.screenshot_dir)
        else:
            options = event.parameters.get('options')
            # Update the default screenshot dir
            self.screenshot_dir, _ = os.path.split(options.path)
        results = []
        if options.target:
            # A specific viewer was requested
            viewer = self.get_viewer(options.target)
            if viewer:
                results.append(viewer.renderer.screenshot(options.path))
        else:
            for i, viewer in enumerate(self.get_viewers()):
                # Insert view number into the filename
                path, ext = os.path.splitext(options.path)
                filename = "{}-{}{}".format(path, i + 1, ext)
                results.append(viewer.renderer.screenshot(filename))
        return results
Ejemplo n.º 13
0
class ViewerProcess(ProcessLineReceiver):
    """ Manages the external viewer subprocess and speaks JSON-RPC 2.0
    with it over stdin/stdout.

    Outgoing calls are proxied through `__getattr__` (any ``set_*``
    attribute becomes a remote call returning an asyncio.Future); incoming
    lines are decoded in `data_received` and dispatched by response id.
    """
    #: Window id obtained after starting the process
    window_id = Int()

    #: Process handle
    process = Instance(object)

    #: Reference to the plugin
    plugin = ForwardInstance(lambda: ViewerPlugin)

    #: Document
    document = ForwardInstance(document_type)

    #: Rendering error
    errors = Str()

    #: Process terminated intentionally
    terminated = Bool(False)

    #: Count restarts so we can detect issues with startup
    restarts = Int()

    #: Max number of times it will attempt to restart
    max_retries = Int(10)

    #: ID count
    _id = Int()

    #: Holds responses temporarily
    _responses = Dict()

    #: Seconds to ping
    _ping_rate = Int(40)

    #: Capture stderr separately
    err_to_out = set_default(False)

    def redraw(self):
        """ Force the viewer to re-render the current document. """
        if self.document:
            # Trigger a reload
            self.document.version += 1
        else:
            self.set_version(self._id)

    @observe('document', 'document.version')
    def _update_document(self, change):
        """ Push the current document name and version to the viewer. """
        doc = self.document
        if doc is None:
            self.set_filename('-')
        else:
            self.set_filename(doc.name)
            self.set_version(doc.version)

    def send_message(self, method, *args, **kwargs):
        """ Encode and send a JSON-RPC request to the viewer process.

        Retries every second until the transport and window are ready.
        """
        # Defer until it's ready
        if not self.transport or not self.window_id:
            #log.debug('renderer | message not ready deferring')
            timed_call(1000, self.send_message, method, *args, **kwargs)
            return
        # _id may legitimately be absent (notification-style message);
        # popping without a default used to raise KeyError here.
        _id = kwargs.pop('_id', None)
        _silent = kwargs.pop('_silent', False)

        request = {
            'jsonrpc': '2.0',
            'method': method,
            'params': args or kwargs
        }
        if _id is not None:
            request['id'] = _id
        if not _silent:
            log.debug(f'renderer | sent | {request}')
        encoded_msg = jsonpickle.dumps(request).encode() + b'\r\n'
        deferred_call(self.transport.write, encoded_msg)

    async def start(self):
        """ Launch the viewer subprocess with this instance as protocol. """
        atexit.register(self.terminate)
        cmd = [sys.executable]
        if not sys.executable.endswith('declaracad'):
            cmd.extend(['-m', 'declaracad'])
        cmd.extend(['view', '-', '-f'])
        loop = asyncio.get_event_loop()
        self.process = await loop.subprocess_exec(lambda: self, *cmd)
        return self.process

    def restart(self):
        """ Restart the viewer process, aborting after max_retries. """
        self.window_id = 0
        self.restarts += 1

        if self.restarts > self.max_retries:
            plugin = self.plugin
            plugin.workbench.message_critical(
                "Viewer failed to start",
                "Could not get the viewer to start after several attempts.")

            raise RuntimeError(
                "renderer | Failed to successfully start renderer aborting!")

        log.debug(f"Attempting to restart viewer {self.process}")
        deferred_call(self.start)

    def connection_made(self, transport):
        super().connection_made(transport)
        self.schedule_ping()
        self.terminated = False

    def data_received(self, data):
        """ Decode a line from the viewer and dispatch it by response id.

        Lines that fail to decode as JSON are treated as plain output.
        """
        line = data.decode()
        try:
            response = jsonpickle.loads(line)
            # log.debug(f"viewer | resp | {response}")
        except Exception as e:
            log.debug(f"viewer | out | {line.rstrip()}")
            response = {}

        doc = self.document

        if not isinstance(response, dict):
            log.debug(f"viewer | out | {response.rstrip()}")
            return

        #: Special case for startup
        response_id = response.get('id')
        if response_id == 'window_id':
            self.window_id = response['result']
            self.restarts = 0  # Clear the restart count
            return
        elif response_id == 'keep_alive':
            return
        elif response_id == 'invoke_command':
            command_id = response.get('command_id')
            parameters = response.get('parameters', {})
            log.debug(f"viewer | out | {command_id}({parameters})")
            self.plugin.workbench.invoke_command(command_id, parameters)
        elif response_id == 'render_error':
            if doc:
                doc.errors.extend(response['error']['message'].split("\n"))
            return
        elif response_id == 'render_success':
            if doc:
                doc.errors = []
            return
        elif response_id == 'capture_output':
            # Script output capture it
            if doc:
                doc.output = response['result'].split("\n")
            return
        elif response_id == 'shape_selection':
            #: TODO: Do something with this?
            if doc:
                doc.output.append(str(response['result']))
            return
        elif response_id is not None:
            # Lookup the future that was stored for this id when the
            # request was sent and resolve it with the result or error.
            d = self._responses.get(response_id)
            if d is not None:
                del self._responses[response_id]
                try:
                    error = response.get('error')
                    if error is not None:
                        if doc:
                            doc.errors.extend(
                                error.get('message', '').split("\n"))
                        # BUGFIX: was add_done_callback(error) — a Future
                        # must be resolved with set_exception/set_result.
                        d.set_exception(RuntimeError(
                            error.get('message', 'Unknown render error')))
                    else:
                        d.set_result(response.get('result'))
                    return
                except Exception as e:
                    log.warning("RPC response not properly handled!")
                    log.exception(e)

            else:
                log.warning("Got unexpected reply")
            # else we got a response from something else, possibly an error?
        if 'error' in response and doc:
            doc.errors.extend(response['error'].get('message', '').split("\n"))
            doc.output.append(line)
        elif 'message' in response and doc:
            doc.output.extend(response['message'].split("\n"))
        elif doc:
            # Append to output
            doc.output.extend(line.split("\n"))

    def err_received(self, data):
        """ Catch and log error output attempting to decode it

        """
        for line in data.split(b"\n"):
            if not line:
                continue
            # Suppress noisy toolkit warnings
            if line.startswith(b"QWidget::") or line.startswith(b"QPainter::"):
                continue
            try:
                line = line.decode()
                log.debug(f"render | err | {line}")
                if self.document:
                    self.document.errors.append(line)
            except Exception as e:
                log.debug(f"render | err | {line}")

    def process_exited(self, reason=None):
        """ Restart the viewer unless it was terminated on purpose. """
        log.warning(f"renderer | process ended: {reason}")
        if not self.terminated:
            # Clear the filename on crash so it works when reset
            self.restart()
        log.warning("renderer | stdout closed")

    def terminate(self):
        """ Intentionally stop the viewer process (no restart). """
        super(ViewerProcess, self).terminate()
        self.terminated = True

    def schedule_ping(self):
        """ Ping periodically so the process stays awake """
        if self.terminated:
            return
        # Ping the viewer to tell it to keep alive
        self.send_message("ping", _id="keep_alive", _silent=True)
        timed_call(self._ping_rate * 1000, self.schedule_ping)

    def __getattr__(self, name):
        """ Proxy all calls not defined here to the remote viewer.

        This makes doing `setattr(renderer, attr, value)` get passed to the
        remote viewer. The returned Future resolves when the matching
        response id arrives in `data_received`.

        """
        if name.startswith('set_'):

            def remote_viewer_call(*args, **kwargs):
                d = asyncio.Future()
                self._id += 1
                kwargs['_id'] = self._id
                self._responses[self._id] = d
                self.send_message(name, *args, **kwargs)
                return d

            return remote_viewer_call
        raise AttributeError("No attribute %s" % name)
0
class DeviceDriver(Declarative):
    """ Provide meta info about this device """
    #: ID of the device. If none exits one is created from manufacturer.model
    id = d_(Str())

    #: Name of the device (optional)
    name = d_(Str())

    #: Model of the device (optional)
    model = d_(Str())

    #: Manufacturer of the device (optional)
    manufacturer = d_(Str())

    #: Width of the device (required)
    width = d_(Str())

    #: Length of the device, if it uses a roll, leave blank
    length = d_(Str())

    # Factory to construct the inkcut.device.plugin.Device or subclass.
    # If none is given it will be generated by the DevicePlugin
    #: for an example, see the DeviceDriver in the inkcut.device.pi.manifest
    factory = d_(Callable(default=default_device_factory))

    # List of protocol IDs supported by this device
    protocols = d_(List(Str()))

    # List of transport IDs supported by this device
    connections = d_(List(Str()))

    #: Config view for editing the config of this device
    config_view = d_(Callable(default=default_device_config_view_factory))

    #: Default settings to contribute to the config when selected
    default_config = d_(Dict())

    def get_device_config(self):
        """ Return the device portion of the default config.

        The reserved 'connection', 'protocol' and 'job' sections are
        stripped from a shallow copy of default_config.
        """
        device_cfg = dict(self.default_config)
        for reserved_key in ('connection', 'protocol', 'job'):
            device_cfg.pop(reserved_key, None)
        return device_cfg

    def get_job_config(self):
        """ Return a copy of the default job config parameters.

        """
        job_cfg = self.default_config.get('job', {})
        return dict(job_cfg)

    def get_connection_config(self, id):
        """ Return the connection config parameters for the given
        transport id, or an empty dict if none are defined.
        """
        connection_cfgs = dict(self.default_config.get('connection', {}))
        return connection_cfgs.get(id, {})

    def get_protocol_config(self, id):
        """ Return the protocol config parameters for the given
        protocol id, or an empty dict if none are defined.
        """
        protocol_cfgs = dict(self.default_config.get('protocol', {}))
        return protocol_cfgs.get(id, {})
Ejemplo n.º 15
0
class Driver(Declarator):
    """Declarator used to register a new driver for an instrument.

    """
    #: Path to the driver object. Path should be dot separated and the class
    #: name preceded by ':'.
    #: TODO complete : ex: exopy_hqc_legacy.instruments.
    #: The path of any parent Drivers object will be prepended to it.
    driver = d_(Str())

    #: Name identifying the system the driver is built on top of (lantz, hqc,
    #: slave, etc ...). Allow to handle properly multiple drivers declared in
    #: a single extension package for the same instrument.
    architecture = d_(Str())

    #: Name of the instrument manufacturer. Can be inferred from parent
    #: Drivers.
    manufacturer = d_(Str())

    #: Serie this instrument is part of. This is optional as it does not always
    #: make sense to be specified but in some cases it can help finding a
    #: driver. Can be inferred from parent Drivers.
    serie = d_(Str())

    #: Model of the instrument this driver has been written for.
    model = d_(Str())

    #: Kind of the instrument, to ease instrument look up. If no kind match,
    #: leave 'Other' as the kind. Can be inferred from parent
    #: Drivers.
    kind = d_(Enum(None, *INSTRUMENT_KINDS))

    #: Starter to use when initializing/finalizing this driver.
    #: Can be inferred from parent Drivers.
    starter = d_(Str())

    #: Supported connections and default values for some parameters. The
    #: admissible values for a given kind can be determined by looking at the
    #: Connection object whose id match.
    #: ex : {'VisaTCPIP' : {'port': 7500, 'resource_class': 'SOCKET'}}
    #: Can be inferred from parent Drivers.
    connections = d_(Dict())

    #: Special settings for the driver, not fitting the connections. Multiple
    #: identical connection infos with different settings can co-exist in a
    #: profile. The admissible values for a given kind can be determined by
    #: looking at the Settings object whose id match.
    #: ex : {'lantz': {'resource_manager': '@py'}}
    #: Can be inferred from parent Drivers.
    settings = d_(Dict())

    #: Id of the driver computed from the top-level package and the driver name
    id = Property(cached=True)

    def register(self, collector, traceback):
        """Collect driver and add infos to the DeclaratorCollector
        contributions member.

        Any failure is recorded in the `traceback` dict (keyed by driver
        id) rather than raised, so that other declarators can still be
        processed.

        """
        # Build the driver id by assembling the package name, the architecture
        # and the class name
        try:
            driver_id = self.id
        except KeyError:  # Handle the lack of architecture
            traceback[self.driver] = format_exc()
            return

        # Determine the path to the driver.
        path = self.get_path()
        try:
            d_path, driver = (path + '.' +
                              self.driver if path else self.driver).split(':')
        except ValueError:
            msg = 'Incorrect %s (%s), path must be of the form a.b.c:Class'
            traceback[driver_id] = msg % (driver_id, self.driver)
            return

        # Check that the driver does not already exist.
        if driver_id in collector.contributions or driver_id in traceback:
            # Find a unique traceback key for this duplicate definition.
            i = 0
            while True:
                i += 1
                err_id = '%s_duplicate%d' % (driver_id, i)
                if err_id not in traceback:
                    break

            msg = 'Duplicate definition of {}, found in {}'
            traceback[err_id] = msg.format(self.architecture + '.' + driver,
                                           d_path)
            return

        try:
            meta_infos = {
                k: getattr(self, k)
                for k in ('architecture', 'manufacturer', 'serie', 'model',
                          'kind')
            }
            infos = DriverInfos(id=driver_id,
                                infos=meta_infos,
                                starter=self.starter,
                                connections=self.connections,
                                settings=self.settings)
        # ValueError catch wrong kind value
        except (KeyError, ValueError):
            traceback[driver_id] = format_exc()
            return

        # Get the driver class.
        d_cls = import_and_get(d_path, driver, traceback, driver_id)
        if d_cls is None:
            return

        try:
            infos.cls = d_cls
        except TypeError:
            msg = '{} should be a callable.\n{}'
            traceback[driver_id] = msg.format(d_cls, format_exc())
            return

        collector.contributions[driver_id] = infos

        self.is_registered = True

    def unregister(self, collector):
        """Remove contributed infos from the collector.

        """
        if self.is_registered:

            # Remove infos.
            driver_id = self.id
            try:
                del collector.contributions[driver_id]
            except KeyError:
                pass

            self.is_registered = False

    def __str__(self):
        """Identify the decl by its members.

        """
        members = ('driver', 'architecture', 'manufacturer', 'serie', 'model',
                   'kind', 'starter', 'connections', 'settings')
        st = '{} whose known members are :\n{}'
        st_m = '\n'.join(' - {} : {}'.format(m, v)
                         for m, v in [(m, getattr(self, m)) for m in members])
        return st.format(type(self).__name__, st_m)

    # =========================================================================
    # --- Private API ---------------------------------------------------------
    # =========================================================================

    def _default_manufacturer(self):
        """Default value grabbed from parent if not provided explicitly.

        """
        return self._get_inherited_member('manufacturer')

    def _default_serie(self):
        """Default value grabbed from parent if not provided explicitly.

        """
        return self._get_inherited_member('serie')

    def _default_kind(self):
        """Default value grabbed from parent if not provided explicitly.

        """
        return self._get_inherited_member('kind')

    def _default_architecture(self):
        """Default value grabbed from parent if not provided explicitly.

        """
        return self._get_inherited_member('architecture')

    def _default_starter(self):
        """Default value grabbed from parent if not provided explicitly.

        """
        return self._get_inherited_member('starter')

    def _default_connections(self):
        """Default value grabbed from parent if not provided explicitly.

        """
        return self._get_inherited_member('connections')

    def _default_settings(self):
        """Default value grabbed from parent if not provided explicitly.

        """
        return self._get_inherited_member('settings')

    def _get_inherited_member(self, member, parent=None):
        """Get the value of a member found in a parent declarator.

        Walks up the chain of parent Drivers declarators until a non-empty
        value is found, falling back to per-member defaults at the root.

        """
        parent = parent or self.parent
        if isinstance(parent, Drivers):
            value = getattr(parent, member)
            if value:
                return value
            else:
                parent = parent.parent
        else:
            parent = None

        if parent is None:
            if member == 'settings':
                return {}  # Settings can be empty
            elif member == 'serie':
                return ''  # An instrument can have no serie
            elif member == 'kind':
                return 'Other'
            raise KeyError('No inherited member was found for %s' % member)

        return self._get_inherited_member(member, parent)

    def _get_id(self):
        """Create the unique identifier of the driver using the top level
        package the architecture and the class name.

        """
        if ':' in self.driver:
            path = self.get_path()
            d_path, d = (path + '.' +
                         self.driver if path else self.driver).split(':')

            # Build the driver id by assembling the package name, architecture
            # and the class name
            return '.'.join((d_path.split('.', 1)[0], self.architecture, d))

        else:
            return self.driver
Ejemplo n.º 16
0
class CounterAnalysis(AnalysisWithFigure):
    """Analyzes photon-counter time-series data measurement by measurement.

    Raw 2D counter data is reshaped into a 4D array with axes
    (measurement, shot, roi, bin).  Per-shot sums are written back to the
    hdf5 results files for the downstream threshold analysis, and both the
    raw and binned data are rendered on the analysis figure.
    """
    # raw formatted counter data accumulated over the current iteration
    counter_array = Member()
    # per-cycle summed counts; list (then ndarray) of (meas, shots, rois)
    binned_array = Member()
    meas_analysis_path = Str()  # hdf5 path for the per-measurement sums
    meas_data_path = Str()      # hdf5 path of the raw counter data
    iter_analysis_path = Str()  # hdf5 path for the per-iteration sums
    update_lock = Bool(False)   # guards against re-entrant figure updates
    enable = Bool()             # master enable flag for this analysis
    drops = Int(3)              # bins dropped at the start of each shot
    bins = Int(25)              # data bins per shot
    shots = Int(2)              # shots per measurement
    ROIs = List([0])            # regions of interest (counters)
    graph_roi = Int(0)          # roi displayed in the figure

    def __init__(self, name, experiment, description=''):
        super(CounterAnalysis, self).__init__(name, experiment, description)
        self.meas_analysis_path = 'analysis/counter_data'
        self.meas_data_path = 'data/counter/data'
        self.iter_analysis_path = 'shotData'
        self.properties += ['enable', 'drops', 'bins', 'shots', 'graph_roi']

    def preIteration(self, iterationResults, experimentResults):
        """Reset the per-iteration accumulators."""
        self.counter_array = []
        self.binned_array = None

    def format_data(self, array):
        """Formats raw 2D counter data into the required 4D format.

        Formats raw 2D counter data with implicit stucture:
            [   # counter 0
                [ dropped_bins shot_time_series dropped_bins shot_time_series ... ],
                # counter 1
                [ dropped_bins shot_time_series dropped_bins shot_time_series ... ]
            ]
        into the 4D format expected by the subsequent analyses:
        [   # measurements, can have different lengths run-to-run
            [   # shots array, fixed size
                [   # roi list, shot 0
                    [ time_series_roi_0 ],
                    [ time_series_roi_1 ],
                    ...
                ],
                [   # roi list, shot 1
                    [ time_series_roi_0 ],
                    [ time_series_roi_1 ],
                    ...
                ],
                ...
            ],
            ...
        ]
        """
        rois, bins = array.shape[:2]
        bins_per_shot = self.drops + self.bins  # self.bins is data bins per shot
        # calculate the number of shots dynamically
        num_shots = int(bins / (bins_per_shot))
        # calculate the number of measurements contained in the raw data
        # there may be extra shots if we get branching implemented
        num_meas = num_shots // self.shots
        # build a mask for removing valid data
        shot_mask = ([False] * self.drops + [True] * self.bins)
        good_shots = self.shots * num_meas
        # mask for the roi; pad with False so the mask always spans the
        # full bin axis (the old `0 * shot_mask * n` padding evaluated to
        # an empty list, yielding a too-short mask when extra shots exist)
        pad = bins - len(shot_mask) * good_shots
        ctr_mask = np.array(shot_mask * good_shots + [False] * pad,
                            dtype='bool')
        # apply mask and reshape partially
        array = array[:, ctr_mask].reshape(
            (rois, num_meas, self.shots, self.bins))
        array = array.swapaxes(0, 1)  # swap rois and measurement axes
        array = array.swapaxes(1, 2)  # swap rois and shots axes
        return array

    def analyzeMeasurement(self, measurementResults, iterationResults,
                           experimentResults):
        """Format, store and plot this measurement's counter data."""
        if self.enable:
            # MFE 2018/01: this analysis has been generalized such that multiple sub measurements can occur
            # in the same traditional measurement
            array = measurementResults[self.meas_data_path][()]
            try:
                # package data into an array with shape (sub measurements, shots, counters, time series data)
                array = self.format_data(array)
            except ValueError:
                errmsg = "Error retrieving counter data.  Offending counter data shape: {}"
                logger.exception(errmsg.format(array.shape))
            except Exception:
                logger.exception('Unhandled counter data exception')
            else:
                # only consume the data when formatting succeeded; the old
                # code fell through and crashed unpacking a 2D shape below
                # flatten the sub_measurements by converting top level to normal list and concatentating
                self.counter_array += list(array)
                # write this cycle's data into hdf5 file so that the threshold analysis can read it
                # when multiple counter support is enabled, the ROIs parameter will hold the count
                # Note the constant 1 is for the roi column parameter, all counters get entered in a single row
                n_meas, n_shots, n_rois, bins = array.shape
                sum_array = array.sum(axis=3).reshape(
                    (n_meas, n_shots, n_rois, 1))
                measurementResults[self.meas_analysis_path] = sum_array
                # put the sum data in the expected format for display
                if self.binned_array is None:
                    self.binned_array = [
                        sum_array.reshape((n_meas, n_shots, n_rois))
                    ]
                else:
                    self.binned_array = np.concatenate(
                        (self.binned_array,
                         [sum_array.reshape((n_meas, n_shots, n_rois))]))
        self.updateFigure()

    def analyzeIteration(self, iterationResults, experimentResults):
        """Recompute the binned data from hdf5 and store the iteration result."""
        if self.enable:
            # recalculate binned_array to get rid of cut data
            # iterationResults[self.iter_analysis_path] = self.binned_array
            # sorted() works on the map iterator on both py2 and py3; the
            # old `meas.sort()` failed on py3 where map returns an iterator
            meas = sorted(map(int, iterationResults['measurements'].keys()))
            path = 'measurements/{}/' + self.meas_analysis_path
            try:
                res = np.array(
                    [iterationResults[path.format(m)] for m in meas])
            except KeyError:
                # I was having problem with the file maybe not being ready
                logger.warning(
                    "Issue reading hdf5 file. Waiting then repeating.")
                time.sleep(0.1)  # try again in a little
                res = []
                for m in meas:
                    try:
                        res.append(iterationResults[path.format(m)])
                    except KeyError:
                        msg = ("Reading from hdf5 file during measurement `{}`"
                               " failed.").format(m)
                        logger.exception(msg)
                res = np.array(res)
            total_meas = len(self.binned_array)
            # drop superfluous ROI_columns dimension
            self.binned_array = res.reshape(res.shape[:4])
            print('cut data: {}'.format(total_meas - len(self.binned_array)))
            iterationResults[self.iter_analysis_path] = self.binned_array
        return

    def updateFigure(self):
        """Redraw the offscreen analysis figure (raw bars, sums, histogram)."""
        if self.draw_fig:
            if self.enable:
                if not self.update_lock:
                    try:
                        self.update_lock = True

                        # There are two figures in an AnalysisWithFigure.  Draw to the offscreen figure.
                        fig = self.backFigure
                        # Clear figure.
                        fig.clf()

                        # make one plot
                        # Single shot
                        ax = fig.add_subplot(221)
                        # Average over all shots/iteration
                        ax2 = fig.add_subplot(222)
                        ptr = 0
                        ca = np.array(self.counter_array)
                        for s in range(self.shots):
                            xs = np.arange(ptr, ptr + self.bins)
                            ax.bar(xs, ca[-1, s, self.graph_roi])
                            ax2.bar(xs, ca[:, s, self.graph_roi].mean(0))
                            ptr += max(1.05 * self.bins, self.bins + 1)
                        ax.set_title('Measurement: {}'.format(len(ca)))
                        ax2.set_title('Iteration average')

                        # time series of sum data
                        ax = fig.add_subplot(223)
                        # histogram of sum data
                        ax2 = fig.add_subplot(224)
                        # binned_array is still a plain list until the second
                        # measurement; coerce so `.shape` indexing is valid
                        binned = np.asarray(self.binned_array)
                        n_shots = binned.shape[2]
                        legends = []
                        for roi in range(binned.shape[3]):
                            for s in range(n_shots):
                                ax.plot(
                                    binned[:, :, s, roi].flatten(),
                                    '.')
                                # bins = max + 2 takes care of the case where all entries are 0, which casues
                                # an error in the plot
                                ax2.hist(
                                    binned[:, :, s, roi].flatten(),
                                    bins=np.arange(
                                        np.max(binned[:, :, s, roi].
                                               flatten()) + 2),
                                    histtype='step')
                                legends.append("c{}_s{}".format(roi, s))
                        ax.set_title('Binned Data')
                        ax2.legend(legends, fontsize='small', loc=0)
                        super(CounterAnalysis, self).updateFigure()

                    except Exception:
                        logger.exception(
                            'Problem in CounterAnalysis.updateFigure()')
                    finally:
                        self.update_lock = False
# --- Ejemplo n.º 17 ---
class StreamFile(Atom):
    """A recorded measurement stream backed by a file.

    Records are decoded lazily on first access.  Lookup by timestamp
    supports exact matching, nearest-neighbor selection and (for the
    supported datatypes) linear interpolation.
    """

    spec = Typed(StreamFileSpec)  # static description of the stream
    filename = Str()              # path of the backing file
    reader = Value()              # factory building a record reader from a buffer

    raw_data = Value()            # decoded measurement records
    timestamps = Coerced(np.ndarray)  # timestamps of the records
    count = Int()                 # number of records
    interval = Int()              # mean timestamp spacing (divided by MS_DIVIDER)
    values = List()               # payload extracted from each record

    # linear interpolation function matching the datatype, or None
    interpolator = Value(None)

    @property
    def fieldname(self):
        return self.spec.fieldname

    @property
    def datatype(self):
        return self.spec.datatype

    @property
    def is_array(self):
        return self.spec.is_array

    def _default_raw_data(self):
        """Read and decode all records from the backing file."""
        # a context manager guarantees the descriptor is closed even when
        # the reader raises part-way through (it used to leak in that case)
        with open(self.filename, "r") as fdesc:
            fdesc.seek(0)
            buf = util.streambuf(fdesc, 1024)
            log.info("Reading StreamFile: %s" % self.spec.filename)
            data = self.reader(buf).values()
        return data

    def _default_timestamps(self):
        """Timestamps of all records; warns when they are not ascending."""
        timestamps = np.array([m.time() for m in self.raw_data])
        if not np.all(np.diff(timestamps) > 0):
            # Logger.warn is a deprecated alias of Logger.warning
            log.warning("Timestamps in stream: %s are not ascending" % self.fieldname)
        return timestamps

    def _default_interval(self):
        return int(np.diff(np.asarray(self.timestamps)).mean() / MS_DIVIDER)

    def _default_count(self):
        return len(self.timestamps)

    def _default_values(self):
        return [m.get() for m in self.raw_data]

    def _default_interpolator(self):
        # interpolation is only defined for scalar (non-array) datatypes
        if not self.is_array:
            if self.datatype == "position2d":
                return math.linearInterpolateVector2
            elif self.datatype == "position3d":
                return math.linearInterpolateVector3
            elif self.datatype == "quat":
                return math.linearInterpolateQuaternion
            elif self.datatype == "pose":
                return math.linearInterpolatePose
        return None

    def get(self, idx, dts, selector=None):
        """Return the value at or near timestamp *dts*.

        Parameters
        ----------
        idx : int
            Direct index, used only when ``selector == 'index'``.
        dts : number
            Target timestamp.
        selector : str or None
            One of 'index', None/'matching', 'nearest', 'interpolate'.

        Raises
        ------
        NotImplementedError
            When interpolation is requested for an unsupported datatype.
        """
        src_ts = self.timestamps

        if selector == 'index':
            return self.values[idx]

        # compute index from dts
        idx = (np.abs(src_ts-dts)).argmin()

        # None == select only matching
        if selector is None or selector == "matching":
            if src_ts[idx] == dts:
                return self.values[idx]
            else:
                return None

        # find the two records bracketing dts (clamped at both ends)
        if dts < src_ts[idx]:
            if idx == 0:
                ts1 = src_ts[idx]
                idx1 = idx
            else:
                ts1 = src_ts[idx-1]
                idx1 = idx-1
            ts2 = src_ts[idx]
            idx2 = idx
        else:
            ts1 = src_ts[idx]
            idx1 = idx
            if not idx+1 < self.count:
                ts2 = src_ts[idx]
                idx2 = idx
            else:
                ts2 = src_ts[idx+1]
                idx2 = idx+1

        ediff = ts2 - ts1
        tdiff = dts - ts1

        # normalized position of dts between the bracketing timestamps
        h = 1.0
        if ediff != 0:
            h = float(tdiff) / float(ediff)

        if selector == "nearest":
            if h < 0.5:
                return self.values[idx1]
            else:
                return self.values[idx2]

        if self.interpolator is not None and selector == "interpolate":
            return self.interpolator(self.values[idx1], self.values[idx2], h)

        # fix: `raise NotImplemented(...)` was a TypeError at runtime since
        # NotImplemented is a sentinel value, not an exception class
        raise NotImplementedError("Interpolation for %s%s is not implemented." % (self.datatype, "-list" if self.is_array else ""))
class CalibrationStartController(CalibrationController):
    """First wizard page: collects the domain/setup/user/platform names
    and, on teardown, optionally resets calibration files to defaults.
    """

    save_results = False
    show_facade_controls = False

    domain_name = Str()    # calibration domain (DNS-style name)
    setup_name = Str()     # name of the hardware setup
    user_name = Str()      # operator name
    platform_name = Str()  # OS platform identifier

    def _config_lookup(self, option, default):
        """Return *option* from the [calibration_wizard] config section,
        or *default* when the config or the option is missing."""
        cfg = self.wizard_state.context.get('config')
        if cfg is not None and cfg.has_option("calibration_wizard", option):
            return cfg.get("calibration_wizard", option)
        return default

    def _default_domain_name(self):
        return self._config_lookup("domain", "ubitrack.local")

    def _default_setup_name(self):
        return self._config_lookup("setup", "default")

    def _default_user_name(self):
        return self._config_lookup("user", "default")

    def _default_platform_name(self):
        return self._config_lookup("platform", sys.platform)

    def setupController(self, active_widgets=None):
        """Publish the chosen names on the shared wizard state."""
        super(CalibrationStartController,
              self).setupController(active_widgets=active_widgets)
        self.wizard_state.calibration_domain_name = self.domain_name
        self.wizard_state.calibration_setup_name = self.setup_name
        self.wizard_state.calibration_user_name = self.user_name
        self.wizard_state.calibration_platform_name = self.platform_name

    def teardownController(self, active_widgets=None):
        """If requested, reset every enabled module's calibration files.

        Each file is restored from the configured default (section
        '<config_ns>.initialize_files') when one exists, otherwise deleted.
        """
        if self.wizard_state.calibration_existing_delete_files:
            cfg = self.context.get("config")
            sname = "%s.initialize_files" % (self.config_ns, )
            init_files = {}
            if cfg.has_section(sname):
                init_files = dict(cfg.items(sname))

            for m in self.wizard_state.module_manager.modules.values():
                if m.is_enabled():
                    cfs = m.get_calib_files()
                    for cf in cfs:
                        bb_cf = os.path.basename(cf)
                        if bb_cf in init_files:
                            if os.path.isfile(init_files[bb_cf]):
                                log.info(
                                    "Set calibration file to default: %s" %
                                    bb_cf)
                                shutil.copyfile(init_files[bb_cf], cf)
                            else:
                                # warn() is a deprecated alias of warning()
                                log.warning(
                                    "Missing default calibration file: %s" %
                                    init_files[bb_cf])
                        elif os.path.isfile(cf):
                            log.info("Deleting calibration file: %s" % cf)
                            os.unlink(cf)
# --- Ejemplo n.º 19 ---
class Hello(Atom):

    # Greeting text exposed by this simple Atom model.
    message = Str('Hello')
# --- Ejemplo n.º 20 ---
class DrawImageAdvanced(Atom):
    """
    This class performs 2D image rendering, such as showing multiple
    2D fitting or roi images based on user's selection.

    Attributes
    ----------
    img_data : dict
        dict of 2D array
    fig : object
        matplotlib Figure
    file_name : str
    stat_dict : dict
        determine which image to show
    data_dict : dict
        multiple data sets to plot, such as fit data, or roi data
    data_dict_keys : list
    data_opt : int
        index to show which data is chosen to plot
    dict_to_plot : dict
        selected data dict to plot, i.e., fitting data or roi is selected
    items_in_selected_group : list
        keys of dict_to_plot
    scale_opt : str
        linear or log plot
    color_opt : str
        orange or gray plot
    scaler_norm_dict : dict
        scaler normalization data, from data_dict
    scaler_items : list
        keys of scaler_norm_dict
    scaler_name_index : int
        index to select on GUI level
    scaler_data : None or numpy
        selected scaler data
    x_pos : list
        define data range in horizontal direction
    y_pos : list
        define data range in vertical direction
    pixel_or_pos : int
        index to choose plot with pixel (== 0) or with positions (== 1)
    grid_interpolate: bool
        choose to interpolate 2D image in terms of x,y or not
    limit_dict : Dict
        save low and high limit for image scaling
    """

    fig = Typed(Figure)
    stat_dict = Dict()
    data_dict = Dict()
    data_dict_keys = List()
    data_opt = Int(0)
    dict_to_plot = Dict()
    items_in_selected_group = List()
    items_previous_selected = List()

    scale_opt = Str('Linear')
    color_opt = Str('viridis')
    img_title = Str()

    scaler_norm_dict = Dict()
    scaler_items = List()
    scaler_name_index = Int()
    scaler_data = Typed(object)

    x_pos = List()
    y_pos = List()

    pixel_or_pos = Int(0)
    grid_interpolate = Bool(False)
    data_dict_default = Dict()
    limit_dict = Dict()
    range_dict = Dict()
    scatter_show = Bool(False)
    name_not_scalable = List()

    def __init__(self):
        """Create the drawing figure and define the non-scalable data names."""
        self.fig = plt.figure(figsize=(3, 2))
        matplotlib.rcParams['axes.formatter.useoffset'] = True

        # Do not apply scaler norm on following data
        self.name_not_scalable = [
            'r2_adjust', 'alive', 'dead', 'elapsed_time', 'scaler_alive',
            'i0_time', 'time', 'time_diff', 'dwell_time'
        ]

    def data_dict_update(self, change):
        """
        Observer function to be connected to the fileio model
        in the top-level gui.py startup

        Parameters
        ----------
        change : dict
            This is the dictionary that gets passed to a function
            with the @observe decorator
        """
        self.data_dict = change['value']

    def set_default_dict(self, data_dict):
        """Keep a deep copy of *data_dict* so the data can be restored later."""
        self.data_dict_default = copy.deepcopy(data_dict)

    @observe('data_dict')
    def init_plot_status(self, change):
        """Initialize the plotting state whenever a new data dict arrives.

        Picks up the scaler group (if present), reads the x/y position
        arrays, restores the previously selected items as defaults, resets
        the plot options and redraws the image.
        """
        # any group whose name contains 'scaler' provides normalization data
        scaler_groups = [
            v for v in list(self.data_dict.keys()) if 'scaler' in v
        ]
        if len(scaler_groups) > 0:
            # self.scaler_group_name = scaler_groups[0]
            self.scaler_norm_dict = self.data_dict[scaler_groups[0]]
            # for GUI purpose only
            self.scaler_items = []
            self.scaler_items = list(self.scaler_norm_dict.keys())
            self.scaler_items.sort()
            self.scaler_data = None

        # init of pos values
        self.pixel_or_pos = 0

        if 'positions' in self.data_dict:
            try:
                logger.debug(
                    f"Position keys: {list(self.data_dict['positions'].keys())}"
                )
                self.x_pos = list(self.data_dict['positions']['x_pos'][0, :])
                self.y_pos = list(self.data_dict['positions']['y_pos'][:, -1])
                # when we use imshow, the x and y start at lower left,
                # so flip y, we want y starts from top left
                self.y_pos.reverse()

            except KeyError:
                pass
        else:
            self.x_pos = []
            self.y_pos = []

        self.get_default_items()  # use previous defined elements as default
        logger.info('Use previously selected items as default: {}'.format(
            self.items_previous_selected))

        # initiate the plotting status once new data is coming
        self.reset_to_default()
        self.data_dict_keys = []
        self.data_dict_keys = list(self.data_dict.keys())
        logger.debug(
            'The following groups are included for 2D image display: {}'.
            format(self.data_dict_keys))

        self.show_image()

    def reset_to_default(self):
        """Set variables to default values as initiated.

        Clears the dataset choice, resets the scaler selection and
        deselects every image.
        """
        self.data_opt = 0
        # init of scaler for normalization
        self.scaler_name_index = 0
        self.plot_deselect_all()

    def get_default_items(self):
        """Re-register previously selected items as the default selection.

        Searches every data group for each remembered item name and stores
        the matches under the 'use_default_selection' key of the data dict.
        """
        if not self.items_previous_selected:
            return
        default_items = {}
        for item in self.items_previous_selected:
            for group in self.data_dict.values():
                if item in group:
                    default_items[item] = group[item]
        self.data_dict['use_default_selection'] = default_items

    @observe('data_opt')
    def _update_file(self, change):
        """React to a new dataset choice; index 0 means 'nothing selected'."""
        try:
            if self.data_opt == 0:
                # deselect: clear the plotting dict and the wizard table
                self.dict_to_plot = {}
                self.items_in_selected_group = []
                self.set_stat_for_all(bool_val=False)
                self.img_title = ''
            elif self.data_opt > 0:
                # self.set_stat_for_all(bool_val=False)
                # data_opt is 1-based into the sorted list of group names
                plot_item = sorted(self.data_dict_keys)[self.data_opt - 1]
                self.img_title = str(plot_item)
                self.dict_to_plot = self.data_dict[plot_item]
                self.set_stat_for_all(bool_val=False)

                self.update_img_wizard_items()
                self.get_default_items(
                )  # get default elements every time when fitting is done

        except IndexError:
            pass

    @observe('scaler_name_index')
    def _get_scaler_data(self, change):
        """Select the scaler dataset used for normalization (0 disables it)."""
        if change['type'] == 'create':
            return

        if self.scaler_name_index == 0:
            self.scaler_data = None
        else:
            # scaler_name_index is 1-based into the sorted scaler list
            try:
                scaler_name = self.scaler_items[self.scaler_name_index - 1]
            except IndexError:
                scaler_name = None
            if scaler_name:
                self.scaler_data = self.scaler_norm_dict[scaler_name]
                logger.info('Use scaler data to normalize, '
                            'and the shape of scaler data is {}, '
                            'with (low, high) as ({}, {})'.format(
                                self.scaler_data.shape,
                                np.min(self.scaler_data),
                                np.max(self.scaler_data)))
        self.set_low_high_value(
        )  # reset low high values based on normalization
        self.show_image()
        self.update_img_wizard_items()

    def update_img_wizard_items(self):
        """This is for GUI purpose only.
        Table items will not be updated if list items keep the same.
        """
        # reset to an empty list first so the change is registered even
        # when the key set is unchanged (see docstring)
        self.items_in_selected_group = []
        self.items_in_selected_group = list(self.dict_to_plot.keys())

    def format_img_wizard_limit(self, value):
        """
        Format a range value for display in the 'Image Wizard'.

        Values whose magnitude lies outside [1e-3, 1e3] are rendered in
        scientific notation; everything else uses fixed-point notation,
        so the enaml field always shows adequate precision.

        ..note::

        The function is called externally from 'enaml' code.

        Parameters:
        ===========
        value : float
            The value to be formatted

        Returns:
        ========
        str - the string representation of the floating point variable
        """
        # magnitude 0 is used for value == 0 to select fixed-point output
        magnitude = math.log10(abs(value)) if value != 0 else 0
        if -3 <= magnitude <= 3:
            return f"{value:.6f}"
        return f"{value:.6e}"

    @observe('scale_opt', 'color_opt')
    def _update_scale(self, change):
        """Redraw when the scale or colormap changes (skip the initial create)."""
        if change['type'] != 'create':
            self.show_image()

    @observe('pixel_or_pos')
    def _update_pp(self, change):
        """Redraw when switching between pixel and position axes."""
        self.show_image()

    @observe('grid_interpolate')
    def _update_gi(self, change):
        """Redraw when grid interpolation is toggled."""
        self.show_image()

    def plot_select_all(self):
        """Mark every image in the selected group for plotting."""
        self.set_stat_for_all(bool_val=True)

    def plot_deselect_all(self):
        """Unmark every image in the selected group."""
        self.set_stat_for_all(bool_val=False)

    @observe('scatter_show')
    def _change_image_plot_method(self, change):
        """Redraw when toggling image/scatter rendering (skip the initial create)."""
        if change['type'] != 'create':
            self.show_image()

    def set_stat_for_all(self, bool_val=False):
        """
        Set plotting status for all the 2D images, including low and high values.
        """
        keys = list(self.dict_to_plot.keys())

        # clear first, then reassign (pattern used throughout this class)
        self.stat_dict.clear()
        self.stat_dict = dict.fromkeys(keys, bool_val)

        self.limit_dict.clear()
        self.limit_dict = {key: {'low': 0.0, 'high': 100.0} for key in keys}

        self.set_low_high_value()

    def set_low_high_value(self):
        """Set default low and high values based on normalization for each image.
        """
        # do not apply scaler norm on not scalable data
        self.range_dict.clear()
        for data_name in self.dict_to_plot.keys():
            # the normalized data determines the displayed value range
            data_arr = normalize_data_by_scaler(
                self.dict_to_plot[data_name],
                self.scaler_data,
                data_name=data_name,
                name_not_scalable=self.name_not_scalable)
            lowv = np.min(data_arr)
            highv = np.max(data_arr)
            # keep *_default copies so the user range can be reset later
            self.range_dict[data_name] = {
                'low': lowv,
                'low_default': lowv,
                'high': highv,
                'high_default': highv
            }

    def reset_low_high(self, name):
        """Reset low and high value to default based on normalization.
        """
        self.range_dict[name]['low'] = self.range_dict[name]['low_default']
        self.range_dict[name]['high'] = self.range_dict[name]['high_default']
        # percentage limits go back to the full 0-100% window
        self.limit_dict[name]['low'] = 0.0
        self.limit_dict[name]['high'] = 100.0
        self.update_img_wizard_items()
        self.show_image()

    def show_image(self):
        self.fig.clf()
        stat_temp = self.get_activated_num()
        stat_temp = OrderedDict(
            sorted(six.iteritems(stat_temp), key=lambda x: x[0]))

        # Check if positions data is available. Positions data may be unavailable
        # (not recorded in HDF5 file) if experiment is has not been completed.
        # While the data from the completed part of experiment may still be used,
        # plotting vs. x-y or scatter plot may not be displayed.
        positions_data_available = False
        if 'positions' in self.data_dict.keys():
            positions_data_available = True

        # Create local copies of self.pixel_or_pos, self.scatter_show and self.grid_interpolate
        pixel_or_pos_local = self.pixel_or_pos
        scatter_show_local = self.scatter_show
        grid_interpolate_local = self.grid_interpolate

        # Disable plotting vs x-y coordinates if 'positions' data is not available
        if not positions_data_available:
            if pixel_or_pos_local:
                pixel_or_pos_local = 0  # Switch to plotting vs. pixel number
                logger.error(
                    "'Positions' data is not available. Plotting vs. x-y coordinates is disabled"
                )
            if scatter_show_local:
                scatter_show_local = False  # Switch to plotting vs. pixel number
                logger.error(
                    "'Positions' data is not available. Scatter plot is disabled."
                )
            if grid_interpolate_local:
                grid_interpolate_local = False  # Switch to plotting vs. pixel number
                logger.error(
                    "'Positions' data is not available. Interpolation is disabled."
                )

        low_lim = 1e-4  # define the low limit for log image
        plot_interp = 'Nearest'

        if self.scaler_data is not None:
            if np.count_nonzero(self.scaler_data) == 0:
                logger.warning('scaler is zero - scaling was not applied')
            elif len(self.scaler_data[self.scaler_data == 0]) > 0:
                logger.warning('scaler data has zero values')

        grey_use = self.color_opt

        ncol = int(np.ceil(np.sqrt(len(stat_temp))))
        try:
            nrow = int(np.ceil(len(stat_temp) / float(ncol)))
        except ZeroDivisionError:
            ncol = 1
            nrow = 1

        a_pad_v = 0.8
        a_pad_h = 0.5

        grid = ImageGrid(self.fig,
                         111,
                         nrows_ncols=(nrow, ncol),
                         axes_pad=(a_pad_v, a_pad_h),
                         cbar_location='right',
                         cbar_mode='each',
                         cbar_size='7%',
                         cbar_pad='2%',
                         share_all=True)

        def _compute_equal_axes_ranges(x_min, x_max, y_min, y_max):
            """
            Compute ranges for x- and y- axes of the plot. Make sure that the ranges for x- and y-axes are
            always equal and fit the maximum of the ranges for x and y values:
                  max(abs(x_max-x_min), abs(y_max-y_min))
            The ranges are set so that the data is always centered in the middle of the ranges

            Parameters
            ----------

            x_min, x_max, y_min, y_max : float
                lower and upper boundaries of the x and y values

            Returns
            -------

            x_axis_min, x_axis_max, y_axis_min, y_axis_max : float
                lower and upper boundaries of the x- and y-axes ranges
            """

            x_axis_min, x_axis_max, y_axis_min, y_axis_max = x_min, x_max, y_min, y_max
            x_range, y_range = abs(x_max - x_min), abs(y_max - y_min)
            if x_range > y_range:
                y_center = (y_max + y_min) / 2
                y_axis_max = y_center + x_range / 2
                y_axis_min = y_center - x_range / 2
            else:
                x_center = (x_max + x_min) / 2
                x_axis_max = x_center + y_range / 2
                x_axis_min = x_center - y_range / 2

            return x_axis_min, x_axis_max, y_axis_min, y_axis_max

        def _adjust_data_range_using_min_ratio(c_min,
                                               c_max,
                                               c_axis_range,
                                               *,
                                               min_ratio=0.01):
            """
            Adjust the range for plotted data along one axis (x or y). The adjusted range is
            applied to the 'extend' attribute of imshow(). The adjusted range is always greater
            than 'axis_range * min_ratio'. Such transformation has no physical meaning
            and performed for aesthetic reasons: stretching the image presentation of
            a scan with only a few lines (1-3) greatly improves visibility of data.

            Parameters
            ----------

            c_min, c_max : float
                boundaries of the data range (along x or y axis)
            c_axis_range : float
                range presented along the same axis

            Returns
            -------

            cmin, c_max : float
                adjusted boundaries of the data range
            """
            c_range = c_max - c_min
            if c_range < c_axis_range * min_ratio:
                c_center = (c_max + c_min) / 2
                c_new_range = c_axis_range * min_ratio
                c_min = c_center - c_new_range / 2
                c_max = c_center + c_new_range / 2
            return c_min, c_max

        for i, (k, v) in enumerate(six.iteritems(stat_temp)):

            data_dict = normalize_data_by_scaler(
                data_in=self.dict_to_plot[k],
                scaler=self.scaler_data,
                data_name=k,
                name_not_scalable=self.name_not_scalable)

            if pixel_or_pos_local or scatter_show_local:

                # xd_min, xd_max, yd_min, yd_max = min(self.x_pos), max(self.x_pos),
                #     min(self.y_pos), max(self.y_pos)
                x_pos_2D = self.data_dict['positions']['x_pos']
                y_pos_2D = self.data_dict['positions']['y_pos']
                xd_min, xd_max, yd_min, yd_max = x_pos_2D.min(), x_pos_2D.max(
                ), y_pos_2D.min(), y_pos_2D.max()
                xd_axis_min, xd_axis_max, yd_axis_min, yd_axis_max = \
                    _compute_equal_axes_ranges(xd_min, xd_max, yd_min, yd_max)

                xd_min, xd_max = _adjust_data_range_using_min_ratio(
                    xd_min, xd_max, xd_axis_max - xd_axis_min)
                yd_min, yd_max = _adjust_data_range_using_min_ratio(
                    yd_min, yd_max, yd_axis_max - yd_axis_min)

                # Adjust the direction of each axis depending on the direction in which encoder values changed
                #   during the experiment. Data is plotted starting from the upper-right corner of the plot
                if x_pos_2D[0, 0] > x_pos_2D[0, -1]:
                    xd_min, xd_max, xd_axis_min, xd_axis_max = xd_max, xd_min, xd_axis_max, xd_axis_min
                if y_pos_2D[0, 0] > y_pos_2D[-1, 0]:
                    yd_min, yd_max, yd_axis_min, yd_axis_max = yd_max, yd_min, yd_axis_max, yd_axis_min

            else:

                yd, xd = data_dict.shape

                xd_min, xd_max, yd_min, yd_max = 0, xd, 0, yd
                if (yd <= math.floor(xd / 100)) and (xd >= 200):
                    yd_min, yd_max = -math.floor(xd / 200), math.ceil(xd / 200)
                if (xd <= math.floor(yd / 100)) and (yd >= 200):
                    xd_min, xd_max = -math.floor(yd / 200), math.ceil(yd / 200)

                xd_axis_min, xd_axis_max, yd_axis_min, yd_axis_max = \
                    _compute_equal_axes_ranges(xd_min, xd_max, yd_min, yd_max)

            if self.scale_opt == 'Linear':

                low_ratio = self.limit_dict[k]['low'] / 100.0
                high_ratio = self.limit_dict[k]['high'] / 100.0
                if self.scaler_data is None:
                    minv = self.range_dict[k]['low']
                    maxv = self.range_dict[k]['high']
                else:
                    # Unfortunately, the new normalization procedure requires to recalculate min and max values
                    minv = np.min(data_dict)
                    maxv = np.max(data_dict)
                low_limit = (maxv - minv) * low_ratio + minv
                high_limit = (maxv - minv) * high_ratio + minv

                # Set some minimum range for the colorbar (otherwise it will have white fill)
                if math.isclose(low_limit, high_limit, abs_tol=2e-20):
                    if abs(low_limit) < 1e-20:  # The value is zero
                        dv = 1e-20
                    else:
                        dv = math.fabs(low_limit * 0.01)
                    high_limit += dv
                    low_limit -= dv

                if not scatter_show_local:
                    if grid_interpolate_local:
                        data_dict, _, _ = grid_interpolate(
                            data_dict, self.data_dict['positions']['x_pos'],
                            self.data_dict['positions']['y_pos'])
                    im = grid[i].imshow(data_dict,
                                        cmap=grey_use,
                                        interpolation=plot_interp,
                                        extent=(xd_min, xd_max, yd_max,
                                                yd_min),
                                        origin='upper',
                                        clim=(low_limit, high_limit))
                    grid[i].set_ylim(yd_axis_max, yd_axis_min)
                else:
                    xx = self.data_dict['positions']['x_pos']
                    yy = self.data_dict['positions']['y_pos']

                    # The following condition prevents crash if different file is loaded while
                    #    the scatter plot is open (PyXRF specific issue)
                    if data_dict.shape == xx.shape and data_dict.shape == yy.shape:
                        im = grid[i].scatter(
                            xx,
                            yy,
                            c=data_dict,
                            marker='s',
                            s=500,
                            alpha=1.0,  # Originally: alpha=0.8
                            cmap=grey_use,
                            vmin=low_limit,
                            vmax=high_limit,
                            linewidths=1,
                            linewidth=0)
                        grid[i].set_ylim(yd_axis_max, yd_axis_min)

                grid[i].set_xlim(xd_axis_min, xd_axis_max)

                grid_title = k
                grid[i].text(0,
                             1.01,
                             grid_title,
                             ha='left',
                             va='bottom',
                             transform=grid[i].axes.transAxes)

                grid.cbar_axes[i].colorbar(im)
                im.colorbar.formatter = im.colorbar.cbar_axis.get_major_formatter(
                )
                # im.colorbar.ax.get_xaxis().set_ticks([])
                # im.colorbar.ax.get_xaxis().set_ticks([], minor=True)
                grid.cbar_axes[i].ticklabel_format(style='sci',
                                                   scilimits=(-3, 4),
                                                   axis='both')

                #  Do not remove this code, may be useful in the future (Dmitri G.) !!!
                #  Print label for colorbar
                # cax = grid.cbar_axes[i]
                # axis = cax.axis[cax.orientation]
                # axis.label.set_text("$[a.u.]$")

            else:

                maxz = np.max(data_dict)
                # Set some reasonable minimum range for the colorbar
                #   Zeros or negative numbers will be shown in white
                if maxz <= 1e-30:
                    maxz = 1

                if not scatter_show_local:
                    if grid_interpolate_local:
                        data_dict, _, _ = grid_interpolate(
                            data_dict, self.data_dict['positions']['x_pos'],
                            self.data_dict['positions']['y_pos'])
                    im = grid[i].imshow(data_dict,
                                        norm=LogNorm(vmin=low_lim * maxz,
                                                     vmax=maxz,
                                                     clip=True),
                                        cmap=grey_use,
                                        interpolation=plot_interp,
                                        extent=(xd_min, xd_max, yd_max,
                                                yd_min),
                                        origin='upper',
                                        clim=(low_lim * maxz, maxz))
                    grid[i].set_ylim(yd_axis_max, yd_axis_min)
                else:
                    im = grid[i].scatter(
                        self.data_dict['positions']['x_pos'],
                        self.data_dict['positions']['y_pos'],
                        norm=LogNorm(vmin=low_lim * maxz, vmax=maxz,
                                     clip=True),
                        c=data_dict,
                        marker='s',
                        s=500,
                        alpha=1.0,  # Originally: alpha=0.8
                        cmap=grey_use,
                        linewidths=1,
                        linewidth=0)
                    grid[i].set_ylim(yd_axis_min, yd_axis_max)

                grid[i].set_xlim(xd_axis_min, xd_axis_max)

                grid_title = k
                grid[i].text(0,
                             1.01,
                             grid_title,
                             ha='left',
                             va='bottom',
                             transform=grid[i].axes.transAxes)

                grid.cbar_axes[i].colorbar(im)
                im.colorbar.formatter = im.colorbar.cbar_axis.get_major_formatter(
                )
                im.colorbar.ax.get_xaxis().set_ticks([])
                im.colorbar.ax.get_xaxis().set_ticks([], minor=True)
                im.colorbar.cbar_axis.set_minor_formatter(
                    mticker.LogFormatter())

            grid[i].get_xaxis().set_major_locator(
                mticker.MaxNLocator(nbins="auto"))
            grid[i].get_yaxis().set_major_locator(
                mticker.MaxNLocator(nbins="auto"))

            grid[i].get_xaxis().get_major_formatter().set_useOffset(False)
            grid[i].get_yaxis().get_major_formatter().set_useOffset(False)

        self.fig.suptitle(self.img_title, fontsize=20)
        self.fig.canvas.draw_idle()

    def get_activated_num(self):
        """Return the items currently selected for plotting.

        Returns
        -------
        dict
            Subset of ``stat_dict`` restricted to the entries whose value
            is True.
        """
        selected = {}
        for key, flag in six.iteritems(self.stat_dict):
            if flag is True:
                selected[key] = flag
        return selected

    def record_selected(self):
        """Cache the list of currently selected items for later use.

        Stores the selection under 'use_default_selection' in ``data_dict``
        and refreshes ``data_dict_keys``.
        """
        selected = [key for key, flag in self.stat_dict.items()
                    if flag is True]
        self.items_previous_selected = selected
        logger.info('Items are set as default: {}'.format(selected))
        self.data_dict['use_default_selection'] = {
            key: self.dict_to_plot[key] for key in selected
        }
        self.data_dict_keys = list(self.data_dict.keys())
Ejemplo n.º 21
0
class AbstractPattern(UTQLSchemaEntity):
    """Schema entity describing a UTQL pattern (name plus display metadata)."""
    # Internal identifier of the pattern.
    name = Str()
    # Human-readable name — presumably used for display in UIs; confirm.
    displayName = Str()
    # Free-form textual description of the pattern.
    description = Str()
Ejemplo n.º 22
0
class SpinBox(Control):
    """ A spin box widget which manipulates integer values.

    The value is always clipped to the [minimum, maximum] interval, and
    the bounds are kept mutually consistent by the post-setattr handlers
    defined below.
    """
    #: The minimum value for the spin box. Defaults to 0.
    minimum = d_(Int(0))

    #: The maximum value for the spin box. Defaults to 100.
    maximum = d_(Int(100))

    #: The position value of the spin box. The value will be clipped to
    #: always fall between the minimum and maximum.
    value = d_(Int(0))

    #: An optional prefix to include in the displayed text.
    prefix = d_(Str())

    #: An optional suffix to include in the displayed text.
    suffix = d_(Str())

    #: Optional text to display when the spin box is at its minimum.
    #: This allows the developer to indicate to the user a special
    #: significance to the minimum value e.g. "Auto"
    special_value_text = d_(Str())

    #: The step size for the spin box. Defaults to 1.
    single_step = d_(Range(low=1))

    #: Whether or not the spin box is read-only. If True, the user
    #: will not be able to edit the values in the spin box, but they
    #: will still be able to copy the text to the clipboard.
    read_only = d_(Bool(False))

    #: Whether or not the spin box will wrap around at its extremes.
    #: Defaults to False.
    wrapping = d_(Bool(False))

    #: A spin box expands freely in width by default.
    hug_width = set_default('ignore')

    #: A reference to the ProxySpinBox object.
    proxy = Typed(ProxySpinBox)

    #--------------------------------------------------------------------------
    # Observers
    #--------------------------------------------------------------------------
    @observe('minimum', 'maximum', 'value', 'prefix', 'suffix',
             'special_value_text', 'single_step', 'read_only', 'wrapping')
    def _update_proxy(self, change):
        """ An observer which sends state change to the proxy.

        """
        # The superclass handler implementation is sufficient.
        super(SpinBox, self)._update_proxy(change)

    #--------------------------------------------------------------------------
    # PostSetAttr Handlers
    #--------------------------------------------------------------------------
    def _post_setattr_minimum(self, old, new):
        """ Post setattr the minimum value for the spin box.

        If the new minimum is greater than the current value or maximum,
        those values are adjusted up.

        """
        # NOTE: assigning to 'maximum'/'value' here re-enters their own
        # post-setattr/post-validate handlers, presumably keeping all three
        # members mutually consistent — confirm against atom semantics.
        if new > self.maximum:
            self.maximum = new
        if new > self.value:
            self.value = new

    def _post_setattr_maximum(self, old, new):
        """ Post setattr the maximum value for the spin box.

        If the new maximum is less than the current value or the minimum,
        those values are adjusted down.

        """
        # Mirror of _post_setattr_minimum: pull the other bounds down.
        if new < self.minimum:
            self.minimum = new
        if new < self.value:
            self.value = new

    #--------------------------------------------------------------------------
    # Post Validation Handlers
    #--------------------------------------------------------------------------
    def _post_validate_value(self, old, new):
        """ Post validate the value for the spin box.

        The value is clipped to minimum and maximum bounds.

        """
        return max(self.minimum, min(new, self.maximum))
Ejemplo n.º 23
0
class TemplateInstanceNode(CompilerNode):
    """ A compiler node representing the instantiation of a template.

    """
    #: The template node invoked to generate the objects.
    template = Typed(TemplateNode)

    #: The named identifiers for the instantiated objects.
    names = Tuple()

    #: The starname identifier for the instantiated objects.
    starname = Str()

    def __call__(self, parent):
        """ Build the objects by invoking the template instantiation.

        Any named identifiers (and the optional starname) are bound in
        the currently active local scope.

        Parameters
        ----------
        parent : Declarative
            The parent declarative object for the instantiation.

        """
        objects = self.template(parent)
        scope = peek_scope()
        if self.names:
            for ident, obj in zip(self.names, objects):
                scope[ident] = obj
        if self.starname:
            scope[self.starname] = tuple(objects[len(self.names):])
        return objects

    def update_id_nodes(self, mapping):
        """ Fill the mapping with the identifier info for this node.

        Parameters
        ----------
        mapping : sortedmap
            The mapping to fill with the identifier information.

        """
        if self.names:
            node_iter = self.iternodes()
            for ident in self.names:
                mapping[ident] = next(node_iter)
        super(TemplateInstanceNode, self).update_id_nodes(mapping)

    def size(self):
        """ Return the size of the instantiated node.

        """
        return self.template.size()

    def iternodes(self):
        """ Iterate over the unrolled nodes of the instantiation.

        Returns
        -------
        result : generator
            A generator yielding the unrolled nodes of the template
            instantiation.

        """
        return self.template.iternodes()

    def copy(self):
        """ Create a copy of the node.

        """
        new = super(TemplateInstanceNode, self).copy()
        new.template = self.template.copy()
        new.names = self.names
        new.starname = self.starname
        return new
Ejemplo n.º 24
0
class SetRFFrequencyTask(InterfaceableTaskMixin, InstrumentTask):
    """Set the frequency of the signal delivered by a RF source.

    """
    # Target frequency (dynamically evaluated)
    frequency = Str().tag(pref=True)

    # Unit of the frequency
    unit = Enum('GHz', 'MHz', 'kHz', 'Hz').tag(pref=True)

    # Whether to start the source if its output is off.
    auto_start = Bool(False).tag(pref=True)

    task_database_entries = set_default({'frequency': 1.0, 'unit': 'GHz'})
    loopable = True
    driver_list = ['AgilentE8257D', 'AnritsuMG3694', 'LabBrickLMS103']

    def check(self, *args, **kwargs):
        """Attempt to evaluate the frequency formula and fill the database.

        """
        test, traceback = super(SetRFFrequencyTask,
                                self).check(*args, **kwargs)
        if self.frequency:
            try:
                freq = self.format_and_eval_string(self.frequency)
                self.write_in_database('frequency', freq)
            except Exception as e:
                test = False
                err_path = self.task_path + '/' + self.task_name + '-freq'
                traceback[err_path] = \
                    cleandoc('''Failed to eval the frequency
                        formula {}: {}'''.format(self.frequency, e))

        self.write_in_database('unit', self.unit)

        return test, traceback

    def i_perform(self, frequency=None):
        """Apply the frequency to the driver, starting it first if needed.

        """
        if not self.driver:
            self.start_driver()
            # Only switch the output on when the driver was just created.
            if self.auto_start:
                self.driver.output = 'On'

        if frequency is None:
            frequency = self.format_and_eval_string(self.frequency)

        self.driver.frequency_unit = self.unit
        self.driver.frequency = frequency
        self.write_in_database('frequency', frequency)

    def convert(self, frequency, unit):
        """ Convert a frequency to the given unit.

        Parameters
        ----------
        frequency : float
            Frequency expressed in the task unit

        unit : {'Hz', 'kHz', 'MHz', 'GHz'}
            Unit in which to express the result

        Returns
        -------
        converted_frequency : float

        """
        factor = CONVERSION_FACTORS[self.unit][unit]
        return frequency * factor
Ejemplo n.º 25
0
class Aerotechs(Instrument, Analysis):
    """Instrument wrapper around a set of Aerotech motion stages.

    Communication is presumably done through an Aerotech Ensemble
    controller (see ``Controller.Connect()`` below); updated positions are
    pushed to the hardware on every iteration.
    """
    # Version stamp stored with the instrument properties.
    version = '2015.06.22'
    # Network settings; not referenced by any method visible here.
    IP = Str()
    port = Int()
    # List of individual Aerotech stages (populated in __init__).
    motors = Member()
    socket = Member()
    # Wrapper around a single Aerotech Ensemble used for global communication.
    OneAerotech = Member()

    def __init__(self, name, experiment, description=''):
        """Create the motor list and register the persisted properties."""
        super(Aerotechs, self).__init__(name, experiment, description)
        self.motors = ListProp('motors',
                               experiment,
                               'A list of individual Aerotech stages',
                               listElementType=Aerotech,
                               listElementName='motor')
        self.OneAerotech = Aerotech('OneAerotech', experiment,
                                    'One Aerotech Ensemble')
        self.properties += ['version', 'IP', 'port', 'motors', 'OneAerotech']

    def launchServer(self):
        """No-op: launching the external C# server is disabled (see the
        commented-out command below)."""
        #subprocess.Popen(["C:\\Windows\\System32\\cmd.exe","/C","..\\csharp\\Aerotech_Ensemble_Server\\bin\\Debug\\Ensemble Console Example CSharp.exe"], creationflags=subprocess.CREATE_NEW_CONSOLE)
        return

    def preExperiment(self, hdf5):
        """Open the TCP socket"""
        if self.enable:
            # Use the first controller returned by the Ensemble library.
            self.OneAerotech.myController = Controller.Connect()[0]
            logger.debug("Aerotech: preExperiment: sending WaitForGlobals")
            self.OneAerotech.waitForGlobals()
            # TODO: add here some sort of communications check to see if it worked

            self.isInitialized = True

    def postMeasurement(self, callback, measurementresults, iterationresults,
                        hdf5):
        """Delegate entirely to the superclass implementation."""
        super(Aerotechs, self).postMeasurement(callback, measurementresults,
                                               iterationresults, hdf5)
        return

    def postIteration(self, iterationresults, hdf5):
        """Resynchronize globals after each iteration when enabled."""
        if self.enable:
            self.OneAerotech.waitForGlobals()
        super(Aerotechs, self).postIteration(iterationresults, hdf5)
        return

    def postExperiment(self, hdf5):
        """No-op: nothing to clean up after the experiment."""
        return

    def finalize(self, hdf5):
        """No-op: no finalization required."""
        return

    def preIteration(self, iterationresults, hdf5):
        """
        Every iteration, send the motors updated positions.
        """
        #print "Running aerotech preIteration"
        # Lazily (re)initialize if a previous failure cleared the flag.
        if (not self.isInitialized):
            self.preExperiment(hdf5)
        if self.enable:
            # NOTE(review): 'msg' is never assigned a real message, so the
            # first placeholder in the error log below is always empty.
            msg = ''
            try:
                self.OneAerotech.update()
            except Exception as e:
                logger.error(
                    'Problem setting Aerotech globals, closing socket:\n{}\n{}\n'
                    .format(msg, e))
                self.isInitialized = False
                raise PauseError

    def parUpdate(self):
        """
        Passes WaitForGlobals value update and then runs the preiteration step
        :return:
        """
        # NOTE(review): preIteration is invoked both before and after
        # waitForGlobals — presumably intentional to resynchronize; confirm.
        self.preIteration(0, 0)
        self.OneAerotech.waitForGlobals()
        self.preIteration(0, 0)
        return

    def start(self):
        """Mark the instrument as done; no per-measurement action needed."""
        self.isDone = True
        return

    def update(self):
        """Push the updated positions (same as the pre-iteration step)."""
        self.preIteration(0, 0)
        return

    def initialize(self):
        """Initialize by running the pre-experiment connection step."""
        self.preExperiment(0)
Ejemplo n.º 26
0
class InstrManagerPlugin(HasPrefPlugin):
    """Plugin in charge of instrument drivers and connection profiles.

    It exposes the known driver types, drivers and profiles, and arbitrates
    which plugin is allowed to use a given profile at any time.
    """
    # --- Public API ----------------------------------------------------------

    # Directories in which the profiles are looked for.
    profiles_folders = List(
        Unicode(), [os.path.join(PACKAGE_PATH, 'profiles')]).tag(pref=True)

    # Drivers loading exception — presumably names of drivers excluded from
    # automatic loading; confirm against the loading machinery.
    drivers_loading = List(Unicode()).tag(pref=True)

    # Name of the known driver types.
    driver_types = List()

    # Name of the known drivers.
    drivers = List()

    # Name of the known profiles.
    all_profiles = List()

    # Name of the currently available (i.e. not in use) profiles.
    available_profiles = List()

    def start(self):
        """ Start the plugin life-cycle.

        This method is called by the framework at the appropriate time.
        It should never be called by user code.

        """
        super(InstrManagerPlugin, self).start()
        # Make sure the default profile folder exists before scanning it.
        path = os.path.join(PACKAGE_PATH, 'profiles')
        if not os.path.isdir(path):
            os.mkdir(path)
        # Populate the caches first; observers are bound last, presumably so
        # the initial refreshes do not trigger them.
        self._refresh_drivers()
        self._refresh_forms()
        self._refresh_profiles_map()
        self._refresh_users()
        self._bind_observers()

    def stop(self):
        """ Stop the plugin life-cycle.

        This method is called by the framework at the appropriate time.
        It should never be called by user code.

        """
        super(InstrManagerPlugin, self).stop()
        # Unbind the file-system observers first, presumably so that clearing
        # the caches below cannot race with watchdog notifications.
        self._unbind_observers()
        self._users.clear()
        self._driver_types.clear()
        self._drivers.clear()
        self._forms.clear()
        self._profiles_map.clear()
        self._used_profiles.clear()

    def driver_types_request(self, driver_types):
        """ Give access to the driver type implementations.

        Parameters
        ----------
        driver_types : list(str)
            Names of the driver types whose classes are requested.

        Returns
        -------
        found : dict
            The requested driver types which are known, as a {name: class}
            dict.

        missing : list
            Names of the requested driver types which are unknown.

        """
        missing = []
        for name in driver_types:
            if name not in self.driver_types:
                missing.append(name)

        found = {}
        for name, klass in self._driver_types.iteritems():
            if name in driver_types:
                found[name] = klass

        return found, missing

    def drivers_request(self, drivers):
        """ Give access to the driver implementations.

        Parameters
        ----------
        drivers : list(str)
            Names of the drivers whose classes are requested.

        Returns
        -------
        found : dict
            The requested drivers which are known, as a {name: class} dict.

        missing : list
            Names of the requested drivers which are unknown.

        """
        missing = []
        for name in drivers:
            if name not in self.drivers:
                missing.append(name)

        found = {}
        for name, klass in self._drivers.iteritems():
            if name in drivers:
                found[name] = klass

        return found, missing

    def profile_path(self, profile):
        """ Request the path of the file storing a profile.

        Beware: this path should not be used to establish a communication
        with an instrument as doing so by-passes the manager securities.

        Parameters
        ----------
        profile : str
            Name of the profile whose file path is requested.

        Returns
        -------
        path : unicode or None
            Absolute path to the file storing the profile, or None when the
            profile is unknown.

        """
        return self._profiles_map.get(profile)

    def profiles_request(self, new_owner, profiles):
        """ Register the user of the specified profiles.

        If necessary this method will try to free profiles used by different
        owners, however it might fail in doing so and the client must be able
        to handle that case.

        Parameters
        ----------
        new_owner : unicode
            Id of the plugin requesting the right to use some profiles

        profiles : list(str)
            The names of the profiles the user want the privilege to use.

        Returns
        -------
        profiles : dict or list
            The required profiles as a dict {name: profile}, can be empty if
            a profile is missing or if the manager failed to release a profile.

        missing : list
            The list of profiles that was not found.

        """
        # Only registered users may borrow profiles.
        if new_owner not in self._users:
            logger = logging.getLogger(__name__)
            mess = cleandoc('''Plugin {} tried to request profiles, but it is
                not a registered user.'''.format(new_owner))
            logger.error(mess)
            return {}, []

        missing = [prof for prof in profiles if prof not in self.all_profiles]

        if missing:
            return {}, missing

        to_release = defaultdict(list)
        # Identify the profiles which need to be released
        for prof in profiles:
            if prof in self._used_profiles:
                old_owner = self._used_profiles[prof]
                decl = self._users[old_owner]
                if decl.default_policy == 'unreleasable':
                    # Abort: an unreleasable owner holds one of the profiles.
                    return {}, []

                to_release[decl.release_method].append(prof)

        if to_release:
            for meth, profs in to_release.iteritems():
                res = meth(self.workbench, profiles=profs)
                if not res:
                    # The owner refused to release: abort the whole request.
                    return {}, []

        # Now that we are sure that the profiles can be sent to the users,
        # remove them from the available_profiles list, register who is using
        # them,  and load them
        avail = self.available_profiles
        self.available_profiles = [
            prof for prof in avail if prof not in profiles
        ]

        used = {prof: new_owner for prof in profiles}
        self._used_profiles.update(used)

        mapping = self._profiles_map
        profile_objects = {
            prof: open_profile(mapping[prof])
            for prof in profiles
        }

        return profile_objects, []

    def profiles_released(self, owner, profiles):
        """ Notify the manager that some profiles are no longer in use.

        The user should not keep any reference to the profiles after this
        call.

        Parameters
        ----------
        owner : Plugin
            Current owner of the profiles

        profiles : list(str)
            The names of the profiles the user is not using anymore.

        """
        used = self._used_profiles
        for name in profiles:
            used.pop(name, None)

        self.available_profiles = list(self.available_profiles) + profiles

    def matching_drivers(self, driver_types):
        """ List the existing drivers whose type is in the specified list.

        Parameters
        ----------
        driver_types : list(str)
            Names of the driver types for which matching drivers should be
            returned

        Returns
        -------
        drivers : list(str)
            Names of the matching drivers

        """
        matching = []
        for d_type in driver_types:
            base = self._driver_types[d_type]
            for name, klass in self._drivers.iteritems():
                if issubclass(klass, base):
                    matching.append(name)

        return matching

    def matching_profiles(self, drivers):
        """ List the existing profiles whose driver is in the specified list.

        Parameters
        ----------
        drivers : list(str)
            Names of the drivers for which matching profiles should be
            returned

        Returns
        -------
        profiles : list(str)
            Names of the matching profiles

        """
        matching = []
        for driver in drivers:
            for prof, path in self._profiles_map.iteritems():
                if open_profile(path)['driver'] == driver:
                    matching.append(prof)

        return matching

    def matching_form(self, driver, view=False):
        """ Return the appropriate form to edit a profile for a driver.

        Parameters
        ----------
        driver : str
            Name of the driver or driver type for which a form should be
            returned.

        view : bool, optional
            Whether or not to return the matching view alongside the form.

        Returns
        -------
        form : AbstractConnectionForm
            Form allowing to edit a profile for the given driver.

        view : Enamldef
            View matching the form (only when ``view`` is True).

        """
        form, f_view = None, EmptyView

        if driver in self._driver_types:
            type_name = self._driver_types[driver].__name__
            form, f_view = self._forms.get(type_name, (None, EmptyView))

        elif driver in self._drivers:
            # Walk up the driver mro until a registered form is found.
            ancestors = self._drivers[driver].mro()
            i = 1
            while driver not in self._forms and i < len(ancestors):
                driver = ancestors[i].__name__
                i += 1

            form, f_view = self._forms.get(driver, (None, EmptyView))

        if form is None:
            logger = logging.getLogger(__name__)
            logger.warn(
                "No matching form was found for the driver {}".format(driver))

        return (form, f_view) if view else form

    def report(self):
        """ Give access to the failures which happened at startup.

        Returns
        -------
        failures : dict
            Dict holding the failures which happened during loading.

        """
        return self._failed

    def reload_driver(self, driver):
        """ Reload a driver definition.

        All the classes in the driver mro are reloaded in reverse order.

        Parameters
        ----------
        driver : str
            Name of the driver whose definition should be reloaded.

        Returns
        -------
        driver: class
            Reloaded definition of the driver.

        """
        d_class = self._drivers[driver]
        # Reversed mro: most basic ancestors first. The first two entries of
        # the reversed list are skipped.
        mro = type.mro(d_class)[::-1]
        for ancestor in mro[2::]:
            name = ancestor.__name__
            mod = getmodule(ancestor)
            # NOTE: relies on the builtin 'reload' (Python 2) being available.
            mod = reload(mod)
            mem = getmembers(mod, isclass)
            # Retrieve the freshly reloaded class object by name.
            reloaded = [m[1] for m in mem if m[0] == name][0]

            # Propagate the reloaded class to every cache entry which still
            # points to the stale one.
            if ancestor in self._drivers.values():
                for k, v in self._drivers.iteritems():
                    if v == ancestor:
                        self._drivers[k] = reloaded

            if ancestor in self._driver_types.values():
                for k, v in self._driver_types.iteritems():
                    if v == ancestor:
                        self._driver_types[k] = reloaded

        return self._drivers[driver]

    # --- Private API ---------------------------------------------------------
    # Mapping of driver type names to driver type classes.
    _driver_types = Dict(Str(), Subclass(BaseInstrument))

    # Mapping of driver names to driver classes.
    _drivers = Dict(Str(), Subclass(BaseInstrument))

    # Connections forms and views {driver_name: (form, view)}.
    _forms = Dict(Str(), Tuple())

    # Mapping between profile names and path to .ini file holding the data.
    _profiles_map = Dict(Str(), Unicode())

    # Mapping between profile names and the id of the user holding them.
    _used_profiles = Dict(Unicode(), Unicode())

    # Mapping between plugin_id and InstrUser declaration.
    _users = Dict(Unicode(), Typed(InstrUser))

    # Dict holding the list of failures which happened during loading.
    _failed = Dict()

    # Watchdog observer monitoring the profile folders.
    _observer = Typed(Observer, ())

    def _refresh_profiles_map(self, deferred=False):
        """ Refresh the known profiles.

        Scans every folder in ``profiles_folders`` for '.ini' files and
        rebuilds the profile name -> path mapping.

        Parameters
        ----------
        deferred : bool, optional
            When True the new mapping is applied through a deferred call,
            which is needed when running from a watchdog thread.

        """
        profiles = {}
        for path in self.profiles_folders:
            if not os.path.isdir(path):
                logger = logging.getLogger(__name__)
                # warning() replaces the deprecated Logger.warn() alias.
                logger.warning('{} is not a valid directory'.format(path))
                continue

            filenames = sorted(f for f in os.listdir(path)
                               if (os.path.isfile(os.path.join(path, f))
                                   and f.endswith('.ini')))

            for filename in filenames:
                profile_name = self._normalise_name(filename)
                prof_path = os.path.join(path, filename)
                # Beware: redundant names are overwritten.
                profiles[profile_name] = prof_path

        if deferred:
            deferred_call(self._set_profiles_map, profiles)
        else:
            self._set_profiles_map(profiles)

    def _set_profiles_map(self, profiles):
        """ Set the known profiles values.

        This function is used for deferred settings to avoid issues with
        watchdog threads.

        """
        self._profiles_map = profiles
        names = sorted(profiles.keys())
        self.all_profiles = names
        in_use = self._used_profiles
        self.available_profiles = [name for name in names
                                   if name not in in_use]

    def _refresh_drivers(self):
        """ Refresh the known driver types and drivers.

        Explores the instruments package, then every driver sub-package the
        explored modules declare, collecting DRIVER_TYPES, DRIVER_PACKAGES
        and DRIVERS contributions along the way. Packages listed in
        ``drivers_loading`` are excluded from exploration.

        """
        path = PACKAGE_PATH
        failed = {}

        modules = self._explore_package('instruments', path, failed,
                                        self.drivers_loading)

        driver_types = {}
        driver_packages = []
        drivers = {}
        self._explore_modules_for_drivers(modules, driver_types,
                                          driver_packages, drivers, failed,
                                          'instruments')

        # Remove packages which should not be explored
        for pack in driver_packages[:]:
            if pack in self.drivers_loading:
                driver_packages.remove(pack)

        # Explore packages: newly discovered sub-packages are appended to the
        # queue and processed in turn.
        while driver_packages:
            pack = driver_packages.pop(0)
            pack_path = os.path.join(PACKAGE_PATH, '..', *pack.split('.'))

            modules = self._explore_package(pack, pack_path, failed,
                                            self.drivers_loading)

            self._explore_modules_for_drivers(modules,
                                              driver_types,
                                              driver_packages,
                                              drivers,
                                              failed,
                                              prefix=pack)

            # Remove packages which should not be explored
            for pack in driver_packages[:]:
                if pack in self.drivers_loading:
                    driver_packages.remove(pack)

        self._drivers = drivers
        self._driver_types = driver_types

        # Public, sorted name lists exposed to the rest of the application.
        self.driver_types = sorted(driver_types.keys())
        self.drivers = sorted(drivers.keys())
        self._failed = failed
        # TODO do something with failed

    @staticmethod
    def _explore_modules_for_drivers(modules, types, packages, drivers, failed,
                                     prefix):
        """ Explore a list of modules.

        Parameters
        ----------
        modules : list
            The list of modules to explore

        types : dict
            A dict in which discovered types will be stored.

        packages : list
            A list in which discovered packages will be stored.

        failed : dict
            A dict in which failed imports will be stored
            (module name -> error message).

        drivers : dict
            A dict in which discovered drivers will be stored.

        prefix : str
            Package prefix prepended to the names declared in
            DRIVER_PACKAGES.

        """
        for mod in modules:
            try:
                m = import_module('.' + mod, MODULE_ANCHOR)
            except Exception as e:
                log = logging.getLogger(__name__)
                mess = 'Failed to import mod {} : {}'.format(mod, e)
                log.error(mess)
                failed[mod] = mess
                continue

            # Modules advertise their contributions through well-known
            # module-level constants.
            if hasattr(m, 'DRIVER_TYPES'):
                types.update(m.DRIVER_TYPES)

            if hasattr(m, 'DRIVER_PACKAGES'):
                packs = [prefix + '.' + pack for pack in m.DRIVER_PACKAGES]
                packages.extend(packs)

            if hasattr(m, 'DRIVERS'):
                drivers.update(m.DRIVERS)

    def _refresh_forms(self):
        """ Refresh the list of known forms.

        Forms are collected from the manager.forms modules (FORMS constant)
        and views from the enaml files in forms/views (FORMS_MAP_VIEWS
        constant). Only forms with a matching view are kept.

        """
        path = os.path.join(PACKAGE_PATH, 'manager', 'forms')
        failed = {}

        modules = self._explore_package('instruments.manager.forms', path,
                                        failed, [])

        forms = {}
        for mod in modules:
            try:
                m = import_module('.' + mod, MODULE_ANCHOR)
            except Exception as e:
                log = logging.getLogger(__name__)
                mess = 'Failed to import form {} : {}'.format(mod, e)
                log.error(mess)
                failed[mod] = mess
                continue

            if hasattr(m, 'FORMS'):
                forms.update(m.FORMS)

        path = os.path.join(path, 'views')
        view_modules = self._explore_package('instruments.manager.forms.views',
                                             path, failed, [], '.enaml')

        views = {}
        for mod in view_modules:
            try:
                # Enaml files need the special import hook to be compiled.
                with enaml.imports():
                    m = import_module('.' + mod, MODULE_ANCHOR)
            except Exception as e:
                log = logging.getLogger(__name__)
                mess = 'Failed to import view {} : {}'.format(mod, e)
                log.error(mess)
                failed[mod] = mess
                continue

            if hasattr(m, 'FORMS_MAP_VIEWS'):
                views.update(m.FORMS_MAP_VIEWS)

        # items() replaces the Python-2-only iteritems() so this also runs
        # on Python 3.
        self._forms = {
            driver: (form, views[form.__name__])
            for driver, form in forms.items() if form.__name__ in views
        }

    @staticmethod
    def _explore_package(pack, pack_path, failed, exceptions, suffix='.py'):
        """ Explore a package.

        Parameters
        ----------
        pack : str
            The package name relative to "instruments". (ex : instruments.visa)

        pack_path : unicode
            Path of the package to explore

        failed : dict
            A dict in which failed imports will be stored.

        exceptions : list
            List of module which should be ignored

        suffix : str, optional
            Extension of the files to collect ('.py' by default).

        Returns
        -------
        modules : list
            List of string indicating modules which can be imported

        """
        log = logging.getLogger(__name__)

        if not os.path.isdir(pack_path):
            mess = '{} is not a valid directory.({})'.format(pack, pack_path)
            log.error(mess)
            failed[pack] = mess
            return []

        def record_invalid_package():
            # Shared failure path for a directory lacking an __init__.py.
            mess = cleandoc('''{} is not a valid Python package (miss
                __init__.py).'''.format(pack))
            log.error(mess)
            failed[pack] = mess

        entries = os.listdir(pack_path)
        cut = len(suffix)
        modules = sorted(pack + '.' + entry[:-cut]
                         for entry in entries
                         if (os.path.isfile(os.path.join(pack_path, entry))
                             and entry.endswith(suffix)))

        if suffix == '.py':
            init_module = pack + '.__init__'
            if init_module not in modules:
                record_invalid_package()
                return []
            modules.remove(init_module)
        elif '__init__.py' not in entries:
            record_invalid_package()
            return []

        # Drop the modules which should not be imported.
        return [mod for mod in modules if mod not in exceptions]

    def _refresh_users(self):
        """ Refresh the list of potential users.

        Already-loaded user declarations are reused; only users contributed
        by new extensions are built.

        """
        point = self.workbench.get_extension_point(USERS_POINT)
        extensions = point.extensions
        if not extensions:
            self._users.clear()
            return

        known = self._users
        refreshed = {}
        for ext in extensions:
            plugin_id = ext.plugin_id
            if plugin_id in known:
                refreshed[plugin_id] = known[plugin_id]
            else:
                refreshed[plugin_id] = self._load_user(ext)

        self._users = refreshed

    def _load_user(self, extension):
        """ Load the user object for the given extension.

        Parameters
        ----------
        extension : Extension
            The extension object of interest

        Returns
        -------
        user : User
            The first InstrUser object declared by the extension

        """
        declared = extension.get_children(InstrUser)
        # A declared child takes precedence; the factory is only used when
        # the extension declares no InstrUser children.
        if declared or extension.factory is None:
            return declared[0]

        user = extension.factory(self.workbench)
        if not isinstance(user, InstrUser):
            msg = "extension '%s' created non-InstrUser of type '%s'"
            args = (extension.qualified_id, type(user).__name__)
            raise TypeError(msg % args)
        return user

    def _on_users_updated(self, change):
        """ Observer rebuilding the known users when the extension point
        contributions change.

        """
        self._refresh_users()

    def _bind_observers(self):
        """ Setup the observers for the plugin.

        Observes the users extension point, watches every profile folder on
        disk through the watchdog observer, and tracks the plugin members
        controlling drivers loading and profile locations.

        """
        workbench = self.workbench
        point = workbench.get_extension_point(USERS_POINT)
        point.observe('extensions', self._on_users_updated)

        # Watch each profile folder so the profiles map stays in sync with
        # the .ini files present on disk.
        for folder in self.profiles_folders:
            handler = _FileListUpdater(self._refresh_profiles_map)
            self._observer.schedule(handler, folder, recursive=True)

        self._observer.start()
        self.observe('drivers_loading', self._update_drivers)
        self.observe('profiles_folders', self._update_profiles)

    def _unbind_observers(self):
        """ Remove the observers for the plugin.

        Mirrors _bind_observers: stops observing the plugin members, shuts
        down the watchdog observer thread, and disconnects from the users
        extension point.

        """
        self.unobserve('drivers_loading', self._update_drivers)
        self.unobserve('profiles_folders', self._update_profiles)
        # Stop the watchdog thread cleanly and wait for it to terminate.
        self._observer.unschedule_all()
        self._observer.stop()
        self._observer.join()

        workbench = self.workbench
        point = workbench.get_extension_point(USERS_POINT)
        point.unobserve('extensions', self._on_users_updated)

    def _update_drivers(self, change):
        """ Observer rebuilding the driver mappings so that the loading
        preferences are taken into account.

        """
        self._refresh_drivers()

    def _update_profiles(self, change):
        """ Observer ensuring that we watch the right profile folders.

        """
        # Drop the old watches before registering the new folder set.
        self._observer.unschedule_all()

        for path in self.profiles_folders:
            watcher = _FileListUpdater(self._refresh_profiles_map)
            self._observer.schedule(watcher, path, recursive=True)

    @staticmethod
    def _normalise_name(name):
        """ Normalize the name of the profiles by replacing '_' by spaces,
        removing the extension, adding spaces between 'aA' sequences and
        capitalizing the first letter.

        Parameters
        ----------
        name : str
            Raw name, typically a filename such as 'my_profile.ini'.

        Returns
        -------
        str
            The normalized, human readable name.

        """
        # Strip a trailing '.ini' or 'Task' suffix; the '\0' sentinel keeps
        # the look-ahead below in bounds for such names.
        if name.endswith('.ini') or name.endswith('Task'):
            name = name[:-4] + '\0'
        aux = ''
        for i, char in enumerate(name):
            if char == '_':
                aux += ' '
                continue

            if char == '\0':
                continue

            if i == 0:
                aux += char.upper()
            elif char.isupper():
                # Guarded look-ahead: a name with a trailing capital and no
                # stripped suffix used to raise IndexError on name[i + 1].
                next_is_lower = i + 1 < len(name) and name[i + 1].islower()
                if next_is_lower:
                    aux += ' ' + char.lower()
                elif name[i - 1].islower():
                    aux += ' ' + char
                else:
                    aux += char
            else:
                aux += char
        return aux
Ejemplo n.º 27
0
class ImportedSymbol(Symbol):
    """ A symbol which resolves to a module imported by its dotted name. """

    #: Dotted name of the module this symbol refers to.
    module = d_(Str())

    def get_object(self):
        """ Import and return the module named by ``module``. """
        module_name = self.module
        return importlib.import_module(module_name)
Ejemplo n.º 28
0
Archivo: menu.py Proyecto: ylwb/enaml
class Menu(ToolkitObject):
    """ A widget used as a menu in a MenuBar.

    """
    #: The title to use for the menu.
    title = d_(Str())

    #: Whether or not the menu is enabled.
    enabled = d_(Bool(True))

    #: Whether or not the menu is visible.
    visible = d_(Bool(True))

    #: Whether this menu should behave as a context menu for its parent.
    context_menu = d_(Bool(False))

    #: A reference to the ProxyMenu object.
    proxy = Typed(ProxyMenu)

    def items(self):
        """ Return the child items defined on this Menu.

        A menu item is one of Action, ActionGroup, or Menu.

        """
        item_types = (Action, ActionGroup, Menu)
        return [child for child in self.children
                if isinstance(child, item_types)]

    #--------------------------------------------------------------------------
    # Observers
    #--------------------------------------------------------------------------
    @observe('title', 'enabled', 'visible', 'context_menu')
    def _update_proxy(self, change):
        """ Forward attribute changes to the toolkit proxy.

        """
        # The base class implementation already does the right thing.
        super(Menu, self)._update_proxy(change)

    #--------------------------------------------------------------------------
    # Utility Methods
    #--------------------------------------------------------------------------
    def popup(self):
        """ Popup the menu over the current mouse location.

        The menu and its proxy are initialized and activated on demand.

        """
        if not self.is_initialized:
            self.initialize()
        if not self.proxy_is_active:
            self.activate_proxy()
        self.proxy.popup()

    def close(self):
        """ Close the menu.

        This API can be used by embedded widgets to close the menu
        at the appropriate time.

        """
        if self.proxy_is_active:
            self.proxy.close()
class RetentionAnalysis(Analysis):
    """ Compute per-ROI loading, retention and reloading statistics for an
    iteration and publish a text summary to the GUI.

    """

    # Whether this analysis runs on each iteration.
    enable = Bool()
    # Text output that can be updated back to the GUI
    text = Str()

    def __init__(self, name, experiment, description=''):
        super(RetentionAnalysis, self).__init__(name, experiment, description)
        self.properties += ['enable', 'text']

    def analyzeIteration(self, iterationResults, experimentResults):
        """ Run the retention analysis on the iteration results if enabled.

        """
        if self.enable:
            self.retention(iterationResults)

    @staticmethod
    def _format_grid(values, rows, columns):
        """ Format a flat per-ROI array as a rows x columns text grid. """
        # range replaces the Python-2-only xrange so this runs on Python 3.
        return '\n'.join(
            '\t'.join('{:.3f}'.format(v)
                      for v in values[row * columns:(row + 1) * columns])
            for row in range(rows))

    def retention(self, iter_res):
        """ Compute loading/retention/reloading and store them in iter_res.

        Parameters
        ----------
        iter_res : mapping
            Iteration results container; read for the thresholded ROI data
            and written with the outputs under
            'analysis/loading_retention/'.

        """
        # thresholdROI iteration data path
        th_path = self.experiment.thresholdROIAnalysis.iter_analysis_path
        # assumes atoms is indexed (measurement, shot, roi) with
        # boolean-like entries -- TODO confirm against thresholdROIAnalysis
        atoms = iter_res[th_path][()]
        total = atoms.shape[0]
        # find the loading for each roi (atom present in shot 0)
        loaded = np.sum(atoms[:, 0, :], axis=0)
        # find the retention for each roi (present in both shot 0 and shot 1)
        retained = np.sum(np.logical_and(atoms[:, 0, :], atoms[:, 1, :]),
                          axis=0)
        # find the number of reloaded atoms (absent in shot 0, present in 1)
        reloaded = np.sum(np.logical_and(np.logical_not(atoms[:, 0, :]),
                                         atoms[:, 1, :]),
                          axis=0)

        loading = loaded.astype('float') / total

        # NOTE: an ROI that never loaded gives a 0/0 here (nan); the
        # nan-aware reductions below account for that.
        retention = retained.astype('float') / loaded
        # find the 1 sigma confidence interval for binomial data using the
        # normal approximation:
        # http://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval
        retention_sigma = np.sqrt(retention * (1 - retention) / loaded)
        reloading = reloaded.astype('float') / total

        rows = self.experiment.ROI_rows
        columns = self.experiment.ROI_columns
        # write results to string
        text = 'total: ' + str(total) + '\n\n'
        text += 'loading:\tmax {:.3f},\tavg {:.3f}\n'.format(
            np.nanmax(loading), np.nanmean(loading))
        text += self._format_grid(loading, rows, columns) + '\n\n'
        text += 'retention:\tmax {:.3f},\tavg {:.3f}\n'.format(
            np.nanmax(retention), np.nanmean(retention))
        text += self._format_grid(retention, rows, columns) + '\n\n'
        text += 'reloading:\tmax {:.3f},\tavg {:.3f}\n'.format(
            np.nanmax(reloading), np.nanmean(reloading))
        text += self._format_grid(reloading, rows, columns) + '\n'

        iter_res['analysis/loading_retention/loaded'] = loaded
        iter_res['analysis/loading_retention/retained'] = retained
        iter_res['analysis/loading_retention/reloaded'] = reloaded
        iter_res['analysis/loading_retention/loading'] = loading
        iter_res['analysis/loading_retention/retention'] = retention
        iter_res[
            'analysis/loading_retention/retention_sigma'] = retention_sigma
        iter_res['analysis/loading_retention/reloading'] = reloading
        iter_res['analysis/loading_retention/text'] = text
        iter_res['analysis/loading_retention/atoms'] = atoms

        self.set_gui({'text': text})
Ejemplo n.º 30
0
class JobPlugin(Plugin):
    """ Plugin holding the current job, materials, units and the recent
    document history.

    """

    #: Units used for display and conversion.
    units = Enum(*unit_conversions.keys()).tag(config=True)

    #: Available materials
    materials = List(Material).tag(config=True)

    #: Current material
    material = Instance(Material, ()).tag(config=True)

    #: Previously saved jobs
    jobs = List(Job).tag(config=True)

    #: Current job
    job = Instance(Job).tag(config=True)

    #: Recently opened document paths
    recent_documents = List(Str()).tag(config=True)

    #: Maximum number of recent documents kept
    recent_document_limit = Int(10).tag(config=True)
    #: Maximum number of jobs kept in the saved history
    saved_jobs_limit = Int(100).tag(config=True)

    #: Timeout for optimizing paths
    optimizer_timeout = Float(10, strict=False).tag(config=True)

    def _default_job(self):
        """ Build the default Job using the currently selected material. """
        return Job(material=self.material)

    def _default_units(self):
        """ Default measurement units (inches). """
        return 'in'

    # -------------------------------------------------------------------------
    # Plugin API
    # -------------------------------------------------------------------------
    def start(self):
        """ Start the plugin, restoring any previously saved state.

        """
        # Load the persisted state first.
        super(JobPlugin, self).start()

        # A document restored from saved state needs its preview redrawn.
        if self.job.document:
            self.refresh_preview()

        self.init_recent_documents_menu()

    # -------------------------------------------------------------------------
    # Job API
    # -------------------------------------------------------------------------
    def request_approval(self, job):
        """ Request approval to start a job. This will set the job.info.status
        to either 'approved' or 'cancelled'.

        """
        # The dialog is an enaml declaration and must be imported through the
        # enaml import hook.
        with enaml.imports():
            from .dialogs import JobApprovalDialog
        ui = self.workbench.get_plugin('enaml.workbench.ui')
        JobApprovalDialog(ui.window, plugin=self, job=job).exec_()

    def refresh_preview(self):
        """ Redraw the preview; other plugins can request this. """
        self._refresh_preview({})

    def open_document(self, path, nodes=None):
        """ Set the job.document if it is empty, otherwise close and create
        a new Job instance.

        Parameters
        ----------
        path : str
            Path of the document to open, or '-' to read from stdin.
        nodes : list, optional
            Ids of the specific document nodes to use.

        Raises
        ------
        JobError
            If the path does not exist, is not a file, or the document
            cannot be parsed.

        """
        if path == '-':
            log.debug("Opening document from stdin...")
        elif not os.path.exists(path):
            raise JobError("Cannot open %s, it does not exist!" % path)
        elif not os.path.isfile(path):
            raise JobError("Cannot open %s, it is not a file!" % path)

        # Close any old docs
        self.close_document()

        log.info("Opening {doc}".format(doc=path))
        try:
            self.job.document_kwargs = dict(ids=nodes)
            self.job.document = path
        except ValueError as e:
            # Wrap in a JobError so callers only handle one exception type
            raise JobError(e)

        # Update recent documents (stdin is not a reopenable path)
        if path != '-':
            docs = self.recent_documents[:]
            # Remove and re-add to make it the most recent entry
            if path in docs:
                docs.remove(path)
            docs.append(path)

            # Enforce the configured limit instead of the previously
            # hard-coded value of 10, which ignored recent_document_limit.
            while len(docs) > self.recent_document_limit:
                docs.pop(0)

            self.recent_documents = docs

    def save_document(self):
        """ Append the current job to the saved jobs history. """
        current = self.job
        # Work on a copy so the UI notices the list change.
        history = self.jobs[:]
        if current in history:
            # Save a clone or later edits would mutate the saved entry too.
            current = current.clone()
        history.append(current)

        # Limit size
        if len(history) > self.saved_jobs_limit:
            history.pop(0)

        self.jobs = history

    def close_document(self):
        """ Close the currently open document, if any.

        Logs the closure and replaces the current job with a fresh default
        one; does nothing when no document is open.

        """
        if not self.job.document:
            # Nothing is open.
            return

        log.info("Closing {doc}".format(doc=self.job.document))
        # Create a new default job
        self.job = self._default_job()

    @observe('job.material')
    def _observe_material(self, change):
        """ Keep the job material and plugin material in sync.

        """
        material = self.material
        current_job = self.job
        if current_job.material != material:
            current_job.material = material

    @observe('job', 'job.model', 'job.material', 'material.size',
             'material.padding')
    def _refresh_preview(self, change):
        """ Redraw the preview on the screen

        Rebuilds the plot items (device area, move/cut paths, material
        outline and padding) and pushes them to the preview plugin, then
        saves the plugin config.

        """
        # NOTE(review): logging the raw change dict at INFO level looks like
        # leftover debugging -- consider lowering to debug.
        log.info(change)
        view_items = []

        #: Transform used by the view
        preview_plugin = self.workbench.get_plugin('inkcut.preview')
        job = self.job
        plot = preview_plugin.preview
        t = preview_plugin.transform

        #: Draw the device
        plugin = self.workbench.get_plugin('inkcut.device')
        device = plugin.device

        #: Apply the final output transforms from the device
        transform = device.transform if device else lambda p: p

        if device and device.area:
            area = device.area
            view_items.append(
                dict(path=transform(device.area.path * t),
                     pen=plot.pen_device,
                     skip_autorange=True)  #(False, [area.size[0], 0]))
            )

        #: The model is only set when a document is open and has no errors
        if job.model:
            view_items.extend([
                dict(path=transform(job.move_path), pen=plot.pen_up),
                dict(path=transform(job.cut_path), pen=plot.pen_down)
            ])
            #: TODO: This
            #if self.show_offset_path:
            #    view_items.append(PainterPathPlotItem(
            # self.job.offset_path,pen=self.pen_offset))
        if job.material:
            # Also observe any change to job.media and job.device
            view_items.extend([
                dict(path=transform(job.material.path * t),
                     pen=plot.pen_media,
                     skip_autorange=([0, job.size[0]], [0, job.size[1]])),
                dict(path=transform(job.material.padding_path * t),
                     pen=plot.pen_media_padding,
                     skip_autorange=True)
            ])

        #: Update the plot
        preview_plugin.set_preview(*view_items)

        #: Save config
        self.save()

    # -------------------------------------------------------------------------
    # Utilities
    # -------------------------------------------------------------------------

    def init_recent_documents_menu(self):
        """ Insert the `RecentDocumentsMenu` into the Menu declaration that
        automatically updates the recent document menu links.

        """
        menu = self.get_recent_menu()
        if menu is None:
            return
        # Do nothing if the documents menu was already installed.
        if any(isinstance(child, RecentDocumentsMenu)
               for child in menu.children):
            return
        RecentDocumentsMenu(plugin=self, parent=menu).initialize()

    def get_recent_menu(self):
        """ Get the recent menu item WorkbenchMenu.

        Walks the ui window model's menus looking for the '/file' menu and
        returns its '/file/recent/' WorkbenchMenu child when found.

        """
        ui = self.workbench.get_plugin('enaml.workbench.ui')
        window_model = ui._model
        if not window_model:
            # The ui model may not exist yet (e.g. during startup).
            return
        for menu in window_model.menus:
            if menu.item.path == '/file':
                for c in menu.children:
                    if isinstance(c, WorkbenchMenu):
                        if c.item.path == '/file/recent/':
                            return c