Example #1
    def duplicate_data(self,
                       pipe_from=None,
                       pipe_to=None,
                       model_info=None,
                       global_stats=False,
                       verbose=True):
        """Duplicate the data specific to a single hybrid data pipe.

        @keyword pipe_from:     The data pipe to copy the data from.
        @type pipe_from:        str
        @keyword pipe_to:       The data pipe to copy the data to.
        @type pipe_to:          str
        @keyword model_info:    The model information from model_loop().
        @type model_info:       unknown
        @keyword global_stats:  The global statistics flag.
        @type global_stats:     bool
        @keyword verbose:       A flag which if True will cause info to be printed out.
        @type verbose:          bool
        """

        # First create the pipe_to data pipe, if it doesn't exist, but don't switch to it.
        if not pipes.has_pipe(pipe_to):
            pipes.create(pipe_to, pipe_type='hybrid', switch=False)

        # Get the data pipes.
        dp_from = pipes.get_pipe(pipe_from)
        dp_to = pipes.get_pipe(pipe_to)

        # Test that the target data pipe has no sequence loaded.
        if not exists_mol_res_spin_data(pipe_to):
            raise RelaxSequenceError(pipe_to)

        # Duplicate the hybrid pipe list data structure.
        dp_to.hybrid_pipes = dp_from.hybrid_pipes
Example #2
    def duplicate_data(self, pipe_from=None, pipe_to=None, model_info=None, global_stats=False, verbose=True):
        """Duplicate the data specific to a single hybrid data pipe.

        @keyword pipe_from:     The data pipe to copy the data from.
        @type pipe_from:        str
        @keyword pipe_to:       The data pipe to copy the data to.
        @type pipe_to:          str
        @keyword model_info:    The model information from model_loop().
        @type model_info:       unknown
        @keyword global_stats:  The global statistics flag.
        @type global_stats:     bool
        @keyword verbose:       A flag which if True will cause info to be printed out.
        @type verbose:          bool
        """

        # First create the pipe_to data pipe, if it doesn't exist, but don't switch to it.
        if not pipes.has_pipe(pipe_to):
            pipes.create(pipe_to, pipe_type='hybrid', switch=False)

        # Get the data pipes.
        dp_from = pipes.get_pipe(pipe_from)
        dp_to = pipes.get_pipe(pipe_to)

        # Test that the target data pipe has no sequence loaded.
        if not exists_mol_res_spin_data(pipe_to):
            raise RelaxSequenceError(pipe_to)

        # Duplicate the hybrid pipe list data structure.
        dp_to.hybrid_pipes = dp_from.hybrid_pipes
Example #3
    def check_vars(self):
        """Check that the user has set the variables correctly."""

        # Printout.
        section(file=sys.stdout, text="Variable checking", prespace=2)

        # The pipe name.
        if not has_pipe(self.pipe_name):
            raise RelaxNoPipeError(self.pipe_name)

        # Check the model selection.
        allowed = ['AIC', 'AICc', 'BIC']
        if self.modsel not in allowed:
            raise RelaxError("The model selection technique '%s' is not in the allowed list of %s." % (self.modsel, allowed))

        # Some warnings for the user if the pure numeric solution is selected.
        if self.numeric_only:
            # Loop over all models.
            for model in self.models:
                # Skip the models used for nesting.
                if model in [MODEL_CR72, MODEL_MMQ_CR72, MODEL_MP05]:
                    continue

                # Warnings for all other analytic models.
                if model in MODEL_LIST_ANALYTIC:
                    warn(RelaxWarning("The analytic model '%s' will be optimised but will not be used in any way in this numeric model only auto-analysis." % model))

        # Printout.
        print("The dispersion auto-analysis variables are OK.")
Example #4
    def check_vars(self):
        """Check that the user has set the variables correctly."""

        # Printout.
        section(file=sys.stdout, text="Variable checking", prespace=2)

        # The pipe name.
        if not has_pipe(self.pipe_name):
            raise RelaxNoPipeError(self.pipe_name)

        # Check the model selection.
        allowed = ['AIC', 'AICc', 'BIC']
        if self.modsel not in allowed:
            raise RelaxError(
                "The model selection technique '%s' is not in the allowed list of %s."
                % (self.modsel, allowed))

        # Some warnings for the user if the pure numeric solution is selected.
        if self.numeric_only:
            # Loop over all models.
            for model in self.models:
                # Skip the models used for nesting.
                if model in MODEL_LIST_NEST:
                    continue

                # Warnings for all other analytic models.
                if model in MODEL_LIST_ANALYTIC:
                    warn(
                        RelaxWarning(
                            "The analytic model '%s' will be optimised but will not be used in any way in this numeric model only auto-analysis."
                            % model))

        # Printout.
        print("The dispersion auto-analysis variables are OK.")
Example #5
    def duplicate_data(self,
                       pipe_from=None,
                       pipe_to=None,
                       model_info=None,
                       global_stats=False,
                       verbose=True):
        """Duplicate the data specific to a single frame order data pipe.

        @keyword pipe_from:     The data pipe to copy the data from.
        @type pipe_from:        str
        @keyword pipe_to:       The data pipe to copy the data to.
        @type pipe_to:          str
        @keyword model_info:    The model information from model_loop().  This is unused.
        @type model_info:       None
        @keyword global_stats:  The global statistics flag.
        @type global_stats:     bool
        @keyword verbose:       Unused.
        @type verbose:          bool
        """

        # Check that the data pipe does not exist.
        if pipes.has_pipe(pipe_to):
            raise RelaxError("The data pipe '%s' already exists." % pipe_to)

        # Create the pipe_to data pipe by copying.
        pipes.copy(pipe_from=pipe_from, pipe_to=pipe_to)
Example #6
    def model_selection(self, modsel_pipe=None, dir=None, write_flag=True):
        """Model selection function."""

        # Model selection (delete the model selection pipe if it already exists).
        if has_pipe(modsel_pipe):
            self.interpreter.pipe.delete(modsel_pipe)
        self.interpreter.model_selection(method='AIC', modsel_pipe=modsel_pipe, bundle=self.pipe_bundle, pipes=self.pipes)

        # Write the results.
        if write_flag:
            self.interpreter.results.write(file='results', dir=dir, force=True)
Example #7
    def load_tensor(self):
        """Function for loading the optimised diffusion tensor."""

        # Create the data pipe for the previous data (deleting the old data pipe first if necessary).
        if has_pipe(self.name_pipe('previous')):
            self.interpreter.pipe.delete(self.name_pipe('previous'))
        self.interpreter.pipe.create(self.name_pipe('previous'), 'mf', bundle=self.pipe_bundle)

        # Load the optimised diffusion tensor from the initial round.
        if self.round == 1:
            self.interpreter.results.read('results', self.results_dir+self.diff_model+sep+'init')

        # Load the optimised diffusion tensor from the previous round.
        else:
            self.interpreter.results.read('results', self.results_dir+self.diff_model+sep+'round_'+repr(self.round-1)+sep+'opt')
Example #8
    def multi_model(self, local_tm=False):
        """Function for optimisation of all model-free models."""

        # Set the data pipe names (also the names of preset model-free models).
        if local_tm:
            models = self.local_tm_models
            self.pipes = self.local_tm_models
        else:
            models = self.mf_models
        self.pipes = []
        for i in range(len(models)):
            self.pipes.append(self.name_pipe(models[i]))

        # Loop over the data pipes.
        for i in range(len(models)):
            # Place the model name into the status container.
            status.auto_analysis[self.pipe_bundle].current_model = models[i]

            # Create the data pipe (by copying).
            if has_pipe(self.pipes[i]):
                self.interpreter.pipe.delete(self.pipes[i])
            self.interpreter.pipe.copy(self.pipe_name, self.pipes[i], bundle_to=self.pipe_bundle)
            self.interpreter.pipe.switch(self.pipes[i])

            # Copy the diffusion tensor from the 'opt' data pipe and prevent it from being minimised.
            if not local_tm:
                self.interpreter.diffusion_tensor.copy(self.name_pipe('previous'))
                self.interpreter.fix('diff')

            # Select the model-free model.
            self.interpreter.model_free.select_model(model=models[i])

            # Minimise.
            self.interpreter.grid_search(inc=self.grid_inc)
            self.interpreter.minimise(self.min_algor, func_tol=self.opt_func_tol, max_iter=self.opt_max_iterations)

            # Model elimination.
            self.interpreter.eliminate()

            # Write the results.
            dir = self.base_dir + models[i]
            self.interpreter.results.write(file='results', dir=dir, force=True)

        # Unset the status.
        status.auto_analysis[self.pipe_bundle].current_model = None
Example #9
    def spin_count(self):
        """Count the number of loaded spins, returning a string formatted as 'xxx spins loaded'.

        @return:    The number of loaded spins in the format 'xxx spins loaded'.
        @rtype:     str
        """

        # The data pipe.
        if hasattr(self.data, 'pipe_name'):
            pipe = self.data.pipe_name
        else:
            pipe = cdp_name()

        # The count.
        if not has_pipe(pipe):
            num = 0
        else:
            num = count_spins(pipe=pipe)

        # Return the formatted string.
        return "%s spins loaded and selected" % num
Example #11
    def duplicate_data(self, pipe_from=None, pipe_to=None, model_info=None, global_stats=False, verbose=True):
        """Duplicate the data specific to a single frame order data pipe.

        @keyword pipe_from:     The data pipe to copy the data from.
        @type pipe_from:        str
        @keyword pipe_to:       The data pipe to copy the data to.
        @type pipe_to:          str
        @keyword model_info:    The model information from model_loop().  This is unused.
        @type model_info:       None
        @keyword global_stats:  The global statistics flag.
        @type global_stats:     bool
        @keyword verbose:       Unused.
        @type verbose:          bool
        """

        # Check that the data pipe does not exist.
        if pipes.has_pipe(pipe_to):
            raise RelaxError("The data pipe '%s' already exists." % pipe_to)

        # Create the pipe_to data pipe by copying.
        pipes.copy(pipe_from=pipe_from, pipe_to=pipe_to)
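For orientation, a minimal standalone sketch of the pipe duplication that the frame order duplicate_data() above delegates to, using the pipe_control.pipes module referenced throughout these examples; the pipe names are illustrative assumptions, not values from the snippet.

from pipe_control import pipes

# Copy the whole source data pipe into a new pipe, but only if the target does not already exist.
if not pipes.has_pipe('frame order - copy'):
    pipes.copy(pipe_from='frame order', pipe_to='frame order - copy')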
Example #12
    def __init__(self, parent, id=-1, pos=wx.Point(-1, -1), size=wx.Size(-1, -1), style=524288, name='scrolledpanel', gui=None, analysis_name=None, pipe_name=None, pipe_bundle=None, uf_exec=[], data_index=None):
        """Build the automatic R1 and R2 analysis GUI frame elements.

        @param parent:          The parent wx element.
        @type parent:           wx object
        @keyword id:            The unique ID number.
        @type id:               int
        @keyword pos:           The position.
        @type pos:              wx.Point object
        @keyword size:          The size.
        @type size:             wx.Size object
        @keyword style:         The style.
        @type style:            int
        @keyword name:          The name for the panel.
        @type name:             unicode
        @keyword gui:           The main GUI class.
        @type gui:              gui.relax_gui.Main instance
        @keyword analysis_name: The name of the analysis (the name in the tab part of the notebook).
        @type analysis_name:    str
        @keyword pipe_name:     The name of the data pipe associated with this analysis.
        @type pipe_name:        str
        @keyword pipe_bundle:   The name of the data pipe bundle associated with this analysis.
        @type pipe_bundle:      str
        @keyword uf_exec:       The list of user function on_execute methods returned from the new analysis wizard.
        @type uf_exec:          list of methods
        @keyword data_index:    The index of the analysis in the relax data store (set to None if no data currently exists).
        @type data_index:       None or int
        """

        # Store the GUI main class.
        self.gui = gui

        # Init.
        self.init_flag = True

        # New data container.
        if data_index == None:
            # First create the data pipe if not already in existence.
            if not has_pipe(pipe_name):
                self.gui.interpreter.apply('pipe.create', pipe_name=pipe_name, pipe_type='relax_fit', bundle=pipe_bundle)

            # Create the data pipe bundle if needed.
            if not has_bundle(pipe_bundle):
                self.gui.interpreter.apply('pipe.bundle', bundle=pipe_bundle, pipe=pipe_name)

            # Generate a storage container in the relax data store, and alias it for easy access.
            data_index = ds.relax_gui.analyses.add(self.label)

            # Store the analysis and pipe names.
            ds.relax_gui.analyses[data_index].analysis_name = analysis_name
            ds.relax_gui.analyses[data_index].pipe_name = pipe_name
            ds.relax_gui.analyses[data_index].pipe_bundle = pipe_bundle

            # Initialise the variables.
            ds.relax_gui.analyses[data_index].frq = ''
            ds.relax_gui.analyses[data_index].grid_inc = None
            ds.relax_gui.analyses[data_index].mc_sim_num = None
            ds.relax_gui.analyses[data_index].save_dir = self.gui.launch_dir

        # Alias the data.
        self.data = ds.relax_gui.analyses[data_index]
        self.data_index = data_index

        # Register the method for updating the spin count for the completion of user functions.
        self.observer_register()

        # Execute the base class method to build the panel.
        super(Auto_rx, self).__init__(parent, id=id, pos=pos, size=size, style=style, name=name)
Example #13
    def __init__(self, parent, id=-1, pos=wx.Point(-1, -1), size=wx.Size(-1, -1), style=524288, name='scrolledpanel', gui=None, analysis_name=None, pipe_name=None, pipe_bundle=None, uf_exec=[], data_index=None):
        """Build the automatic model-free protocol GUI element.

        @param parent:          The parent wx element.
        @type parent:           wx object
        @keyword id:            The unique ID number.
        @type id:               int
        @keyword pos:           The position.
        @type pos:              wx.Point object
        @keyword size:          The size.
        @type size:             wx.Size object
        @keyword style:         The style.
        @type style:            int
        @keyword name:          The name for the panel.
        @type name:             unicode
        @keyword gui:           The main GUI class.
        @type gui:              gui.relax_gui.Main instance
        @keyword analysis_name: The name of the analysis (the name in the tab part of the notebook).
        @type analysis_name:    str
        @keyword pipe_name:     The name of the original data pipe for this analysis.
        @type pipe_name:        str
        @keyword pipe_bundle:   The name of the data pipe bundle associated with this analysis.
        @type pipe_bundle:      str
        @keyword uf_exec:       The list of user function on_execute methods returned from the new analysis wizard.
        @type uf_exec:          list of methods
        @keyword data_index:    The index of the analysis in the relax data store (set to None if no data currently exists).
        @type data_index:       None or int
        """

        # Store the GUI main class.
        self.gui = gui

        # Init.
        self.init_flag = True

        # New data container.
        if data_index == None:
            # First create the data pipe if not already in existence.
            if not has_pipe(pipe_name):
                self.gui.interpreter.apply('pipe.create', pipe_name=pipe_name, pipe_type='mf', bundle=pipe_bundle)

            # Create the data pipe bundle if needed.
            if not has_bundle(pipe_bundle):
                self.gui.interpreter.apply('pipe.bundle', bundle=pipe_bundle, pipe=pipe_name)

            # Generate a storage container in the relax data store, and alias it for easy access.
            data_index = ds.relax_gui.analyses.add('model-free')

            # Store the analysis and pipe names.
            ds.relax_gui.analyses[data_index].analysis_name = analysis_name
            ds.relax_gui.analyses[data_index].pipe_name = pipe_name
            ds.relax_gui.analyses[data_index].pipe_bundle = pipe_bundle

            # Initialise the variables.
            ds.relax_gui.analyses[data_index].grid_inc = None
            ds.relax_gui.analyses[data_index].diff_tensor_grid_inc = {'sphere': 11, 'prolate': 11, 'oblate': 11, 'ellipsoid': 6}
            ds.relax_gui.analyses[data_index].mc_sim_num = None
            ds.relax_gui.analyses[data_index].save_dir = self.gui.system_cwd_path
            ds.relax_gui.analyses[data_index].local_tm_models = ['tm0', 'tm1', 'tm2', 'tm3', 'tm4', 'tm5', 'tm6', 'tm7', 'tm8', 'tm9']
            ds.relax_gui.analyses[data_index].mf_models = ['m0', 'm1', 'm2', 'm3', 'm4', 'm5', 'm6', 'm7', 'm8', 'm9']
            ds.relax_gui.analyses[data_index].max_iter = 30

        # Error checking.
        if ds.relax_gui.analyses[data_index].pipe_bundle == None:
            raise RelaxError("The pipe bundle must be supplied.")

        # Alias the data.
        self.data = ds.relax_gui.analyses[data_index]
        self.data_index = data_index

        # Backward compatibility.
        if not hasattr(self.data, 'local_tm_models'):
            self.data.local_tm_models = ['tm0', 'tm1', 'tm2', 'tm3', 'tm4', 'tm5', 'tm6', 'tm7', 'tm8', 'tm9']
        if not hasattr(self.data, 'mf_models'):
            self.data.mf_models = ['m0', 'm1', 'm2', 'm3', 'm4', 'm5', 'm6', 'm7', 'm8', 'm9']

        # Initialise the mode selection window.
        self.mode_win = Protocol_mode_sel_window()

        # Register the method for updating the spin count for the completion of user functions.
        self.observer_register()

        # Execute the base class method to build the panel.
        super(Auto_model_free, self).__init__(parent, id=id, pos=pos, size=size, style=style, name=name)
Example #14
    def execute(self):
        """Execute the protocol."""

        # MI - Local tm.
        ################

        if self.diff_model == 'local_tm':
            # Base directory to place files into.
            self.base_dir = self.results_dir+'local_tm'+sep

            # Sequential optimisation of all model-free models (function must be modified to suit).
            self.multi_model(local_tm=True)

            # Model selection.
            self.model_selection(modsel_pipe=self.name_pipe('aic'), dir=self.base_dir + 'aic')


        # Diffusion models MII to MV.
        #############################

        elif self.diff_model == 'sphere' or self.diff_model == 'prolate' or self.diff_model == 'oblate' or self.diff_model == 'ellipsoid':
            # No local_tm directory!
            dir_list = listdir(self.results_dir)
            if 'local_tm' not in dir_list:
                raise RelaxError("The local_tm model must be optimised first.")

            # The initial round of optimisation - not zero if calculations were interrupted.
            self.start_round = self.determine_rnd(model=self.diff_model)

            # Loop until convergence if conv_loop is set, otherwise just loop once.
            # This looping could be made much cleaner by removing the dependence on the determine_rnd() function.
            while True:
                # Determine which round of optimisation to do (init, round_1, round_2, etc).
                self.round = self.determine_rnd(model=self.diff_model)
                status.auto_analysis[self.pipe_bundle].round = self.round

                # Initial round of optimisation for diffusion models MII to MV.
                if self.round == 0:
                    # Base directory to place files into.
                    self.base_dir = self.results_dir+self.diff_model+sep+'init'+sep

                    # Run name.
                    name = self.name_pipe(self.diff_model)

                    # Create the data pipe (deleting the old one if it exists).
                    if has_pipe(name):
                        self.interpreter.pipe.delete(name)
                    self.interpreter.pipe.create(name, 'mf', bundle=self.pipe_bundle)

                    # Load the local tm diffusion model MI results.
                    self.interpreter.results.read(file='results', dir=self.results_dir+'local_tm'+sep+'aic')

                    # Remove the tm parameter.
                    self.interpreter.model_free.remove_tm()

                    # Add an arbitrary diffusion tensor which will be optimised.
                    if self.diff_model == 'sphere':
                        self.interpreter.diffusion_tensor.init(10e-9, fixed=False)
                        inc = self.diff_tensor_grid_inc['sphere']
                    elif self.diff_model == 'prolate':
                        self.interpreter.diffusion_tensor.init((10e-9, 0, 0, 0), spheroid_type='prolate', fixed=False)
                        inc = self.diff_tensor_grid_inc['prolate']
                    elif self.diff_model == 'oblate':
                        self.interpreter.diffusion_tensor.init((10e-9, 0, 0, 0), spheroid_type='oblate', fixed=False)
                        inc = self.diff_tensor_grid_inc['oblate']
                    elif self.diff_model == 'ellipsoid':
                        self.interpreter.diffusion_tensor.init((10e-09, 0, 0, 0, 0, 0), fixed=False)
                        inc = self.diff_tensor_grid_inc['ellipsoid']

                    # Minimise just the diffusion tensor.
                    self.interpreter.fix('all_spins')
                    self.interpreter.grid_search(inc=inc)
                    self.interpreter.minimise(self.min_algor, func_tol=self.opt_func_tol, max_iter=self.opt_max_iterations)

                    # Write the results.
                    self.interpreter.results.write(file='results', dir=self.base_dir, force=True)


                # Normal round of optimisation for diffusion models MII to MV.
                else:
                    # Base directory to place files into.
                    self.base_dir = self.results_dir+self.diff_model+sep+'round_'+repr(self.round)+sep

                    # Load the optimised diffusion tensor from the previous round (or the initial round if this is round 1).
                    self.load_tensor()

                    # Sequential optimisation of all model-free models (function must be modified to suit).
                    self.multi_model()

                    # Model selection.
                    self.model_selection(modsel_pipe=self.name_pipe('aic'), dir=self.base_dir + 'aic')

                    # Final optimisation of all diffusion and model-free parameters.
                    self.interpreter.fix('all', fixed=False)

                    # Minimise all parameters.
                    self.interpreter.minimise(self.min_algor, func_tol=self.opt_func_tol, max_iter=self.opt_max_iterations)

                    # Write the results.
                    dir = self.base_dir + 'opt'
                    self.interpreter.results.write(file='results', dir=dir, force=True)

                    # Test for convergence.
                    converged = self.convergence()

                    # Break out of the infinite while loop if automatic looping is not activated or if convergence has occurred.
                    if converged or not self.conv_loop:
                        break

            # Unset the status.
            status.auto_analysis[self.pipe_bundle].round = None


        # Final run.
        ############

        elif self.diff_model == 'final':
            # Diffusion model selection.
            ############################

            # The contents of the results directory.
            dir_list = listdir(self.results_dir)

            # Check that the minimal set of global diffusion models required for the protocol has been optimised.
            min_models = ['local_tm', 'sphere']
            for model in min_models:
                if model not in dir_list:
                    raise RelaxError("The minimum set of global diffusion models required for the protocol have not been optimised, the '%s' model results cannot be found." % model)

            # Build a list of all global diffusion models optimised.
            all_models = ['local_tm', 'sphere', 'prolate', 'oblate', 'ellipsoid']
            self.opt_models = []
            self.pipes = []
            for model in all_models:
                if model in dir_list:
                    self.opt_models.append(model)
                    self.pipes.append(self.name_pipe(model))

            # Remove all temporary pipes used in this auto-analysis.
            for name in pipe_names(bundle=self.pipe_bundle):
                if name in self.pipes + self.mf_model_pipes + self.local_tm_model_pipes + [self.name_pipe('aic'), self.name_pipe('previous')]:
                    self.interpreter.pipe.delete(name)

            # Create the local_tm data pipe.
            self.interpreter.pipe.create(self.name_pipe('local_tm'), 'mf', bundle=self.pipe_bundle)

            # Load the local tm diffusion model MI results.
            self.interpreter.results.read(file='results', dir=self.results_dir+'local_tm'+sep+'aic')

            # Loop over models MII to MV.
            for model in ['sphere', 'prolate', 'oblate', 'ellipsoid']:
                # Skip missing models.
                if model not in self.opt_models:
                    continue

                # Determine which was the last round of optimisation for each of the models.
                self.round = self.determine_rnd(model=model) - 1

                # If no directories beginning with 'round_' exist, the script has not been properly utilised!
                if self.round < 1:
                    # Construct the name of the diffusion tensor.
                    name = model
                    if model == 'prolate' or model == 'oblate':
                        name = name + ' spheroid'

                    # Throw an error to prevent misuse of the script.
                    raise RelaxError("Multiple rounds of optimisation of the " + name + " (between 8 to 15) are required for the proper execution of this script.")

                # Create the data pipe.
                self.interpreter.pipe.create(self.name_pipe(model), 'mf', bundle=self.pipe_bundle)

                # Load the diffusion model results.
                self.interpreter.results.read(file='results', dir=self.results_dir+model+sep+'round_'+repr(self.round)+sep+'opt')

            # Model selection between MI to MV.
            self.model_selection(modsel_pipe=self.name_pipe('final'), write_flag=False)


            # Monte Carlo simulations.
            ##########################

            # Fix the diffusion tensor, if it exists.
            if hasattr(get_pipe(self.name_pipe('final')), 'diff_tensor'):
                self.interpreter.fix('diff')

            # Simulations.
            self.interpreter.monte_carlo.setup(number=self.mc_sim_num)
            self.interpreter.monte_carlo.create_data()
            self.interpreter.monte_carlo.initial_values()
            self.interpreter.minimise(self.min_algor, func_tol=self.opt_func_tol, max_iter=self.opt_max_iterations)
            self.interpreter.eliminate()
            self.interpreter.monte_carlo.error_analysis()


            # Write the final results.
            ##########################

            # Create results files and plots of the data.
            self.write_results()


        # Unknown script behaviour.
        ###########################

        else:
            raise RelaxError("Unknown diffusion model, change the value of 'self.diff_model'")
Example #15
    def check_vars(self):
        """Check that the user has set the variables correctly."""

        # The pipe name.
        if not has_pipe(self.pipe_name):
            raise RelaxNoPipeError(self.pipe_name)
Example #16
    def __init__(self,
                 parent,
                 id=-1,
                 pos=wx.Point(-1, -1),
                 size=wx.Size(-1, -1),
                 style=524288,
                 name='scrolledpanel',
                 gui=None,
                 analysis_name=None,
                 pipe_name=None,
                 pipe_bundle=None,
                 uf_exec=[],
                 data_index=None):
        """Build the automatic R1 and R2 analysis GUI frame elements.

        @param parent:          The parent wx element.
        @type parent:           wx object
        @keyword id:            The unique ID number.
        @type id:               int
        @keyword pos:           The position.
        @type pos:              wx.Point object
        @keyword size:          The size.
        @type size:             wx.Size object
        @keyword style:         The style.
        @type style:            int
        @keyword name:          The name for the panel.
        @type name:             unicode
        @keyword gui:           The main GUI class.
        @type gui:              gui.relax_gui.Main instance
        @keyword analysis_name: The name of the analysis (the name in the tab part of the notebook).
        @type analysis_name:    str
        @keyword pipe_name:     The name of the data pipe associated with this analysis.
        @type pipe_name:        str
        @keyword pipe_bundle:   The name of the data pipe bundle associated with this analysis.
        @type pipe_bundle:      str
        @keyword uf_exec:       The list of user function on_execute methods returned from the new analysis wizard.
        @type uf_exec:          list of methods
        @keyword data_index:    The index of the analysis in the relax data store (set to None if no data currently exists).
        @type data_index:       None or int
        """

        # Store the GUI main class.
        self.gui = gui

        # Init.
        self.init_flag = True

        # New data container.
        if data_index == None:
            # First create the data pipe if not already in existence.
            if not has_pipe(pipe_name):
                self.gui.interpreter.apply('pipe.create',
                                           pipe_name=pipe_name,
                                           pipe_type='relax_fit',
                                           bundle=pipe_bundle)

            # Create the data pipe bundle if needed.
            if not has_bundle(pipe_bundle):
                self.gui.interpreter.apply('pipe.bundle',
                                           bundle=pipe_bundle,
                                           pipe=pipe_name)

            # Generate a storage container in the relax data store, and alias it for easy access.
            data_index = ds.relax_gui.analyses.add(self.label)

            # Store the analysis and pipe names.
            ds.relax_gui.analyses[data_index].analysis_name = analysis_name
            ds.relax_gui.analyses[data_index].pipe_name = pipe_name
            ds.relax_gui.analyses[data_index].pipe_bundle = pipe_bundle

            # Initialise the variables.
            ds.relax_gui.analyses[data_index].frq = ''
            ds.relax_gui.analyses[data_index].grid_inc = None
            ds.relax_gui.analyses[data_index].mc_sim_num = None
            ds.relax_gui.analyses[
                data_index].save_dir = self.gui.system_cwd_path

        # Alias the data.
        self.data = ds.relax_gui.analyses[data_index]
        self.data_index = data_index

        # Register the method for updating the spin count for the completion of user functions.
        self.observer_register()

        # Execute the base class method to build the panel.
        super(Auto_rx, self).__init__(parent,
                                      id=id,
                                      pos=pos,
                                      size=size,
                                      style=style,
                                      name=name)
Example #17
    def __init__(self, parent, id=-1, pos=wx.Point(-1, -1), size=wx.Size(-1, -1), style=524288, name='scrolledpanel', gui=None, analysis_name=None, pipe_name=None, pipe_bundle=None, uf_exec=[], data_index=None):
        """Build the automatic R1 and R2 analysis GUI frame elements.

        @param parent:          The parent wx element.
        @type parent:           wx object
        @keyword id:            The unique ID number.
        @type id:               int
        @keyword pos:           The position.
        @type pos:              wx.Size object
        @keyword size:          The size.
        @type size:             wx.Size object
        @keyword style:         The style.
        @type style:            int
        @keyword name:          The name for the panel.
        @type name:             unicode
        @keyword gui:           The main GUI class.
        @type gui:              gui.relax_gui.Main instance
        @keyword analysis_name: The name of the analysis (the name in the tab part of the notebook).
        @type analysis_name:    str
        @keyword pipe_name:     The name of the data pipe associated with this analysis.
        @type pipe_name:        str
        @keyword pipe_bundle:   The name of the data pipe bundle associated with this analysis.
        @type pipe_bundle:      str
        @keyword uf_exec:       The list of user function on_execute methods returned from the new analysis wizard.
        @type uf_exec:          list of methods
        @keyword data_index:    The index of the analysis in the relax data store (set to None if no data currently exists).
        @type data_index:       None or int
        """

        # Store the GUI main class.
        self.gui = gui

        # Init.
        self.init_flag = True

        # New data container.
        if data_index == None:
            # First create the data pipe if not already in existence.
            if not has_pipe(pipe_name):
                self.gui.interpreter.apply('pipe.create', pipe_name=pipe_name, pipe_type='relax_disp', bundle=pipe_bundle)

            # Create the data pipe bundle if needed.
            if not has_bundle(pipe_bundle):
                self.gui.interpreter.apply('pipe.bundle', bundle=pipe_bundle, pipe=pipe_name)

            # Generate a storage container in the relax data store, and alias it for easy access.
            data_index = ds.relax_gui.analyses.add(self.label)

            # Store the analysis and pipe names.
            ds.relax_gui.analyses[data_index].analysis_name = analysis_name
            ds.relax_gui.analyses[data_index].pipe_name = pipe_name
            ds.relax_gui.analyses[data_index].pipe_bundle = pipe_bundle

            # Initialise the variables.
            ds.relax_gui.analyses[data_index].r1_fit = False
            ds.relax_gui.analyses[data_index].numeric_only = False
            ds.relax_gui.analyses[data_index].grid_inc = None
            ds.relax_gui.analyses[data_index].mc_sim_num = None
            ds.relax_gui.analyses[data_index].exp_mc_sim_num = None
            ds.relax_gui.analyses[data_index].pre_run_dir = None
            ds.relax_gui.analyses[data_index].mc_sim_all_models = False
            ds.relax_gui.analyses[data_index].insignificance = 1.0
            ds.relax_gui.analyses[data_index].save_dir = self.gui.launch_dir

            # Set the default dispersion models based on the experiment type.
            ds.relax_gui.analyses[data_index].disp_models = [
                MODEL_R2EFF,
                MODEL_NOREX,
                MODEL_CR72,
                MODEL_NS_CPMG_2SITE_EXPANDED,
                MODEL_MP05,
                MODEL_NS_R1RHO_2SITE
            ]

        # Error checking.
        if ds.relax_gui.analyses[data_index].pipe_bundle == None:
            raise RelaxError("The pipe bundle must be supplied.")

        # Alias the data.
        self.data = ds.relax_gui.analyses[data_index]
        self.data_index = data_index

        # Register the method for updating the spin count for the completion of user functions.
        self.observer_register()

        # Execute the base class method to build the panel.
        super(Auto_relax_disp, self).__init__(parent, id=id, pos=pos, size=size, style=style, name=name)

        # Optimisation variables for speeding up the test suite.
        self.opt_func_tol = 1e-25
        self.opt_max_iterations = int(1e7)

        # Update the isotope and cluster information.
        self.update_clusters()
Example #18
    def __init__(self,
                 parent,
                 id=-1,
                 pos=wx.Point(-1, -1),
                 size=wx.Size(-1, -1),
                 style=524288,
                 name='scrolledpanel',
                 gui=None,
                 analysis_name=None,
                 pipe_name=None,
                 pipe_bundle=None,
                 uf_exec=[],
                 data_index=None):
        """Build the automatic model-free protocol GUI element.

        @param parent:          The parent wx element.
        @type parent:           wx object
        @keyword id:            The unique ID number.
        @type id:               int
        @keyword pos:           The position.
        @type pos:              wx.Size object
        @keyword size:          The size.
        @type size:             wx.Size object
        @keyword style:         The style.
        @type style:            int
        @keyword name:          The name for the panel.
        @type name:             unicode
        @keyword gui:           The main GUI class.
        @type gui:              gui.relax_gui.Main instance
        @keyword analysis_name: The name of the analysis (the name in the tab part of the notebook).
        @type analysis_name:    str
        @keyword pipe_name:     The name of the original data pipe for this analysis.
        @type pipe_name:        str
        @keyword pipe_bundle:   The name of the data pipe bundle associated with this analysis.
        @type pipe_bundle:      str
        @keyword uf_exec:       The list of user function on_execute methods returned from the new analysis wizard.
        @type uf_exec:          list of methods
        @keyword data_index:    The index of the analysis in the relax data store (set to None if no data currently exists).
        @type data_index:       None or int
        """

        # Store the GUI main class.
        self.gui = gui

        # Init.
        self.init_flag = True

        # New data container.
        if data_index == None:
            # First create the data pipe if not already in existence.
            if not has_pipe(pipe_name):
                self.gui.interpreter.apply('pipe.create',
                                           pipe_name=pipe_name,
                                           pipe_type='mf',
                                           bundle=pipe_bundle)

            # Create the data pipe bundle if needed.
            if not has_bundle(pipe_bundle):
                self.gui.interpreter.apply('pipe.bundle',
                                           bundle=pipe_bundle,
                                           pipe=pipe_name)

            # Generate a storage container in the relax data store, and alias it for easy access.
            data_index = ds.relax_gui.analyses.add('model-free')

            # Store the analysis and pipe names.
            ds.relax_gui.analyses[data_index].analysis_name = analysis_name
            ds.relax_gui.analyses[data_index].pipe_name = pipe_name
            ds.relax_gui.analyses[data_index].pipe_bundle = pipe_bundle

            # Initialise the variables.
            ds.relax_gui.analyses[data_index].grid_inc = None
            ds.relax_gui.analyses[data_index].diff_tensor_grid_inc = {
                'sphere': 11,
                'prolate': 11,
                'oblate': 11,
                'ellipsoid': 6
            }
            ds.relax_gui.analyses[data_index].mc_sim_num = None
            ds.relax_gui.analyses[
                data_index].save_dir = self.gui.system_cwd_path
            ds.relax_gui.analyses[data_index].local_tm_models = [
                'tm0', 'tm1', 'tm2', 'tm3', 'tm4', 'tm5', 'tm6', 'tm7', 'tm8',
                'tm9'
            ]
            ds.relax_gui.analyses[data_index].mf_models = [
                'm0', 'm1', 'm2', 'm3', 'm4', 'm5', 'm6', 'm7', 'm8', 'm9'
            ]
            ds.relax_gui.analyses[data_index].max_iter = 30

        # Error checking.
        if ds.relax_gui.analyses[data_index].pipe_bundle == None:
            raise RelaxError("The pipe bundle must be supplied.")

        # Alias the data.
        self.data = ds.relax_gui.analyses[data_index]
        self.data_index = data_index

        # Backward compatibility.
        if not hasattr(self.data, 'local_tm_models'):
            self.data.local_tm_models = [
                'tm0', 'tm1', 'tm2', 'tm3', 'tm4', 'tm5', 'tm6', 'tm7', 'tm8',
                'tm9'
            ]
        if not hasattr(self.data, 'mf_models'):
            self.data.mf_models = [
                'm0', 'm1', 'm2', 'm3', 'm4', 'm5', 'm6', 'm7', 'm8', 'm9'
            ]

        # Initialise the mode selection window.
        self.mode_win = Protocol_mode_sel_window()

        # Register the method for updating the spin count for the completion of user functions.
        self.observer_register()

        # Execute the base class method to build the panel.
        super(Auto_model_free, self).__init__(parent,
                                              id=id,
                                              pos=pos,
                                              size=size,
                                              style=style,
                                              name=name)
Example #19
def select(method=None, modsel_pipe=None, bundle=None, pipes=None):
    """Model selection function.

    @keyword method:        The model selection method.  This can currently be one of:
                                - 'AIC', Akaike's Information Criteria.
                                - 'AICc', Small sample size corrected AIC.
                                - 'BIC', Bayesian or Schwarz Information Criteria.
                                - 'CV', Single-item-out cross-validation.
                            None of the other model selection techniques are currently supported.
    @type method:           str
    @keyword modsel_pipe:   The name of the new data pipe to be created by copying of the selected data pipe.
    @type modsel_pipe:      str
    @keyword bundle:        The optional data pipe bundle to associate the newly created pipe with.
    @type bundle:           str or None
    @keyword pipes:         A list of the data pipes to use in the model selection.
    @type pipes:            list of str
    """

    # Test if the pipe already exists.
    if has_pipe(modsel_pipe):
        raise RelaxPipeError(modsel_pipe)

    # Use all pipes.
    if pipes == None:
        # Get all data pipe names from the relax data store.
        pipes = pipe_names()

    # Select the model selection technique.
    if method == 'AIC':
        print("AIC model selection.")
        formula = aic
    elif method == 'AICc':
        print("AICc model selection.")
        formula = aicc
    elif method == 'BIC':
        print("BIC model selection.")
        formula = bic
    elif method == 'CV':
        print("CV model selection.")
        raise RelaxError("The model selection technique " + repr(method) + " is not currently supported.")
    else:
        raise RelaxError("The model selection technique " + repr(method) + " is not currently supported.")

    # No pipes.
    if len(pipes) == 0:
        raise RelaxError("No data pipes are available for use in model selection.")

    # Initialise.
    function_type = {}
    model_loop = {}
    model_type = {}
    duplicate_data = {}
    model_statistics = {}
    skip_function = {}
    modsel_pipe_exists = False

    # Cross validation setup.
    if isinstance(pipes[0], list):
        # No pipes.
        if len(pipes[0]) == 0:
            raise RelaxError("No pipes are available for use in model selection in the array " + repr(pipes[0]) + ".")

        # Loop over the data pipes.
        for i in range(len(pipes)):
            for j in range(len(pipes[i])):
                # Specific functions.
                model_loop[pipes[i][j]] = get_specific_fn('model_loop', get_type(pipes[i][j]))
                model_type[pipes[i][j]] = get_specific_fn('model_type', get_type(pipes[i][j]))
                duplicate_data[pipes[i][j]] = get_specific_fn('duplicate_data', get_type(pipes[i][j]))
                model_statistics[pipes[i][j]] = get_specific_fn('model_stats', get_type(pipes[i][j]))
                skip_function[pipes[i][j]] = get_specific_fn('skip_function', get_type(pipes[i][j]))

        # The model loop should be the same for all data pipes!
        for i in range(len(pipes)):
            for j in range(len(pipes[i])):
                if model_loop[pipes[0][j]] != model_loop[pipes[i][j]]:
                    raise RelaxError("The models for each data pipes should be the same.")
        model_loop = model_loop[pipes[0][0]]

        # The model description.
        model_desc = get_specific_fn('model_desc', get_type(pipes[0]))

        # Global vs. local models.
        global_flag = False
        for i in range(len(pipes)):
            for j in range(len(pipes[i])):
                if model_type[pipes[i][j]]() == 'global':
                    global_flag = True

    # All other model selection setup.
    else:
        # Loop over the data pipes.
        for i in range(len(pipes)):
            # Specific functions.
            model_loop[pipes[i]] = get_specific_fn('model_loop', get_type(pipes[i]))
            model_type[pipes[i]] = get_specific_fn('model_type', get_type(pipes[i]))
            duplicate_data[pipes[i]] = get_specific_fn('duplicate_data', get_type(pipes[i]))
            model_statistics[pipes[i]] = get_specific_fn('model_stats', get_type(pipes[i]))
            skip_function[pipes[i]] = get_specific_fn('skip_function', get_type(pipes[i]))

        model_loop = model_loop[pipes[0]]

        # The model description.
        model_desc = get_specific_fn('model_desc', get_type(pipes[0]))

        # Global vs. local models.
        global_flag = False
        for j in range(len(pipes)):
            if model_type[pipes[j]]() == 'global':
                global_flag = True


    # Loop over the base models.
    for model_info in model_loop():
        # Print out.
        print("\n")
        desc = model_desc(model_info)
        if desc:
            print(desc)

        # Initial model.
        best_model = None
        best_crit = 1e300
        data = []

        # Loop over the pipes.
        for j in range(len(pipes)):
            # Single-item-out cross validation.
            if method == 'CV':
                # Sum of chi-squared values.
                sum_crit = 0.0

                # Loop over the validation samples and sum the chi-squared values.
                for k in range(len(pipes[j])):
                    # Alias the data pipe name.
                    pipe = pipes[j][k]

                    # Switch to this pipe.
                    switch(pipe)

                    # Skip function.
                    if skip_function[pipe](model_info):
                        continue

                    # Get the model statistics.
                    k, n, chi2 = model_statistics[pipe](model_info)

                    # Missing data sets.
                    if k == None or n == None or chi2 == None:
                        continue

                    # Chi2 sum.
                    sum_crit = sum_crit + chi2

                # Cross-validation criterion (average chi-squared value).
                crit = sum_crit / float(len(pipes[j]))

            # Other model selection methods.
            else:
                # Reassign the pipe.
                pipe = pipes[j]

                # Switch to this pipe.
                switch(pipe)

                # Skip function.
                if skip_function[pipe](model_info):
                    continue

                # Get the model statistics.
                k, n, chi2 = model_statistics[pipe](model_info, global_stats=global_flag)

                # Missing data sets.
                if k == None or n == None or chi2 == None:
                    continue

                # Calculate the criterion value.
                crit = formula(chi2, float(k), float(n))

                # Store the values for a later printout.
                data.append([pipe, repr(k), repr(n), "%.5f" % chi2, "%.5f" % crit])

            # Select model.
            if crit < best_crit:
                best_model = pipe
                best_crit = crit

        # Write out the table.
        write_data(out=sys.stdout, headings=["Data pipe", "Num_params_(k)", "Num_data_sets_(n)", "Chi2", "Criterion"], data=data)

        # Duplicate the data from the 'best_model' to the model selection data pipe.
        if best_model != None:
            # Print out of selected model.
            print("The model from the data pipe " + repr(best_model) + " has been selected.")

            # Switch to the selected data pipe.
            switch(best_model)

            # Duplicate.
            duplicate_data[best_model](best_model, modsel_pipe, model_info, global_stats=global_flag, verbose=False)

            # Model selection pipe now exists.
            modsel_pipe_exists = True

        # No model selected.
        else:
            # Print out of selected model.
            print("No model has been selected.")

    # Switch to the model selection pipe.
    if modsel_pipe_exists:
        switch(modsel_pipe)

    # Bundle the data pipe.
    if bundle:
        pipe_control.pipes.bundle(bundle=bundle, pipe=modsel_pipe)
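The 'formula' alias in select() above points to one of the aic, aicc, or bic functions. For reference, a sketch of the standard chi-squared based definitions these names conventionally stand for is given below; the exact relax implementations are an assumption here and live elsewhere in the code base.

from math import log

def aic(chi2, k, n):
    """Akaike's Information Criterion: chi2 + 2k."""
    return chi2 + 2.0*k

def aicc(chi2, k, n):
    """Small sample size corrected AIC: chi2 + 2k + 2k(k+1)/(n-k-1)."""
    return chi2 + 2.0*k + 2.0*k*(k + 1.0) / (n - k - 1.0)

def bic(chi2, k, n):
    """Bayesian or Schwarz Information Criterion: chi2 + k*ln(n)."""
    return chi2 + k*log(n)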
Example #20
def select(method=None, modsel_pipe=None, bundle=None, pipes=None):
    """Model selection function.

    @keyword method:        The model selection method.  This can currently be one of:
                                - 'AIC', Akaike's Information Criteria.
                                - 'AICc', Small sample size corrected AIC.
                                - 'BIC', Bayesian or Schwarz Information Criteria.
                                - 'CV', Single-item-out cross-validation.
                            None of the other model selection techniques are currently supported.
    @type method:           str
    @keyword modsel_pipe:   The name of the new data pipe to be created by copying of the selected data pipe.
    @type modsel_pipe:      str
    @keyword bundle:        The optional data pipe bundle to associate the newly created pipe with.
    @type bundle:           str or None
    @keyword pipes:         A list of the data pipes to use in the model selection.
    @type pipes:            list of str
    """

    # Test if the pipe already exists.
    if has_pipe(modsel_pipe):
        raise RelaxPipeError(modsel_pipe)

    # Use all pipes.
    if pipes == None:
        # Get all data pipe names from the relax data store.
        pipes = pipe_names()

    # Select the model selection technique.
    if method == 'AIC':
        print("AIC model selection.")
        formula = aic
    elif method == 'AICc':
        print("AICc model selection.")
        formula = aicc
    elif method == 'BIC':
        print("BIC model selection.")
        formula = bic
    elif method == 'CV':
        print("CV model selection.")
        raise RelaxError("The model selection technique " + repr(method) + " is not currently supported.")
    else:
        raise RelaxError("The model selection technique " + repr(method) + " is not currently supported.")

    # No pipes.
    if len(pipes) == 0:
        raise RelaxError("No data pipes are available for use in model selection.")

    # Initialise.
    function_type = {}
    model_loop = {}
    model_type = {}
    duplicate_data = {}
    model_statistics = {}
    skip_function = {}
    modsel_pipe_exists = False

    # Cross validation setup.
    if isinstance(pipes[0], list):
        # No pipes.
        if len(pipes[0]) == 0:
            raise RelaxError("No pipes are available for use in model selection in the array " + repr(pipes[0]) + ".")

        # Loop over the data pipes.
        for i in range(len(pipes)):
            for j in range(len(pipes[i])):
                # The specific analysis API object.
                api = return_api(pipe_name=pipes[i][j])

                # Store the specific functions.
                model_loop[pipes[i][j]] = api.model_loop
                model_type[pipes[i][j]] = api.model_type
                duplicate_data[pipes[i][j]] = api.duplicate_data
                model_statistics[pipes[i][j]] = api.model_statistics
                skip_function[pipes[i][j]] = api.skip_function

        # The model loop should be the same for all data pipes!
        for i in range(len(pipes)):
            for j in range(len(pipes[i])):
                if model_loop[pipes[0][j]] != model_loop[pipes[i][j]]:
                    raise RelaxError("The models for each data pipes should be the same.")

        # Alias some functions from the specific API of the first data pipe.
        api = return_api(pipe_name=pipes[0][0])
        model_loop = api.model_loop
        model_desc = api.model_desc

        # Global vs. local models.
        global_flag = False
        for i in range(len(pipes)):
            for j in range(len(pipes[i])):
                if model_type[pipes[i][j]]() == 'global':
                    global_flag = True

    # All other model selection setup.
    else:
        # Loop over the data pipes.
        for i in range(len(pipes)):
            # The specific analysis API object.
            api = return_api()

            # Store the specific functions.
            model_loop[pipes[i]] = api.model_loop
            model_type[pipes[i]] = api.model_type
            duplicate_data[pipes[i]] = api.duplicate_data
            model_statistics[pipes[i]] = api.model_statistics
            skip_function[pipes[i]] = api.skip_function

        # Alias some functions from the specific API of the first data pipe.
        api = return_api(pipe_name=pipes[0])
        model_loop = api.model_loop
        model_desc = api.model_desc

        # Global vs. local models.
        global_flag = False
        for j in range(len(pipes)):
            if model_type[pipes[j]]() == 'global':
                global_flag = True


    # Loop over the base models.
    for model_info in model_loop():
        # Print out.
        print("\n")
        desc = model_desc(model_info)
        if desc:
            print(desc)

        # Initial model.
        best_model = None
        best_crit = 1e300
        data = []

        # Loop over the pipes.
        for j in range(len(pipes)):
            # Single-item-out cross validation.
            if method == 'CV':
                # Sum of chi-squared values.
                sum_crit = 0.0

                # Loop over the validation samples and sum the chi-squared values.
                for sample in range(len(pipes[j])):
                    # Alias the data pipe name.
                    pipe = pipes[j][sample]

                    # Switch to this pipe.
                    switch(pipe)

                    # Skip function.
                    if skip_function[pipe](model_info):
                        continue

                    # Get the model statistics.
                    k, n, chi2 = model_statistics[pipe](model_info)

                    # Missing data sets.
                    if k is None or n is None or chi2 is None:
                        continue

                    # Chi2 sum.
                    sum_crit = sum_crit + chi2

                # Cross-validation criterion (average chi-squared value).
                crit = sum_crit / float(len(pipes[j]))
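                # For example (illustrative numbers only): three validation pipes with
                # chi-squared values of 10.0, 12.0 and 14.0 would give a CV criterion of
                # (10.0 + 12.0 + 14.0) / 3 = 12.0.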

            # Other model selection methods.
            else:
                # Reassign the pipe.
                pipe = pipes[j]

                # Switch to this pipe.
                switch(pipe)

                # Skip function.
                if skip_function[pipe](model_info):
                    continue

                # Get the model statistics.
                k, n, chi2 = model_statistics[pipe](model_info, global_stats=global_flag)

                # Missing data sets.
                if k is None or n is None or chi2 is None:
                    continue

                # Calculate the criterion value.
                crit = formula(chi2, float(k), float(n))
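                # Note: 'formula' is the information criterion function selected above
                # (aic, aicc or bic).  Assuming the standard chi-squared based forms,
                # these would be roughly:
                #     AIC  = chi2 + 2k
                #     AICc = chi2 + 2k + 2k(k + 1)/(n - k - 1)
                #     BIC  = chi2 + k*ln(n)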

                # Store the values for a later printout.
                data.append([pipe, repr(k), repr(n), "%.5f" % chi2, "%.5f" % crit])

            # Select model.
            if crit < best_crit:
                best_model = pipe
                best_crit = crit

        # Write out the table.
        write_data(out=sys.stdout, headings=["Data pipe", "Num_params_(k)", "Num_data_sets_(n)", "Chi2", "Criterion"], data=data)

        # Duplicate the data from the 'best_model' to the model selection data pipe.
        if best_model is not None:
            # Print out of selected model.
            print("The model from the data pipe " + repr(best_model) + " has been selected.")

            # Switch to the selected data pipe.
            switch(best_model)

            # Duplicate.
            duplicate_data[best_model](best_model, modsel_pipe, model_info, global_stats=global_flag, verbose=False)

            # Model selection pipe now exists.
            modsel_pipe_exists = True

        # No model selected.
        else:
            # Printout stating that no model was selected.
            print("No model has been selected.")

    # Switch to the model selection pipe.
    if modsel_pipe_exists:
        switch(modsel_pipe)

    # Bundle the data pipe.
    if bundle:
        pipe_control.pipes.bundle(bundle=bundle, pipe=modsel_pipe)

    # Update all of the required metadata structures.
    mol_res_spin.metadata_update()
    interatomic.metadata_update()
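A minimal usage sketch for the model selection code above.  The function name select() and all data pipe names below are illustrative assumptions, not taken from the example; the key point is that the pipes argument is a flat list of data pipe names for the AIC/AICc/BIC methods, and would be a list of lists (one inner list of validation pipes per model) for the CV branch, which currently raises RelaxError.

    # Information criterion based selection across three model-specific data pipes.
    select(method='AICc', modsel_pipe='final', pipes=['m1', 'm2', 'm3'])

    # Hypothetical pipes structure for single-item-out cross validation (one inner list per model).
    # select(method='CV', modsel_pipe='final', pipes=[['m1_cv1', 'm1_cv2'], ['m2_cv1', 'm2_cv2']])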
Exemple #21
0
    def check_vars(self):
        """Check that the user has set the variables correctly."""

        # The pipe name.
        if not has_pipe(self.pipe_name):
            raise RelaxNoPipeError(self.pipe_name)
Exemple #22
0
    def __init__(self, parent, id=-1, pos=wx.Point(-1, -1), size=wx.Size(-1, -1), style=524288, name='scrolledpanel', gui=None, analysis_name=None, pipe_name=None, pipe_bundle=None, uf_exec=[], data_index=None):
        """Build the automatic R1 and R2 analysis GUI frame elements.

        @param parent:          The parent wx element.
        @type parent:           wx object
        @keyword id:            The unique ID number.
        @type id:               int
        @keyword pos:           The position.
        @type pos:              wx.Point object
        @keyword size:          The size.
        @type size:             wx.Size object
        @keyword style:         The style.
        @type style:            int
        @keyword name:          The name for the panel.
        @type name:             unicode
        @keyword gui:           The main GUI class.
        @type gui:              gui.relax_gui.Main instance
        @keyword analysis_name: The name of the analysis (the name in the tab part of the notebook).
        @type analysis_name:    str
        @keyword pipe_name:     The name of the data pipe associated with this analysis.
        @type pipe_name:        str
        @keyword pipe_bundle:   The name of the data pipe bundle associated with this analysis.
        @type pipe_bundle:      str
        @keyword uf_exec:       The list of user function on_execute methods returned from the new analysis wizard.
        @type uf_exec:          list of methods
        @keyword data_index:    The index of the analysis in the relax data store (set to None if no data currently exists).
        @type data_index:       None or int
        """

        # Store the GUI main class.
        self.gui = gui

        # Init.
        self.init_flag = True

        # New data container.
        if data_index is None:
            # First create the data pipe if not already in existence.
            if not has_pipe(pipe_name):
                self.gui.interpreter.apply('pipe.create', pipe_name=pipe_name, pipe_type='relax_disp', bundle=pipe_bundle)

            # Create the data pipe bundle if needed.
            if not has_bundle(pipe_bundle):
                self.gui.interpreter.apply('pipe.bundle', bundle=pipe_bundle, pipe=pipe_name)

            # Generate a storage container in the relax data store, and alias it for easy access.
            data_index = ds.relax_gui.analyses.add(self.label)

            # Store the analysis and pipe names.
            ds.relax_gui.analyses[data_index].analysis_name = analysis_name
            ds.relax_gui.analyses[data_index].pipe_name = pipe_name
            ds.relax_gui.analyses[data_index].pipe_bundle = pipe_bundle

            # Initialise the variables.
            ds.relax_gui.analyses[data_index].r1_fit = False
            ds.relax_gui.analyses[data_index].numeric_only = False
            ds.relax_gui.analyses[data_index].grid_inc = None
            ds.relax_gui.analyses[data_index].mc_sim_num = None
            ds.relax_gui.analyses[data_index].exp_mc_sim_num = None
            ds.relax_gui.analyses[data_index].pre_run_dir = None
            ds.relax_gui.analyses[data_index].mc_sim_all_models = False
            ds.relax_gui.analyses[data_index].insignificance = 1.0
            ds.relax_gui.analyses[data_index].save_dir = self.gui.system_cwd_path

            # Set the default dispersion models based on the experiment type.
            ds.relax_gui.analyses[data_index].disp_models = [
                MODEL_R2EFF,                    # The base model for determining the R2eff/R1rho values.
                MODEL_NOREX,                    # The model for no chemical exchange.
                MODEL_CR72,                     # The Carver and Richards (1972) analytic CPMG model.
                MODEL_NS_CPMG_2SITE_EXPANDED,   # The numeric 2-site CPMG model (expanded form).
                MODEL_MP05,                     # The Miloushev and Palmer (2005) analytic R1rho model.
                MODEL_NS_R1RHO_2SITE            # The numeric 2-site R1rho model.
            ]

        # Error checking.
        if ds.relax_gui.analyses[data_index].pipe_bundle is None:
            raise RelaxError("The pipe bundle must be supplied.")

        # Alias the data.
        self.data = ds.relax_gui.analyses[data_index]
        self.data_index = data_index

        # Register the method for updating the spin count for the completion of user functions.
        self.observer_register()

        # Execute the base class method to build the panel.
        super(Auto_relax_disp, self).__init__(parent, id=id, pos=pos, size=size, style=style, name=name)

        # Optimisation variables for speeding up the test suite.
        self.opt_func_tol = 1e-25
        self.opt_max_iterations = int(1e7)

        # Update the isotope and cluster information.
        self.update_clusters()