Example 1
0
    def update_grid_safe(self):
        """Update the grid with the pipe data."""

        # First freeze the grid, so that the GUI element doesn't update until the end.
        self.grid.Freeze()

        # Acquire the pipe lock.
        status.pipe_lock.acquire('pipe editor window')

        # Delete the rows, leaving a single row.
        self.grid.DeleteRows(numRows=self.grid.GetNumberRows()-1)

        # Clear the contents of the first row.
        for i in range(self.grid.GetNumberCols()):
            self.grid.SetCellValue(0, i, str_to_gui(""))

        # The data pipes.
        pipe_list = pipe_names()
        n = len(pipe_list)

        # Append the appropriate number of rows.
        if n >= 1:
            self.grid.AppendRows(numRows=n-1)

        # Loop over the data pipes.
        for i in range(n):
            # Set the pipe name.
            self.grid.SetCellValue(i, 0, str_to_gui(pipe_list[i]))

            # Set the pipe type.
            self.grid.SetCellValue(i, 1, str_to_gui(get_type(pipe_list[i])))

            # Set the pipe bundle.
            self.grid.SetCellValue(i, 2, str_to_gui(get_bundle(pipe_list[i])))

            # Set the current pipe.
            if pipe_list[i] == cdp_name():
                self.grid.SetCellValue(i, 3, str_to_gui("cdp"))

            # Set the tab the pipe belongs to.
            self.grid.SetCellValue(i, 4, str_to_gui(self.gui.analysis.page_name_from_bundle(get_bundle(pipe_list[i]))))

        # Set the grid properties once finalised.
        for i in range(self.grid.GetNumberRows()):
            # Row properties.
            self.grid.SetRowSize(i, 27)

            # Loop over the columns.
            for j in range(self.grid.GetNumberCols()):
                # Cell properties.
                self.grid.SetReadOnly(i, j)

        # Release the lock.
        status.pipe_lock.release('pipe editor window')

        # Unfreeze.
        self.grid.Thaw()
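
The method above freezes the grid and acquires the pipe lock without any exception protection, so a failure while filling the cells would leave the lock held and the grid frozen. A more defensive variant is sketched below; this is an assumption about how the code could be hardened, not the original relax implementation.

    def update_grid_safe(self):
        """Sketch: the same grid update, but with a guaranteed lock release and thaw."""

        # Freeze the grid and acquire the pipe lock, exactly as above.
        self.grid.Freeze()
        status.pipe_lock.acquire('pipe editor window')

        try:
            # ... the same row deletion, cell filling and property setting as above ...
            pass

        finally:
            # Always release the lock and unfreeze the grid, even if an error occurred.
            status.pipe_lock.release('pipe editor window')
            self.grid.Thaw()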
Example 2
0
    def update_grid_safe(self):
        """Update the grid with the pipe data."""

        # First freeze the grid, so that the GUI element doesn't update until the end.
        self.grid.Freeze()

        # Acquire the pipe lock.
        status.pipe_lock.acquire('pipe editor window')

        # Delete the rows, leaving a single row.
        self.grid.DeleteRows(numRows=self.grid.GetNumberRows()-1)

        # Clear the contents of the first row.
        for i in range(self.grid.GetNumberCols()):
            self.grid.SetCellValue(0, i, str_to_gui(""))

        # The data pipes.
        pipe_list = pipe_names()
        n = len(pipe_list)

        # Append the appropriate number of rows.
        if n >= 1:
            self.grid.AppendRows(numRows=n-1)

        # Loop over the data pipes.
        for i in range(n):
            # Set the pipe name.
            self.grid.SetCellValue(i, 0, str_to_gui(pipe_list[i]))

            # Set the pipe type.
            self.grid.SetCellValue(i, 1, str_to_gui(get_type(pipe_list[i])))

            # Set the pipe bundle.
            self.grid.SetCellValue(i, 2, str_to_gui(get_bundle(pipe_list[i])))

            # Set the current pipe.
            if pipe_list[i] == cdp_name():
                self.grid.SetCellValue(i, 3, str_to_gui("cdp"))

            # Set the tab the pipe belongs to.
            self.grid.SetCellValue(i, 4, str_to_gui(self.gui.analysis.page_name_from_bundle(get_bundle(pipe_list[i]))))

        # Set the grid properties once finalised.
        for i in range(self.grid.GetNumberRows()):
            # Row properties.
            self.grid.SetRowSize(i, 27)

            # Loop over the columns.
            for j in range(self.grid.GetNumberCols()):
                # Cell properties.
                self.grid.SetReadOnly(i, j)

        # Release the lock.
        status.pipe_lock.release('pipe editor window')

        # Unfreeze.
        self.grid.Thaw()
Example 3
0
    def update_pipes(self):
        """Update the data pipe list."""

        # Clear the previous data pipe.
        self.pipe_name.Clear()

        # The list of data pipes.
        for pipe in pipe_names():
            self.pipe_name.Append(str_to_gui(pipe))

        # Set the name to the current data pipe.
        self.pipe_name.SetValue(str_to_gui(cdp_name()))
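
The str_to_gui() and gui_to_str() helpers used throughout these examples convert between Python values (including None) and the text held by wx controls. The functions below are a minimal approximation of that behaviour, not the actual relax implementations.

def str_to_gui(value):
    """Approximation: convert a Python value (possibly None) into text for a wx control."""
    if value is None:
        return ""
    return str(value)


def gui_to_str(text):
    """Approximation: convert text from a wx control back into a Python string, mapping '' to None."""
    if text is None or text == "":
        return None
    return str(text)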
Example 4
0
    def update_pipes(self):
        """Update the data pipe list."""

        # Clear the previous data pipe.
        self.pipe_name.Clear()

        # The list of data pipes.
        for pipe in pipe_names():
            self.pipe_name.Append(str_to_gui(pipe))

        # Set the name to the current data pipe.
        self.pipe_name.SetValue(str_to_gui(cdp_name()))
Example 5
0
    def update_pipes(self, event=None):
        """Update the spin view data pipe selector.

        @keyword event: The wx event.
        @type event:    wx event
        """

        # Change the cursor to busy.
        wx.BeginBusyCursor()

        # Init.
        pipe_switch = False

        # The selected pipe.
        if event:
            # The name of the selected pipe.
            pipe = gui_to_str(self.pipe_name.GetString(event.GetSelection()))

            # A pipe change.
            if pipe != cdp_name():
                pipe_switch = True
        else:
            pipe = cdp_name()

        # Catch the absence of a current data pipe.
        if not pipe:
            pipe = ''

        # Clear the previous data.
        self.pipe_name.Clear()

        # The list of pipe names.
        for name in pipe_names():
            self.pipe_name.Append(str_to_gui(name))

        # Switch.
        if pipe_switch:
            # Switch data pipes.
            self.gui.interpreter.apply('pipe.switch', pipe)

            # Update the tree view.
            self.tree_panel.update()

        # Set the pipe name to the cdp.
        self.pipe_name.SetValue(str_to_gui(pipe))

        # Reset the cursor.
        if wx.IsBusy():
            wx.EndBusyCursor()
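
For context, a selector such as self.pipe_name is typically a read-only combo box bound to this handler through wx.EVT_COMBOBOX. The wiring below is a hypothetical sketch and not the actual relax GUI construction code.

        # Hypothetical construction of the data pipe selector (sketch only).
        self.pipe_name = wx.ComboBox(self, -1, value="", style=wx.CB_DROPDOWN | wx.CB_READONLY)
        self.Bind(wx.EVT_COMBOBOX, self.update_pipes, self.pipe_name)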
Example 6
0
    def update_pipes(self, event=None):
        """Update the spin view data pipe selector.

        @keyword event: The wx event.
        @type event:    wx event
        """

        # Change the cursor to busy.
        wx.BeginBusyCursor()

        # Init.
        pipe_switch = False

        # The selected pipe.
        if event:
            # The name of the selected pipe.
            pipe = gui_to_str(self.pipe_name.GetString(event.GetSelection()))

            # A pipe change.
            if pipe != cdp_name():
                pipe_switch = True
        else:
            pipe = cdp_name()

        # Catch the absence of a current data pipe.
        if not pipe:
            pipe = ''

        # Clear the previous data.
        self.pipe_name.Clear()

        # The list of pipe names.
        for name in pipe_names():
            self.pipe_name.Append(str_to_gui(name))

        # Switch.
        if pipe_switch:
            # Switch data pipes.
            self.gui.interpreter.apply('pipe.switch', pipe)

            # Update the tree view.
            self.tree_panel.update()

        # Set the pipe name to the cdp.
        self.pipe_name.SetValue(str_to_gui(pipe))

        # Reset the cursor.
        if wx.IsBusy():
            wx.EndBusyCursor()
Example 7
0
    def _hybridise(self, hybrid=None, pipe_list=None):
        """Create the hybrid data pipe.

        @keyword hybrid:    The name of the new hybrid data pipe.
        @type hybrid:       str
        @keyword pipe_list: The list of data pipes that the hybrid is composed of.
        @type pipe_list:    list of str
        """

        # Test if the hybrid data pipe already exists.
        if hybrid in pipes.pipe_names():
            raise RelaxPipeError(hybrid)

        # Loop over the pipes to be hybridised and check them.
        pipe_type = pipes.get_type(pipe_list[0])
        for pipe in pipe_list:
            # Switch to the data pipe.
            pipes.switch(pipe)

            # Test if the pipe exists.
            check_pipe()

            # Check that the pipe types match.
            if pipes.get_type() != pipe_type:
                raise RelaxError("The data pipe types do not match.")

            # Test if sequence data is loaded.
            if not exists_mol_res_spin_data():
                raise RelaxNoSequenceError

        # Check that the sequence data matches in all pipes.
        for i in range(1, len(pipe_list)):
            compare_sequence(pipe_list[0], pipe_list[i])

        # Create the data pipe.
        pipes.create(pipe_name=hybrid, pipe_type='hybrid')

        # Store the pipe list forming the hybrid.
        cdp.hybrid_pipes = pipe_list
Esempio n. 8
0
    def _hybridise(self, hybrid=None, pipe_list=None):
        """Create the hybrid data pipe.

        @keyword hybrid:    The name of the new hybrid data pipe.
        @type hybrid:       str
        @keyword pipe_list: The list of data pipes that the hybrid is composed of.
        @type pipe_list:    list of str
        """

        # Test if the hybrid data pipe already exists.
        if hybrid in pipes.pipe_names():
            raise RelaxPipeError(hybrid)

        # Loop over the pipes to be hybridised and check them.
        pipe_type = pipes.get_type(pipe_list[0])
        for pipe in pipe_list:
            # Switch to the data pipe.
            pipes.switch(pipe)

            # Test if the pipe exists.
            check_pipe()

            # Check that the pipe types match.
            if pipes.get_type() != pipe_type:
                raise RelaxError("The data pipe types do not match.")

            # Test if sequence data is loaded.
            if not exists_mol_res_spin_data():
                raise RelaxNoSequenceError

        # Check that the sequence data matches in all pipes.
        for i in range(1, len(pipe_list)):
            compare_sequence(pipe_list[0], pipe_list[i])

        # Create the data pipe.
        pipes.create(pipe_name=hybrid, pipe_type='hybrid')

        # Store the pipe list forming the hybrid.
        cdp.hybrid_pipes = pipe_list
Example 9
0
def select(method=None, modsel_pipe=None, bundle=None, pipes=None):
    """Model selection function.

    @keyword method:        The model selection method.  This can currently be one of:
                                - 'AIC', Akaike's Information Criteria.
                                - 'AICc', Small sample size corrected AIC.
                                - 'BIC', Bayesian or Schwarz Information Criteria.
                                - 'CV', Single-item-out cross-validation.
                            None of the other model selection techniques are currently supported.
    @type method:           str
    @keyword modsel_pipe:   The name of the new data pipe to be created by copying the selected data pipe.
    @type modsel_pipe:      str
    @keyword bundle:        The optional data pipe bundle to associate the newly created pipe with.
    @type bundle:           str or None
    @keyword pipes:         A list of the data pipes to use in the model selection.
    @type pipes:            list of str
    """

    # Test if the pipe already exists.
    if has_pipe(modsel_pipe):
        raise RelaxPipeError(modsel_pipe)

    # Use all pipes.
    if pipes == None:
        # Get all data pipe names from the relax data store.
        pipes = pipe_names()

    # Select the model selection technique.
    if method == 'AIC':
        print("AIC model selection.")
        formula = aic
    elif method == 'AICc':
        print("AICc model selection.")
        formula = aicc
    elif method == 'BIC':
        print("BIC model selection.")
        formula = bic
    elif method == 'CV':
        print("CV model selection.")
        raise RelaxError("The model selection technique " + repr(method) + " is not currently supported.")
    else:
        raise RelaxError("The model selection technique " + repr(method) + " is not currently supported.")

    # No pipes.
    if len(pipes) == 0:
        raise RelaxError("No data pipes are available for use in model selection.")

    # Initialise.
    function_type = {}
    model_loop = {}
    model_type = {}
    duplicate_data = {}
    model_statistics = {}
    skip_function = {}
    modsel_pipe_exists = False

    # Cross validation setup.
    if isinstance(pipes[0], list):
        # No pipes.
        if len(pipes[0]) == 0:
            raise RelaxError("No pipes are available for use in model selection in the array " + repr(pipes[0]) + ".")

        # Loop over the data pipes.
        for i in range(len(pipes)):
            for j in range(len(pipes[i])):
                # Specific functions.
                model_loop[pipes[i][j]] = get_specific_fn('model_loop', get_type(pipes[i][j]))
                model_type[pipes[i][j]] = get_specific_fn('model_type', get_type(pipes[i][j]))
                duplicate_data[pipes[i][j]] = get_specific_fn('duplicate_data', get_type(pipes[i][j]))
                model_statistics[pipes[i][j]] = get_specific_fn('model_stats', get_type(pipes[i][j]))
                skip_function[pipes[i][j]] = get_specific_fn('skip_function', get_type(pipes[i][j]))

        # The model loop should be the same for all data pipes!
        for i in range(len(pipes)):
            for j in range(len(pipes[i])):
                if model_loop[pipes[0][j]] != model_loop[pipes[i][j]]:
                    raise RelaxError("The models for each data pipes should be the same.")
        model_loop = model_loop[pipes[0][0]]

        # The model description.
        model_desc = get_specific_fn('model_desc', get_type(pipes[0][0]))

        # Global vs. local models.
        global_flag = False
        for i in range(len(pipes)):
            for j in range(len(pipes[i])):
                if model_type[pipes[i][j]]() == 'global':
                    global_flag = True

    # All other model selection setup.
    else:
        # Loop over the data pipes.
        for i in range(len(pipes)):
            # Specific functions.
            model_loop[pipes[i]] = get_specific_fn('model_loop', get_type(pipes[i]))
            model_type[pipes[i]] = get_specific_fn('model_type', get_type(pipes[i]))
            duplicate_data[pipes[i]] = get_specific_fn('duplicate_data', get_type(pipes[i]))
            model_statistics[pipes[i]] = get_specific_fn('model_stats', get_type(pipes[i]))
            skip_function[pipes[i]] = get_specific_fn('skip_function', get_type(pipes[i]))

        model_loop = model_loop[pipes[0]]

        # The model description.
        model_desc = get_specific_fn('model_desc', get_type(pipes[0]))

        # Global vs. local models.
        global_flag = False
        for j in range(len(pipes)):
            if model_type[pipes[j]]() == 'global':
                global_flag = True


    # Loop over the base models.
    for model_info in model_loop():
        # Print out.
        print("\n")
        desc = model_desc(model_info)
        if desc:
            print(desc)

        # Initial model.
        best_model = None
        best_crit = 1e300
        data = []

        # Loop over the pipes.
        for j in range(len(pipes)):
            # Single-item-out cross validation.
            if method == 'CV':
                # Sum of chi-squared values.
                sum_crit = 0.0

                # Loop over the validation samples and sum the chi-squared values.
                for k in range(len(pipes[j])):
                    # Alias the data pipe name.
                    pipe = pipes[j][k]

                    # Switch to this pipe.
                    switch(pipe)

                    # Skip function.
                    if skip_function[pipe](model_info):
                        continue

                    # Get the model statistics.
                    k, n, chi2 = model_statistics[pipe](model_info)

                    # Missing data sets.
                    if k == None or n == None or chi2 == None:
                        continue

                    # Chi2 sum.
                    sum_crit = sum_crit + chi2

                # Cross-validation criterion (average chi-squared value).
                crit = sum_crit / float(len(pipes[j]))

            # Other model selection methods.
            else:
                # Reassign the pipe.
                pipe = pipes[j]

                # Switch to this pipe.
                switch(pipe)

                # Skip function.
                if skip_function[pipe](model_info):
                    continue

                # Get the model statistics.
                k, n, chi2 = model_statistics[pipe](model_info, global_stats=global_flag)

                # Missing data sets.
                if k == None or n == None or chi2 == None:
                    continue

                # Calculate the criterion value.
                crit = formula(chi2, float(k), float(n))

                # Store the values for a later printout.
                data.append([pipe, repr(k), repr(n), "%.5f" % chi2, "%.5f" % crit])

            # Select model.
            if crit < best_crit:
                best_model = pipe
                best_crit = crit

        # Write out the table.
        write_data(out=sys.stdout, headings=["Data pipe", "Num_params_(k)", "Num_data_sets_(n)", "Chi2", "Criterion"], data=data)

        # Duplicate the data from the 'best_model' to the model selection data pipe.
        if best_model != None:
            # Print out of selected model.
            print("The model from the data pipe " + repr(best_model) + " has been selected.")

            # Switch to the selected data pipe.
            switch(best_model)

            # Duplicate.
            duplicate_data[best_model](best_model, modsel_pipe, model_info, global_stats=global_flag, verbose=False)

            # Model selection pipe now exists.
            modsel_pipe_exists = True

        # No model selected.
        else:
            # Print out of selected model.
            print("No model has been selected.")

    # Switch to the model selection pipe.
    if modsel_pipe_exists:
        switch(modsel_pipe)

    # Bundle the data pipe.
    if bundle:
        pipe_control.pipes.bundle(bundle=bundle, pipe=modsel_pipe)
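
The aic, aicc and bic objects assigned to formula above are criterion functions taking the chi-squared value, the parameter count k and the data count n. For chi-squared based model selection these criteria are conventionally computed as shown below; this is a sketch of the standard formulas rather than the exact relax implementation.

from math import log

def aic(chi2, k, n):
    """Akaike's Information Criterion for a chi-squared statistic."""
    return chi2 + 2.0 * k

def aicc(chi2, k, n):
    """Small sample size corrected AIC."""
    return chi2 + 2.0 * k + 2.0 * k * (k + 1.0) / (n - k - 1.0)

def bic(chi2, k, n):
    """Bayesian (Schwarz) Information Criterion."""
    return chi2 + k * log(n)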
Example 10
0
    def delete_analysis(self, index, reset=False):
        """Delete the analysis tab and data store corresponding to the index.

        The order of these operations is very important due to the notification of observer objects and the updates, synchronisations, etc. that follow.  If the program debugging mode is on, printouts at each stage allow the code flow and the observer object notifications to be followed.


        @param index:   The index of the analysis to delete.
        @type index:    int
        @keyword reset: A flag which if True will skip the data store clean up, i.e. the deletion of the analysis data store object and of all data pipes in its bundle.
        @type reset:    bool
        """

        # Debugging set up.
        if status.debug:
            fn_name = sys._getframe().f_code.co_name
            mod_name = inspect.getmodule(inspect.stack()[1][0]).__name__
            class_name = self.__class__.__name__
            full_name = "%s.%s.%s" % (mod_name, class_name, fn_name)
            print("\n\n")
            print("debug> %s:  Deleting the analysis at index %s." % (full_name, index))

        # Decrement the number of analyses.
        self._num_analyses -= 1

        # Shift the current page back one if necessary.
        if self._current > index:
            self._current -= 1
            if status.debug:
                print("debug> %s:  Switching the current analysis to index %s." % (full_name, self._current))

        # Execute the analysis delete method, if it exists.
        if hasattr(self._analyses[index], 'delete'):
            if status.debug:
                print("debug> %s:  Executing the analysis specific delete() method." % full_name)
            self._analyses[index].delete()
        wx.Yield()

        # Delete the tab.
        if status.debug:
            print("debug> %s:  Deleting the notebook page." % full_name)
        self.notebook.DeletePage(index)

        # Delete the tab object.
        if status.debug:
            print("debug> %s:  Deleting the analysis GUI object." % full_name)
        self._analyses.pop(index)

        # Data store clean up.
        if not reset:
            # Store the pipe bundle.
            pipe_bundle = ds.relax_gui.analyses[index].pipe_bundle

            # Delete the data store object.
            if status.debug:
                print("debug> %s:  Deleting the data store object." % full_name)
            ds.relax_gui.analyses.pop(index)

            # Delete all data pipes associated with the analysis.
            for pipe in pipes.pipe_names():
                if pipes.get_bundle(pipe) == pipe_bundle:
                    if status.debug:
                        print("debug> %s:  Deleting the data pipe '%s' from the '%s' bundle." % (full_name, pipe, pipe_bundle))
                    pipes.delete(pipe)

        # No more analyses, so in the initial state.
        if self._num_analyses == 0:
            if status.debug:
                print("debug> %s:  Setting the initial state." % full_name)
            self.set_init_state()

        # The current page has been deleted, so handle page switching to another page.
        elif index == self._current:
            # Default to the current page index - so that the switch is to the next page.
            page_index = self._current

            # Switch back one page.
            if self._num_analyses <= self._current:
                page_index = self._current - 1

            # Make the switch.
            if status.debug:
                print("debug> %s:  Switching to page %s." % (full_name, page_index))
            self.switch_page(page_index)

        # Notify the observers of the change.
        status.observers.gui_analysis.notify()
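
The status.observers.gui_analysis object notified at the end follows the classic observer registry pattern: interested parties register a callback under a unique key and notify() invokes every registered callback. The class below is a generic sketch of that pattern, not the relax implementation.

class ObserverRegistry:
    """Generic sketch of an observer registry with register/unregister/notify."""

    def __init__(self):
        # Map from a unique key to the registered callback.
        self._callbacks = {}

    def register(self, key, callback):
        """Store the callback under the given key."""
        self._callbacks[key] = callback

    def unregister(self, key):
        """Remove the callback registered under the key."""
        del self._callbacks[key]

    def notify(self):
        """Invoke every registered callback."""
        for key in list(self._callbacks):
            self._callbacks[key]()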
Example 11
0
    def delete_analysis(self, index):
        """Delete the analysis tab and data store corresponding to the index.

        The order of these operations is very important due to the notification of observer objects and the updates, synchronisations, etc. that follow.  If the program debugging mode is on, printouts at each stage allow the code flow and the observer object notifications to be followed.


        @param index:   The index of the analysis to delete.
        @type index:    int
        """

        # Debugging set up.
        if status.debug:
            fn_name = sys._getframe().f_code.co_name
            mod_name = inspect.getmodule(inspect.stack()[1][0]).__name__
            class_name = self.__class__.__name__
            full_name = "%s.%s.%s" % (mod_name, class_name, fn_name)
            print("\n\n")
            print("debug> %s:  Deleting the analysis at index %s." % (full_name, index))

        # Decrement the number of analyses.
        self._num_analyses -= 1

        # Shift the current page back one if necessary.
        if self._current > index:
            self._current -= 1
            if status.debug:
                print("debug> %s:  Switching the current analysis to index %s." % (full_name, self._current))

        # Execute the analysis delete method, if it exists.
        if hasattr(self._analyses[index], 'delete'):
            if status.debug:
                print("debug> %s:  Executing the analysis specific delete() method." % full_name)
            self._analyses[index].delete()

        # Delete the tab.
        if status.debug:
            print("debug> %s:  Deleting the notebook page." % full_name)
        self.notebook.DeletePage(index)

        # Delete the tab object.
        if status.debug:
            print("debug> %s:  Deleting the analysis GUI object." % full_name)
        self._analyses.pop(index)

        # The current page has been deleted, so switch one back (if possible).
        if index == self._current and self._current != 0:
            if status.debug:
                print("debug> %s:  Switching to page %s." % (full_name, self._current-1))
            self.switch_page(self._current-1)

        # No more analyses, so in the initial state.
        if self._num_analyses == 0:
            if status.debug:
                print("debug> %s:  Setting the initial state." % full_name)
            self.set_init_state()

        # Notify the observers of the change.
        status.observers.gui_analysis.notify()

        # Store the pipe bundle.
        pipe_bundle = ds.relax_gui.analyses[index].pipe_bundle

        # Delete the data store object.
        if status.debug:
            print("debug> %s:  Deleting the data store object." % full_name)
        ds.relax_gui.analyses.pop(index)

        # Delete all data pipes associated with the analysis.
        for pipe in pipes.pipe_names():
            if pipes.get_bundle(pipe) == pipe_bundle:
                if status.debug:
                    print("debug> %s:  Deleting the data pipe '%s' from the '%s' bundle." % (full_name, pipe, pipe_bundle))
                pipes.delete(pipe)
Example 12
0
    def execute(self):
        """Execute the protocol."""

        # MI - Local tm.
        ################

        if self.diff_model == 'local_tm':
            # Base directory to place files into.
            self.base_dir = self.results_dir+'local_tm'+sep

            # Sequential optimisation of all model-free models (function must be modified to suit).
            self.multi_model(local_tm=True)

            # Model selection.
            self.model_selection(modsel_pipe=self.name_pipe('aic'), dir=self.base_dir + 'aic')


        # Diffusion models MII to MV.
        #############################

        elif self.diff_model == 'sphere' or self.diff_model == 'prolate' or self.diff_model == 'oblate' or self.diff_model == 'ellipsoid':
            # No local_tm directory!
            dir_list = listdir(self.results_dir)
            if 'local_tm' not in dir_list:
                raise RelaxError("The local_tm model must be optimised first.")

            # The initial round of optimisation - not zero if calculations were interrupted.
            self.start_round = self.determine_rnd(model=self.diff_model)

            # Loop until convergence if conv_loop is set, otherwise just loop once.
            # This looping could be made much cleaner by removing the dependence on the determine_rnd() function.
            while True:
                # Determine which round of optimisation to do (init, round_1, round_2, etc).
                self.round = self.determine_rnd(model=self.diff_model)
                status.auto_analysis[self.pipe_bundle].round = self.round

                # Initial round of optimisation for diffusion models MII to MV.
                if self.round == 0:
                    # Base directory to place files into.
                    self.base_dir = self.results_dir+self.diff_model+sep+'init'+sep

                    # Run name.
                    name = self.name_pipe(self.diff_model)

                    # Create the data pipe (deleting the old one if it exists).
                    if has_pipe(name):
                        self.interpreter.pipe.delete(name)
                    self.interpreter.pipe.create(name, 'mf', bundle=self.pipe_bundle)

                    # Load the local tm diffusion model MI results.
                    self.interpreter.results.read(file='results', dir=self.results_dir+'local_tm'+sep+'aic')

                    # Remove the tm parameter.
                    self.interpreter.model_free.remove_tm()

                    # Add an arbitrary diffusion tensor which will be optimised.
                    if self.diff_model == 'sphere':
                        self.interpreter.diffusion_tensor.init(10e-9, fixed=False)
                        inc = self.diff_tensor_grid_inc['sphere']
                    elif self.diff_model == 'prolate':
                        self.interpreter.diffusion_tensor.init((10e-9, 0, 0, 0), spheroid_type='prolate', fixed=False)
                        inc = self.diff_tensor_grid_inc['prolate']
                    elif self.diff_model == 'oblate':
                        self.interpreter.diffusion_tensor.init((10e-9, 0, 0, 0), spheroid_type='oblate', fixed=False)
                        inc = self.diff_tensor_grid_inc['oblate']
                    elif self.diff_model == 'ellipsoid':
                        self.interpreter.diffusion_tensor.init((10e-09, 0, 0, 0, 0, 0), fixed=False)
                        inc = self.diff_tensor_grid_inc['ellipsoid']

                    # Minimise just the diffusion tensor.
                    self.interpreter.fix('all_spins')
                    self.interpreter.grid_search(inc=inc)
                    self.interpreter.minimise(self.min_algor, func_tol=self.opt_func_tol, max_iter=self.opt_max_iterations)

                    # Write the results.
                    self.interpreter.results.write(file='results', dir=self.base_dir, force=True)


                # Normal round of optimisation for diffusion models MII to MV.
                else:
                    # Base directory to place files into.
                    self.base_dir = self.results_dir+self.diff_model+sep+'round_'+repr(self.round)+sep

                    # Load the optimised diffusion tensor from the previous round.
                    self.load_tensor()

                    # Sequential optimisation of all model-free models (function must be modified to suit).
                    self.multi_model()

                    # Model selection.
                    self.model_selection(modsel_pipe=self.name_pipe('aic'), dir=self.base_dir + 'aic')

                    # Final optimisation of all diffusion and model-free parameters.
                    self.interpreter.fix('all', fixed=False)

                    # Minimise all parameters.
                    self.interpreter.minimise(self.min_algor, func_tol=self.opt_func_tol, max_iter=self.opt_max_iterations)

                    # Write the results.
                    dir = self.base_dir + 'opt'
                    self.interpreter.results.write(file='results', dir=dir, force=True)

                    # Test for convergence.
                    converged = self.convergence()

                    # Break out of the infinite while loop if automatic looping is not activated or if convergence has occurred.
                    if converged or not self.conv_loop:
                        break

            # Unset the status.
            status.auto_analysis[self.pipe_bundle].round = None


        # Final run.
        ############

        elif self.diff_model == 'final':
            # Diffusion model selection.
            ############################

            # The contents of the results directory.
            dir_list = listdir(self.results_dir)

            # Check that the minimal set of global diffusion models required for the protocol has been optimised.
            min_models = ['local_tm', 'sphere']
            for model in min_models:
                if model not in dir_list:
                    raise RelaxError("The minimum set of global diffusion models required for the protocol have not been optimised, the '%s' model results cannot be found." % model)

            # Build a list of all global diffusion models optimised.
            all_models = ['local_tm', 'sphere', 'prolate', 'oblate', 'ellipsoid']
            self.opt_models = []
            self.pipes = []
            for model in all_models:
                if model in dir_list:
                    self.opt_models.append(model)
                    self.pipes.append(self.name_pipe(model))

            # Remove all temporary pipes used in this auto-analysis.
            for name in pipe_names(bundle=self.pipe_bundle):
                if name in self.pipes + self.mf_model_pipes + self.local_tm_model_pipes + [self.name_pipe('aic'), self.name_pipe('previous')]:
                    self.interpreter.pipe.delete(name)

            # Create the local_tm data pipe.
            self.interpreter.pipe.create(self.name_pipe('local_tm'), 'mf', bundle=self.pipe_bundle)

            # Load the local tm diffusion model MI results.
            self.interpreter.results.read(file='results', dir=self.results_dir+'local_tm'+sep+'aic')

            # Loop over models MII to MV.
            for model in ['sphere', 'prolate', 'oblate', 'ellipsoid']:
                # Skip missing models.
                if model not in self.opt_models:
                    continue

                # Determine which was the last round of optimisation for each of the models.
                self.round = self.determine_rnd(model=model) - 1

                # If no directories beginning with 'round_' exist, the script has not been properly utilised!
                if self.round < 1:
                    # Construct the name of the diffusion tensor.
                    name = model
                    if model == 'prolate' or model == 'oblate':
                        name = name + ' spheroid'

                    # Throw an error to prevent misuse of the script.
                    raise RelaxError("Multiple rounds of optimisation of the " + name + " (between 8 to 15) are required for the proper execution of this script.")

                # Create the data pipe.
                self.interpreter.pipe.create(self.name_pipe(model), 'mf', bundle=self.pipe_bundle)

                # Load the diffusion model results.
                self.interpreter.results.read(file='results', dir=self.results_dir+model+sep+'round_'+repr(self.round)+sep+'opt')

            # Model selection between MI to MV.
            self.model_selection(modsel_pipe=self.name_pipe('final'), write_flag=False)


            # Monte Carlo simulations.
            ##########################

            # Fix the diffusion tensor, if it exists.
            if hasattr(get_pipe(self.name_pipe('final')), 'diff_tensor'):
                self.interpreter.fix('diff')

            # Simulations.
            self.interpreter.monte_carlo.setup(number=self.mc_sim_num)
            self.interpreter.monte_carlo.create_data()
            self.interpreter.monte_carlo.initial_values()
            self.interpreter.minimise(self.min_algor, func_tol=self.opt_func_tol, max_iter=self.opt_max_iterations)
            self.interpreter.eliminate()
            self.interpreter.monte_carlo.error_analysis()


            # Write the final results.
            ##########################

            # Create results files and plots of the data.
            self.write_results()


        # Unknown script behaviour.
        ###########################

        else:
            raise RelaxError("Unknown diffusion model, change the value of 'self.diff_model'")
Example 13
0
def select(method=None, modsel_pipe=None, bundle=None, pipes=None):
    """Model selection function.

    @keyword method:        The model selection method.  This can currently be one of:
                                - 'AIC', Akaike's Information Criteria.
                                - 'AICc', Small sample size corrected AIC.
                                - 'BIC', Bayesian or Schwarz Information Criteria.
                                - 'CV', Single-item-out cross-validation.
                            None of the other model selection techniques are currently supported.
    @type method:           str
    @keyword modsel_pipe:   The name of the new data pipe to be created by copying the selected data pipe.
    @type modsel_pipe:      str
    @keyword bundle:        The optional data pipe bundle to associate the newly created pipe with.
    @type bundle:           str or None
    @keyword pipes:         A list of the data pipes to use in the model selection.
    @type pipes:            list of str
    """

    # Test if the pipe already exists.
    if has_pipe(modsel_pipe):
        raise RelaxPipeError(modsel_pipe)

    # Use all pipes.
    if pipes == None:
        # Get all data pipe names from the relax data store.
        pipes = pipe_names()

    # Select the model selection technique.
    if method == 'AIC':
        print("AIC model selection.")
        formula = aic
    elif method == 'AICc':
        print("AICc model selection.")
        formula = aicc
    elif method == 'BIC':
        print("BIC model selection.")
        formula = bic
    elif method == 'CV':
        print("CV model selection.")
        raise RelaxError("The model selection technique " + repr(method) + " is not currently supported.")
    else:
        raise RelaxError("The model selection technique " + repr(method) + " is not currently supported.")

    # No pipes.
    if len(pipes) == 0:
        raise RelaxError("No data pipes are available for use in model selection.")

    # Initialise.
    function_type = {}
    model_loop = {}
    model_type = {}
    duplicate_data = {}
    model_statistics = {}
    skip_function = {}
    modsel_pipe_exists = False

    # Cross validation setup.
    if isinstance(pipes[0], list):
        # No pipes.
        if len(pipes[0]) == 0:
            raise RelaxError("No pipes are available for use in model selection in the array " + repr(pipes[0]) + ".")

        # Loop over the data pipes.
        for i in range(len(pipes)):
            for j in range(len(pipes[i])):
                # The specific analysis API object.
                api = return_api(pipe_name=pipes[i][j])

                # Store the specific functions.
                model_loop[pipes[i][j]] = api.model_loop
                model_type[pipes[i][j]] = api.model_type
                duplicate_data[pipes[i][j]] = api.duplicate_data
                model_statistics[pipes[i][j]] = api.model_statistics
                skip_function[pipes[i][j]] = api.skip_function

        # The model loop should be the same for all data pipes!
        for i in range(len(pipes)):
            for j in range(len(pipes[i])):
                if model_loop[pipes[0][j]] != model_loop[pipes[i][j]]:
                    raise RelaxError("The models for each data pipes should be the same.")

        # Alias some functions from the specific API of the first data pipe.
        api = return_api(pipe_name=pipes[0][0])
        model_loop = api.model_loop
        model_desc = api.model_desc

        # Global vs. local models.
        global_flag = False
        for i in range(len(pipes)):
            for j in range(len(pipes[i])):
                if model_type[pipes[i][j]]() == 'global':
                    global_flag = True

    # All other model selection setup.
    else:
        # Loop over the data pipes.
        for i in range(len(pipes)):
            # The specific analysis API object.
            api = return_api(pipe_name=pipes[i])

            # Store the specific functions.
            model_loop[pipes[i]] = api.model_loop
            model_type[pipes[i]] = api.model_type
            duplicate_data[pipes[i]] = api.duplicate_data
            model_statistics[pipes[i]] = api.model_statistics
            skip_function[pipes[i]] = api.skip_function

        # Alias some functions from the specific API of the first data pipe.
        api = return_api(pipe_name=pipes[0])
        model_loop = api.model_loop
        model_desc = api.model_desc

        # Global vs. local models.
        global_flag = False
        for j in range(len(pipes)):
            if model_type[pipes[j]]() == 'global':
                global_flag = True


    # Loop over the base models.
    for model_info in model_loop():
        # Print out.
        print("\n")
        desc = model_desc(model_info)
        if desc:
            print(desc)

        # Initial model.
        best_model = None
        best_crit = 1e300
        data = []

        # Loop over the pipes.
        for j in range(len(pipes)):
            # Single-item-out cross validation.
            if method == 'CV':
                # Sum of chi-squared values.
                sum_crit = 0.0

                # Loop over the validation samples and sum the chi-squared values.
                for k in range(len(pipes[j])):
                    # Alias the data pipe name.
                    pipe = pipes[j][k]

                    # Switch to this pipe.
                    switch(pipe)

                    # Skip function.
                    if skip_function[pipe](model_info):
                        continue

                    # Get the model statistics.
                    k, n, chi2 = model_statistics[pipe](model_info)

                    # Missing data sets.
                    if k == None or n == None or chi2 == None:
                        continue

                    # Chi2 sum.
                    sum_crit = sum_crit + chi2

                # Cross-validation criterion (average chi-squared value).
                crit = sum_crit / float(len(pipes[j]))

            # Other model selection methods.
            else:
                # Reassign the pipe.
                pipe = pipes[j]

                # Switch to this pipe.
                switch(pipe)

                # Skip function.
                if skip_function[pipe](model_info):
                    continue

                # Get the model statistics.
                k, n, chi2 = model_statistics[pipe](model_info, global_stats=global_flag)

                # Missing data sets.
                if k == None or n == None or chi2 == None:
                    continue

                # Calculate the criterion value.
                crit = formula(chi2, float(k), float(n))

                # Store the values for a later printout.
                data.append([pipe, repr(k), repr(n), "%.5f" % chi2, "%.5f" % crit])

            # Select model.
            if crit < best_crit:
                best_model = pipe
                best_crit = crit

        # Write out the table.
        write_data(out=sys.stdout, headings=["Data pipe", "Num_params_(k)", "Num_data_sets_(n)", "Chi2", "Criterion"], data=data)

        # Duplicate the data from the 'best_model' to the model selection data pipe.
        if best_model != None:
            # Print out of selected model.
            print("The model from the data pipe " + repr(best_model) + " has been selected.")

            # Switch to the selected data pipe.
            switch(best_model)

            # Duplicate.
            duplicate_data[best_model](best_model, modsel_pipe, model_info, global_stats=global_flag, verbose=False)

            # Model selection pipe now exists.
            modsel_pipe_exists = True

        # No model selected.
        else:
            # Print out of selected model.
            print("No model has been selected.")

    # Switch to the model selection pipe.
    if modsel_pipe_exists:
        switch(modsel_pipe)

    # Bundle the data pipe.
    if bundle:
        pipe_control.pipes.bundle(bundle=bundle, pipe=modsel_pipe)

    # Update all of the required metadata structures.
    mol_res_spin.metadata_update()
    interatomic.metadata_update()
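
A hypothetical call of select() for AIC based selection over three model pipes is shown below; the pipe and bundle names are illustrative only and not taken from the source.

# Hypothetical usage (illustrative names only).
select(method='AIC', modsel_pipe='aic - model selection', bundle='mf models', pipes=['m1', 'm2', 'm3'])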