Example #1
0
class Dummy_class:
    def __init__(self):
        """Initialise the dummy container, run the interpreter, and print all user function names."""

        # Dummy data structures required by the interpreter.
        self.script_file = None
        self.intro_string = ''
        self.dummy_mode = 1

        # Fire up the interpreter.
        self.interpreter = Interpreter(self)
        self.interpreter.run()

        # Names to exclude from the printout.
        skip_names = [
            'Numeric', 'Scientific', 'intro_off', 'intro_on', 'pi',
            'script'
        ]

        # Loop over the alphabetically sorted data structure names.
        for name in sorted(self.local.keys()):
            # Ignore blacklisted names.
            if name in skip_names:
                continue

            # Fetch the corresponding object.
            obj = self.local[name]

            # A user function containing class - descend into it.
            if hasattr(obj, '__relax_help__'):
                # Loop over the alphabetically sorted attribute names.
                for name2 in sorted(dir(obj)):
                    # Ignore names starting with an underscore.
                    if search('^_', name2):
                        continue

                    # Fetch the attribute.
                    obj2 = getattr(obj, name2)

                    # Only print out attributes possessing a docstring.
                    if hasattr(obj2, '__doc__') and obj2.__doc__:
                        print(name + '.' + name2)

                # Nothing more to do for this class.
                continue

            # Only print out objects possessing a docstring.
            if hasattr(obj, '__doc__') and obj.__doc__:
                print(name)
Example #2
0
class Test_relax_fit(TestCase):
    """Unit tests for the functions of the 'prompt.relax_fit' module."""

    def __init__(self, methodName=None):
        """Set up the test case class for the system tests."""

        # Initialise via the base classes first.
        super(Test_relax_fit, self).__init__(methodName)

        # Create and switch on the interpreter, raising real relax errors.
        self.interpreter = Interpreter(show_script=False, raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)

        # Keep a shortcut to the relax_fit user function class.
        self.relax_fit_fns = self.interpreter.relax_fit

    def test_relax_time_argfail_time(self):
        """The time arg test of the relax_fit.relax_time() user function."""

        # Check each data type in turn.
        for data in DATA_TYPES:
            # Numbers (float, bin, int) are valid values, so skip them.
            if data[0] in ('float', 'bin', 'int'):
                continue

            # All other types must trigger the error.
            self.assertRaises(RelaxNumError, self.relax_fit_fns.relax_time, time=data[1])

    def test_relax_time_argfail_spectrum_id(self):
        """The spectrum_id arg test of the relax_fit.relax_time() user function."""

        # Check each data type in turn.
        for data in DATA_TYPES:
            # Strings are valid values, so skip them.
            if data[0] == 'str':
                continue

            # All other types must trigger the error.
            self.assertRaises(RelaxStrError, self.relax_fit_fns.relax_time, spectrum_id=data[1])

    def test_select_model_argfail_model(self):
        """The model arg test of the relax_fit.select_model() user function."""

        # Check each data type in turn.
        for data in DATA_TYPES:
            # Strings are valid values, so skip them.
            if data[0] == 'str':
                continue

            # All other types must trigger the error.
            self.assertRaises(RelaxStrError, self.relax_fit_fns.select_model, model=data[1])
Example #3
0
class Dummy_class:
    def __init__(self):
        """Initialise the dummy container, run the interpreter, and print all user function names."""

        # Dummy data structures required by the interpreter.
        self.script_file = None
        self.intro_string = ''
        self.dummy_mode = 1

        # Fire up the interpreter.
        self.interpreter = Interpreter(self)
        self.interpreter.run()

        # Names to exclude from the printout.
        skip_names = ['Numeric', 'Scientific', 'intro_off', 'intro_on', 'pi', 'script']

        # Loop over the alphabetically sorted data structure names.
        for name in sorted(self.local.keys()):
            # Ignore blacklisted names.
            if name in skip_names:
                continue

            # Fetch the corresponding object.
            obj = self.local[name]

            # A user function containing class - descend into it.
            if hasattr(obj, '__relax_help__'):
                # Loop over the alphabetically sorted attribute names.
                for name2 in sorted(dir(obj)):
                    # Ignore names starting with an underscore.
                    if search('^_', name2):
                        continue

                    # Fetch the attribute.
                    obj2 = getattr(obj, name2)

                    # Only print out attributes possessing a docstring.
                    if hasattr(obj2, '__doc__') and obj2.__doc__:
                        print(name + '.' + name2)

                # Nothing more to do for this class.
                continue

            # Only print out objects possessing a docstring.
            if hasattr(obj, '__doc__') and obj.__doc__:
                print(name)
Example #4
0
class Test_value(Value_base_class, TestCase):
    """Unit tests for the functions of the 'prompt.value' module."""

    def __init__(self, methodName=None):
        """Set up the test case class for the system tests."""

        # Initialise via the base classes first.
        super(Test_value, self).__init__(methodName)

        # Create and switch on the interpreter, raising real relax errors.
        self.interpreter = Interpreter(show_script=False, raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)

        # Keep a shortcut to the value user function class.
        self.value_fns = self.interpreter.value


    def test_set_argfail_val(self):
        """The val arg test of the value.set() user function."""

        # Check each data type in turn.
        for data in DATA_TYPES:
            # None, numbers, strings, and their list variants are valid values, so skip them.
            if data[0] in ('None', 'bin', 'bool', 'int', 'str', 'float', 'int list', 'bin list', 'bool list', 'str list', 'float list', 'number list'):
                continue

            # All other types must trigger the error.
            self.assertRaises(RelaxNoneValListValError, self.value_fns.set, val=data[1], param='csa')


    def test_set_argfail_param(self):
        """The param arg test of the value.set() user function."""

        # Check each data type in turn.
        for data in DATA_TYPES:
            # None, strings, and string lists are valid values, so skip them.
            if data[0] in ('None', 'str', 'str list'):
                continue

            # All other types must trigger the error.
            self.assertRaises(RelaxNoneStrListStrError, self.value_fns.set, param=data[1], val=None)


    def test_set_argfail_spin_id(self):
        """The spin_id arg test of the value.set() user function."""

        # Check each data type in turn.
        for data in DATA_TYPES:
            # None and strings are valid values, so skip them.
            if data[0] in ('None', 'str'):
                continue

            # All other types must trigger the error.
            self.assertRaises(RelaxNoneStrError, self.value_fns.set, spin_id=data[1])
Example #5
0
class Test_relax_fit(TestCase):
    """Unit tests for the functions of the 'prompt.relax_fit' module."""

    def __init__(self, methodName=None):
        """Set up the test case class for the system tests."""

        # Initialise via the base classes first.
        super(Test_relax_fit, self).__init__(methodName)

        # Create and switch on the interpreter, raising real relax errors.
        self.interpreter = Interpreter(show_script=False,
                                       raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)

        # Keep a shortcut to the relax_fit user function class.
        self.relax_fit_fns = self.interpreter.relax_fit


    def test_relax_time_argfail_time(self):
        """The time arg test of the relax_fit.relax_time() user function."""

        # Check each data type in turn.
        for data in DATA_TYPES:
            # Numbers (float, bin, int) are valid values, so skip them.
            if data[0] in ('float', 'bin', 'int'):
                continue

            # All other types must trigger the error.
            self.assertRaises(RelaxNumError,
                              self.relax_fit_fns.relax_time,
                              time=data[1])


    def test_relax_time_argfail_spectrum_id(self):
        """The spectrum_id arg test of the relax_fit.relax_time() user function."""

        # Check each data type in turn.
        for data in DATA_TYPES:
            # Strings are valid values, so skip them.
            if data[0] == 'str':
                continue

            # All other types must trigger the error.
            self.assertRaises(RelaxStrError,
                              self.relax_fit_fns.relax_time,
                              spectrum_id=data[1])


    def test_select_model_argfail_model(self):
        """The model arg test of the relax_fit.select_model() user function."""

        # Check each data type in turn.
        for data in DATA_TYPES:
            # Strings are valid values, so skip them.
            if data[0] == 'str':
                continue

            # All other types must trigger the error.
            self.assertRaises(RelaxStrError,
                              self.relax_fit_fns.select_model,
                              model=data[1])
Example #6
0
    def __init__(self, methodName=None):
        """Set up the test case class for the system tests."""

        # Initialise via the base classes first.
        super(Test_align_tensor, self).__init__(methodName)

        # Create and switch on the interpreter, raising real relax errors.
        self.interpreter = Interpreter(show_script=False,
                                       raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)

        # Keep a shortcut to the align_tensor user function class.
        self.align_tensor_fns = self.interpreter.align_tensor
Example #7
0
    def __init__(self, methodName=None):
        """Set up the test case class for the system tests."""

        # Initialise via the base classes first.
        super(Test_eliminate, self).__init__(methodName)

        # Create and switch on the interpreter, raising real relax errors.
        self.interpreter = Interpreter(show_script=False,
                                       raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)

        # Collect the user functions needed for the tests in a container.
        self.eliminate_fns = Container()
        self.eliminate_fns.eliminate = self.interpreter.eliminate
Example #8
0
    def __init__(self, methodName=None):
        """Set up the test case class for the system tests."""

        # Initialise via the TestCase base class.
        super(SystemTestCase, self).__init__(methodName)

        # Default classification string for skipped tests, unless a subclass has already set one.
        if not hasattr(self, '_skip_type'):
            self._skip_type = 'system'

        # Create and switch on the interpreter, raising real relax errors.
        self.interpreter = Interpreter(show_script=False, raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)
Example #9
0
    def run(self, save_path=None):
        """Generate the distribution and alignment data.

        @keyword save_path: The path to place the files into.  If set to None, then the current path will be used.
        @type save_path:    None or str
        """

        # The paths to the files.
        self.path = status.install_path + sep+'test_suite'+sep+'shared_data'+sep+'frame_order'+sep+'cam'+sep
        self.save_path = save_path
        if self.save_path is None:
            self.save_path = getcwd()

        # Load the interpreter.
        self.interpreter = Interpreter(show_script=False, raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)

        # Set up for the progress meter (commas between the thousands).  The locale may be absent on some systems, so failure is non-fatal.
        try:
            locale.setlocale(locale.LC_ALL, 'en_US')
        except locale.Error:
            pass

        # Build the axis system.
        self.build_axes()
        self.print_axis_system()
        self.axes_to_pdb()

        # Set up the system.
        self._multi_system()

        # Set up the data pipe.
        self._pipe_setup()

        # Calculate the RDC data.
        self._calculate_rdc()

        # Calculate the PCS data.
        self._calculate_pcs()

        # Create the distribution of structures.
        if self.DIST_PDB:
            self._create_distribution()

        # Save a state file for debugging.
        if self.SAVE_STATE:
            self.interpreter.state.save('generate_distribution', dir=self.save_path, force=True)
    def run(self, save_path=None):
        """Generate the distribution and alignment data.

        @keyword save_path: The path to place the files into.  If set to None, then the current path will be used.
        @type save_path:    None or str
        """

        # The paths to the files.
        self.path = status.install_path + sep+'test_suite'+sep+'shared_data'+sep+'frame_order'+sep+'cam'+sep
        self.save_path = save_path
        if self.save_path is None:
            self.save_path = getcwd()

        # Load the interpreter.
        self.interpreter = Interpreter(show_script=False, quit=False, raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)

        # Build the axis system.
        self.build_axes()
        self._print_axis_system()
        self.axes_to_pdb()

        # Create the distribution.
        self._create_distribution()

        # Back-calculate the RDCs and PCSs.
        self._back_calc()

        # Save a state file for debugging.
        self.interpreter.state.save('generate_distribution', dir=self.save_path, force=True)
Example #11
0
class Test_eliminate(TestCase):
    """Unit tests for the functions of the 'prompt.eliminate' module."""

    def __init__(self, methodName=None):
        """Set up the test case class for the system tests."""

        # Initialise via the base classes first.
        super(Test_eliminate, self).__init__(methodName)

        # Create and switch on the interpreter, raising real relax errors.
        self.interpreter = Interpreter(show_script=False,
                                       raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)

        # Collect the user functions needed for the tests in a container.
        self.eliminate_fns = Container()
        self.eliminate_fns.eliminate = self.interpreter.eliminate

    def test_eliminate_function(self):
        """The function arg unit test of the eliminate() user function."""

        # Check each data type in turn.
        for data in DATA_TYPES:
            # None and functions are valid values, so skip them.
            if data[0] in ('None', 'function'):
                continue

            # All other types must trigger the error.
            self.assertRaises(RelaxNoneFunctionError,
                              self.eliminate_fns.eliminate,
                              function=data[1])

    def test_eliminate_args(self):
        """The args arg unit test of the eliminate() user function."""

        # Check each data type in turn.
        for data in DATA_TYPES:
            # None and tuples are valid values, so skip them.
            if data[0] in ('None', 'tuple', 'float tuple', 'str tuple'):
                continue

            # All other types must trigger the error.
            self.assertRaises(RelaxNoneTupleError,
                              self.eliminate_fns.eliminate,
                              function=dummy_function,
                              args=data[1])
Example #12
0
    def __init__(self, pipe_name=None, pipe_bundle=None, file_root='noe', results_dir=None, save_state=True):
        """Perform relaxation curve fitting.

        To use this auto-analysis, a data pipe with all the required data needs to be set up.  This data pipe should contain the following:

            - All the spins loaded.
            - Unresolved spins deselected.
            - The NOE peak intensities from the saturated and reference spectra.
            - Either the baseplane noise RMDS values should be set or replicated spectra loaded.

        @keyword pipe_name:     The name of the data pipe containing all of the data for the analysis.
        @type pipe_name:        str
        @keyword pipe_bundle:   The data pipe bundle to associate all spawned data pipes with.
        @type pipe_bundle:      str
        @keyword file_root:     File root of the output filea.
        @type file_root:        str
        @keyword results_dir:   The directory where results files are saved.
        @type results_dir:      str
        @keyword save_state:    A flag which if True will cause a relax save state to be created at the end of the analysis.
        @type save_state:       bool
        """

        # Acquire the execution lock before anything else runs.
        status.exec_lock.acquire(pipe_bundle, mode='auto-analysis')

        # Initialise the analysis status object and flag this analysis as the current one.
        status.init_auto_analysis(pipe_bundle, type='noe')
        status.current_analysis = pipe_bundle

        # Keep copies of all the arguments.
        self.save_state = save_state
        self.pipe_name = pipe_name
        self.pipe_bundle = pipe_bundle
        self.file_root = file_root
        self.results_dir = results_dir

        # The Grace output directory lives under the results directory when one is given.
        self.grace_dir = results_dir + sep + 'grace' if self.results_dir else 'grace'

        # Check the validity of the stored data.
        self.check_vars()

        # Switch to the analysis data pipe if it is not already current.
        if self.pipe_name != cdp_name():
            switch(self.pipe_name)

        # Create and switch on the interpreter, raising real relax errors.
        self.interpreter = Interpreter(show_script=False, quit=False, raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)

        # Run the full analysis.
        self.run()

        # Mark the analysis as finished and release the execution lock.
        status.auto_analysis[self.pipe_bundle].fin = True
        status.current_analysis = None
        status.exec_lock.release()
Example #13
0
    def __init__(self, methodName=None):
        """Set up the test case class for the system tests."""

        # Initialise via the base classes first.
        super(Test_state, self).__init__(methodName)

        # Create and switch on the interpreter, raising real relax errors.
        self.interpreter = Interpreter(show_script=False,
                                       raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)

        # Keep a shortcut to the state user function class.
        self.state = self.interpreter.state

        # Extra aliases so the tests can drive the backend directly.
        self.state.load_state = self.state.load
        self.state.save_state = self.state.save
class Test_eliminate(TestCase):
    """Unit tests for the functions of the 'prompt.eliminate' module."""

    def __init__(self, methodName=None):
        """Set up the test case class for the system tests."""

        # Initialise via the base classes first.
        super(Test_eliminate, self).__init__(methodName)

        # Create and switch on the interpreter, raising real relax errors.
        self.interpreter = Interpreter(show_script=False, quit=False, raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)

        # Collect the user functions needed for the tests in a container.
        self.eliminate_fns = Container()
        self.eliminate_fns.eliminate = self.interpreter.eliminate


    def test_eliminate_function(self):
        """The function arg unit test of the eliminate() user function."""

        # Check each data type in turn.
        for data in DATA_TYPES:
            # None and functions are valid values, so skip them.
            if data[0] in ('None', 'function'):
                continue

            # All other types must trigger the error.
            self.assertRaises(RelaxNoneFunctionError, self.eliminate_fns.eliminate, function=data[1])


    def test_eliminate_args(self):
        """The args arg unit test of the eliminate() user function."""

        # Check each data type in turn.
        for data in DATA_TYPES:
            # None and tuples are valid values, so skip them.
            if data[0] in ('None', 'tuple', 'float tuple', 'str tuple'):
                continue

            # All other types must trigger the error.
            self.assertRaises(RelaxNoneTupleError, self.eliminate_fns.eliminate, function=dummy_function, args=data[1])
Example #15
0
    def __init__(self, methodName=None):
        """Set up the test case class for the system tests."""

        # Initialise via the base classes first.
        super(Test_spin, self).__init__(methodName)

        # Create and switch on the interpreter, raising real relax errors.
        self.interpreter = Interpreter(show_script=False, quit=False,
                                       raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)

        # Keep a shortcut to the spin user function class.
        self.spin_fns = self.interpreter.spin
    def __init__(self, methodName=None):
        """Set up the test case class for the system tests."""

        # Initialise via the base classes first.
        super(Test_eliminate, self).__init__(methodName)

        # Create and switch on the interpreter, raising real relax errors.
        self.interpreter = Interpreter(show_script=False, quit=False,
                                       raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)

        # Collect the user functions needed for the tests in a container.
        self.eliminate_fns = Container()
        self.eliminate_fns.eliminate = self.interpreter.eliminate
    def __init__(self, methodName=None):
        """Set up the test case class for the system tests."""

        # Initialise via the TestCase base class.
        super(SystemTestCase, self).__init__(methodName)

        # Default classification string for skipped tests, unless a subclass has already set one.
        if not hasattr(self, '_skip_type'):
            self._skip_type = 'system'

        # Create and switch on the interpreter, raising real relax errors.
        self.interpreter = Interpreter(show_script=False, quit=False,
                                       raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)
Example #18
0
    def run(self, save_path=None):
        """Generate the distribution and alignment data.

        @keyword save_path: The path to place the files into.  If set to None, then the current path will be used.
        @type save_path:    None or str
        """

        # The paths to the files.
        self.path = status.install_path + sep+'test_suite'+sep+'shared_data'+sep+'frame_order'+sep+'cam'+sep
        self.save_path = save_path
        if self.save_path is None:
            self.save_path = getcwd()

        # Load the interpreter.
        self.interpreter = Interpreter(show_script=False, raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)

        # Set up for the progress meter (commas between the thousands).  The locale may be absent on some systems, so failure is non-fatal.
        try:
            locale.setlocale(locale.LC_ALL, 'en_US')
        except locale.Error:
            pass

        # Build the axis system.
        self.build_axes()
        self.print_axis_system()
        self.axes_to_pdb()

        # Set up the system.
        self._multi_system()

        # Set up the data pipe.
        self._pipe_setup()

        # Calculate the RDC data.
        self._calculate_rdc()

        # Calculate the PCS data.
        self._calculate_pcs()

        # Create the distribution of structures.
        if self.DIST_PDB:
            self._create_distribution()

        # Save a state file for debugging.
        if self.SAVE_STATE:
            self.interpreter.state.save('generate_distribution', dir=self.save_path, force=True)
class SystemTestCase(TestCase):
    """The system test base class."""

    def __init__(self, methodName=None):
        """Set up the test case class for the system tests.

        @keyword methodName:    The name of the test method to run.
        @type methodName:       str or None
        """

        # Execute the TestCase __init__ method.
        super(SystemTestCase, self).__init__(methodName)

        # A string used for classifying skipped tests.
        if not hasattr(self, '_skip_type'):
            self._skip_type = 'system'

        # Load the interpreter.
        self.interpreter = Interpreter(show_script=False, quit=False, raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)


    def script_exec(self, script):
        """Execute a relax script within the system test framework.

        @param script:  The full path of the script to execute.
        @type script:   str
        """

        # Execute the script.
        self.interpreter.run(script_file=script)


    def tearDown(self):
        """Default tearDown operation - delete temp directories and files and reset relax."""

        # Remove the temporary directory and variable (if there is a deletion failure, continue to allow the test suite to survive).  Only Exception subclasses are silenced so that KeyboardInterrupt and SystemExit still propagate.
        try:
            deletion(obj=ds, name='tmpdir', dir=True)
        except Exception:
            pass
        try:
            deletion(obj=self, name='tmpdir', dir=True)
        except Exception:
            pass

        # Remove temporary file and variable (if there is a deletion failure, continue to allow the test suite to survive).
        try:
            deletion(obj=ds, name='tmpfile', dir=False)
        except Exception:
            pass
        try:
            deletion(obj=self, name='tmpfile', dir=False)
        except Exception:
            pass

        # Reset relax.
        reset()
Example #20
0
    def __init__(self, methodName=None):
        """Set up the test case class for the system tests."""

        # Initialise via the base classes first.
        super(Test_state, self).__init__(methodName)

        # Create and switch on the interpreter, raising real relax errors.
        self.interpreter = Interpreter(show_script=False,
                                       raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)

        # Keep a shortcut to the state user function class.
        self.state = self.interpreter.state

        # Extra aliases so the tests can drive the backend directly.
        self.state.load_state = self.state.load
        self.state.save_state = self.state.save
Example #21
0
class SystemTestCase(TestCase):
    """The system test base class."""

    def __init__(self, methodName=None):
        """Set up the test case class for the system tests.

        @keyword methodName:    The name of the test method to run.
        @type methodName:       str or None
        """

        # Execute the TestCase __init__ method.
        super(SystemTestCase, self).__init__(methodName)

        # A string used for classifying skipped tests.
        if not hasattr(self, '_skip_type'):
            self._skip_type = 'system'

        # Load the interpreter.
        self.interpreter = Interpreter(show_script=False,
                                       raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)

    def script_exec(self, script):
        """Execute a relax script within the system test framework.

        @param script:  The full path of the script to execute.
        @type script:   str
        """

        # Execute the script.
        self.interpreter.run(script_file=script)

    def tearDown(self):
        """Default tearDown operation - delete temp directories and files and reset relax."""

        # Remove the temporary directory and variable (if there is a deletion failure, continue to allow the test suite to survive).  Only Exception subclasses are silenced so that KeyboardInterrupt and SystemExit still propagate.
        try:
            deletion(obj=ds, name='tmpdir', dir=True)
        except Exception:
            pass
        try:
            deletion(obj=self, name='tmpdir', dir=True)
        except Exception:
            pass

        # Remove temporary file and variable (if there is a deletion failure, continue to allow the test suite to survive).
        try:
            deletion(obj=ds, name='tmpfile', dir=False)
        except Exception:
            pass
        try:
            deletion(obj=self, name='tmpfile', dir=False)
        except Exception:
            pass

        # Reset relax.
        reset()
Example #22
0
    def __init__(self,
                 pipe_name=None,
                 pipe_bundle=None,
                 results_dir=None,
                 models=[MODEL_R2EFF],
                 grid_inc=11,
                 mc_sim_num=500,
                 exp_mc_sim_num=None,
                 modsel='AIC',
                 pre_run_dir=None,
                 optimise_r2eff=False,
                 insignificance=0.0,
                 numeric_only=False,
                 mc_sim_all_models=False,
                 eliminate=True,
                 set_grid_r20=False,
                 r1_fit=False):
        """Perform a full relaxation dispersion analysis for the given list of models.

        @keyword pipe_name:                 The name of the data pipe containing all of the data for the analysis.
        @type pipe_name:                    str
        @keyword pipe_bundle:               The data pipe bundle to associate all spawned data pipes with.
        @type pipe_bundle:                  str
        @keyword results_dir:               The directory where results files are saved.
        @type results_dir:                  str
        @keyword models:                    The list of relaxation dispersion models to optimise.
        @type models:                       list of str
        @keyword grid_inc:                  Number of grid search increments.  If set to None, then the grid search will be turned off and the default parameter values will be used instead.
        @type grid_inc:                     int or None
        @keyword mc_sim_num:                The number of Monte Carlo simulations to be used for error analysis at the end of the analysis.
        @type mc_sim_num:                   int
        @keyword exp_mc_sim_num:            The number of Monte Carlo simulations for the error analysis in the 'R2eff' model when exponential curves are fitted.  This defaults to the value of the mc_sim_num argument when not given.  When set to '-1', the R2eff errors are estimated from the Covariance matrix.  For the 2-point fixed-time calculation for the 'R2eff' model, this argument is ignored.
        @type exp_mc_sim_num:               int or None
        @keyword modsel:                    The model selection technique to use in the analysis to determine which model is the best for each spin cluster.  This can currently be one of 'AIC', 'AICc', and 'BIC'.
        @type modsel:                       str
        @keyword pre_run_dir:               The optional directory containing the dispersion auto-analysis results from a previous run.  The optimised parameters from these previous results will be used as the starting point for optimisation rather than performing a grid search.  This is essential for when large spin clusters are specified, as a grid search becomes prohibitively expensive with clusters of three or more spins.  At some point a RelaxError will occur because the grid search is impossibly large.  For the cluster specific parameters, i.e. the populations of the states and the exchange parameters, an average value will be used as the starting point.  For all other parameters, the R20 values for each spin and magnetic field, as well as the parameters related to the chemical shift difference dw, the optimised values of the previous run will be directly copied.
        @type pre_run_dir:                  None or str
        @keyword optimise_r2eff:            Flag to specify if the read previous R2eff results should be optimised.  For R1rho models where the error of R2eff values are determined by Monte-Carlo simulations, it can be valuable to make an initial R2eff run with a high number of Monte-Carlo simulations.  Any subsequent model analysis can then be based on these R2eff values, without optimising the R2eff values.
        @type optimise_r2eff:               bool
        @keyword insignificance:            The R2eff/R1rho value in rad/s by which to judge insignificance.  If the maximum difference between two points on all dispersion curves for a spin is less than this value, that spin will be deselected.  This does not affect the 'No Rex' model.  Set this value to 0.0 to use all data.  The value will be passed on to the relax_disp.insignificance user function.
        @type insignificance:               float
        @keyword numeric_only:              The class of models to use in the model selection.  The default of False allows all dispersion models to be used in the analysis (no exchange, the analytic models and the numeric models).  The value of True will activate a pure numeric solution - the analytic models will be optimised, as they are very useful for replacing the grid search for the numeric models, but the final model selection will not include them.
        @type numeric_only:                 bool
        @keyword mc_sim_all_models:         A flag which if True will cause Monte Carlo simulations to be performed for each individual model.  Otherwise Monte Carlo simulations will be reserved for the final model.
        @type mc_sim_all_models:            bool
        @keyword eliminate:                 A flag which if True will enable the elimination of failed models and failed Monte Carlo simulations through the eliminate user function.
        @type eliminate:                    bool
        @keyword set_grid_r20:              A flag which if True will set the grid R20 values from the minimum R2eff values through the r20_from_min_r2eff user function. This will speed up the grid search with a factor GRID_INC^(Nr_spec_freq). For a CPMG experiment with two fields and standard GRID_INC=21, the speed-up is a factor 441.
        @type set_grid_r20:                 bool
        @keyword r1_fit:                    A flag which if True will activate R1 parameter fitting via relax_disp.r1_fit for the models that support it.  If False, then the relax_disp.r1_fit user function will not be called.
        """

        # Initial printout.
        title(file=sys.stdout,
              text="Relaxation dispersion auto-analysis",
              prespace=4)

        # Safely execute the full protocol.
        try:
            # Execution lock.
            status.exec_lock.acquire(pipe_bundle, mode='auto-analysis')

            # Set up the analysis status object.
            status.init_auto_analysis(pipe_bundle, type='relax_disp')
            status.current_analysis = pipe_bundle

            # Store the args.
            self.pipe_name = pipe_name
            self.pipe_bundle = pipe_bundle
            self.results_dir = results_dir
            self.grid_inc = grid_inc
            self.mc_sim_num = mc_sim_num
            self.exp_mc_sim_num = exp_mc_sim_num
            self.models = models
            self.modsel = modsel
            self.pre_run_dir = pre_run_dir
            self.optimise_r2eff = optimise_r2eff
            self.insignificance = insignificance
            self.set_grid_r20 = set_grid_r20
            self.numeric_only = numeric_only
            self.mc_sim_all_models = mc_sim_all_models
            self.eliminate = eliminate
            self.r1_fit = r1_fit

            # No results directory, so default to the current directory.
            if not self.results_dir:
                self.results_dir = getcwd()

            # Data checks.
            self.check_vars()

            # Check for numerical model using numpy version under 1.8.
            # This will result in slow "for loop" calculation through data, making the analysis 5-6 times slower.
            self.check_numpy_less_1_8_and_numerical_model()

            # Load the interpreter.
            self.interpreter = Interpreter(show_script=False,
                                           raise_relax_error=True)
            self.interpreter.populate_self()
            self.interpreter.on(verbose=False)

            # Execute.
            self.run()

        # Clean up.
        finally:
            # Final printout.
            title(file=sys.stdout,
                  text="Completion of the relaxation dispersion auto-analysis",
                  prespace=4)
            print_elapsed_time(time() - status.start_time)

            # Finish and unlock execution.
            status.auto_analysis[self.pipe_bundle].fin = True
            status.current_analysis = None
            status.exec_lock.release()
Exemple #23
0
class Test_relax_data(Relax_data_base_class, TestCase):
    """Unit tests for the functions of the 'prompt.relax_data' module."""
    def __init__(self, methodName=None):
        """Set up the test case class for the unit tests.

        @keyword methodName:    The name of the test method to execute.
        @type methodName:       str or None
        """

        # Execute the base __init__ methods.
        super(Test_relax_data, self).__init__(methodName)

        # Load the interpreter.
        self.interpreter = Interpreter(show_script=False,
                                       raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)

        # Alias the user function class.
        self.relax_data_fns = self.interpreter.relax_data

    def _check_arg_fail(self, fn, error, skip, arg, **fixed):
        """Check that fn rejects every disallowed DATA_TYPES value for one argument.

        Loop over all mock values in DATA_TYPES, skipping the type names listed
        in skip (these are the types the user function accepts for the
        argument), and check that passing each remaining value as the keyword
        argument arg raises the expected RelaxError subclass.

        @param fn:      The user function to test.
        @type fn:       callable
        @param error:   The expected exception class.
        @type error:    RelaxError subclass
        @param skip:    The DATA_TYPES type names which are valid for the
                        argument and hence must be skipped.
        @type skip:     list of str
        @param arg:     The name of the keyword argument being tested.
        @type arg:      str
        @keyword fixed: Any additional valid keyword arguments required by fn.
        """

        # Loop over the data types.
        for data in DATA_TYPES:
            # Skip the valid argument types.
            if data[0] in skip:
                continue

            # Build the keyword arguments, injecting the invalid value.
            kwargs = dict(fixed)
            kwargs[arg] = data[1]

            # The argument test.
            self.assertRaises(error, fn, **kwargs)

    def test_back_calc_argfail_ri_id(self):
        """The ri_id arg test of the relax_data.back_calc() user function."""

        self._check_arg_fail(self.relax_data_fns.back_calc, RelaxStrError,
                             ['None', 'str'], 'ri_id')

    def test_back_calc_argfail_ri_type(self):
        """The ri_type arg test of the relax_data.back_calc() user function."""

        self._check_arg_fail(self.relax_data_fns.back_calc, RelaxStrError,
                             ['None', 'str'], 'ri_type',
                             ri_id='R2')

    def test_back_calc_argfail_frq(self):
        """The frq arg test of the relax_data.back_calc() user function."""

        self._check_arg_fail(self.relax_data_fns.back_calc, RelaxNumError,
                             ['None', 'bin', 'int', 'float'], 'frq',
                             ri_id='R2_1000', ri_type='R2')

    def test_copy_argfail_pipe_from(self):
        """The pipe_from arg test of the relax_data.copy() user function."""

        self._check_arg_fail(self.relax_data_fns.copy, RelaxStrError,
                             ['None', 'str'], 'pipe_from')

    def test_copy_argfail_pipe_to(self):
        """The pipe_to arg test of the relax_data.copy() user function."""

        self._check_arg_fail(self.relax_data_fns.copy, RelaxStrError,
                             ['None', 'str'], 'pipe_to',
                             pipe_from='')

    def test_copy_argfail_both_pipes(self):
        """The pipe_from and pipe_to arg test of the relax_data.copy() user function."""

        # Test that both cannot be None (the default)!
        self.assertRaises(RelaxError, self.relax_data_fns.copy)

    def test_copy_argfail_ri_id(self):
        """The ri_id arg test of the relax_data.copy() user function."""

        self._check_arg_fail(self.relax_data_fns.copy, RelaxStrError,
                             ['None', 'str'], 'ri_id',
                             pipe_from='', pipe_to='')

    def test_delete_argfail_ri_id(self):
        """The ri_id arg test of the relax_data.delete() user function."""

        self._check_arg_fail(self.relax_data_fns.delete, RelaxStrError,
                             ['str'], 'ri_id')

    def test_display_argfail_ri_id(self):
        """The ri_id arg test of the relax_data.display() user function."""

        self._check_arg_fail(self.relax_data_fns.display, RelaxStrError,
                             ['str'], 'ri_id')

    def test_read_argfail_ri_id(self):
        """The ri_id arg test of the relax_data.read() user function."""

        self._check_arg_fail(self.relax_data_fns.read, RelaxStrError,
                             ['str'], 'ri_id')

    def test_read_argfail_ri_type(self):
        """The ri_type arg test of the relax_data.read() user function."""

        self._check_arg_fail(self.relax_data_fns.read, RelaxStrError,
                             ['str'], 'ri_type',
                             ri_id='R2')

    def test_read_argfail_frq(self):
        """The frq arg test of the relax_data.read() user function."""

        self._check_arg_fail(self.relax_data_fns.read, RelaxNumError,
                             ['bin', 'int', 'float'], 'frq',
                             ri_id='R2_1000', ri_type='R2')

    def test_read_argfail_file(self):
        """The file arg test of the relax_data.read() user function."""

        self._check_arg_fail(self.relax_data_fns.read, RelaxStrFileError,
                             ['file', 'str'], 'file',
                             ri_id='R2_1000', ri_type='R2', frq=1e9)

    def test_read_argfail_dir(self):
        """The dir arg test of the relax_data.read() user function."""

        self._check_arg_fail(self.relax_data_fns.read, RelaxStrError,
                             ['None', 'str'], 'dir',
                             ri_id='R2_1000', ri_type='R2', frq=1e9,
                             file='R2_1000MHz')

    def test_read_argfail_mol_name_col(self):
        """The mol_name_col arg test of the relax_data.read() user function."""

        self._check_arg_fail(self.relax_data_fns.read, RelaxIntError,
                             ['None', 'int', 'bin'], 'mol_name_col',
                             ri_id='R2_1000', ri_type='R2', frq=1e9,
                             file='R2_1000MHz')

    def test_read_argfail_res_num_col(self):
        """The res_num_col arg test of the relax_data.read() user function."""

        self._check_arg_fail(self.relax_data_fns.read, RelaxIntError,
                             ['None', 'int', 'bin'], 'res_num_col',
                             ri_id='R2_1000', ri_type='R2', frq=1e9,
                             file='R2_1000MHz')

    def test_read_argfail_res_name_col(self):
        """The res_name_col arg test of the relax_data.read() user function."""

        self._check_arg_fail(self.relax_data_fns.read, RelaxIntError,
                             ['None', 'int', 'bin'], 'res_name_col',
                             ri_id='R2_1000', ri_type='R2', frq=1e9,
                             file='R2_1000MHz')

    def test_read_argfail_spin_num_col(self):
        """The spin_num_col arg test of the relax_data.read() user function."""

        self._check_arg_fail(self.relax_data_fns.read, RelaxIntError,
                             ['None', 'int', 'bin'], 'spin_num_col',
                             ri_id='R2_1000', ri_type='R2', frq=1e9,
                             file='R2_1000MHz')

    def test_read_argfail_spin_name_col(self):
        """The spin_name_col arg test of the relax_data.read() user function."""

        self._check_arg_fail(self.relax_data_fns.read, RelaxIntError,
                             ['None', 'int', 'bin'], 'spin_name_col',
                             ri_id='R2_1000', ri_type='R2', frq=1e9,
                             file='R2_1000MHz')

    def test_read_argfail_data_col(self):
        """The data_col arg test of the relax_data.read() user function."""

        self._check_arg_fail(self.relax_data_fns.read, RelaxIntError,
                             ['int', 'bin'], 'data_col',
                             ri_id='R2_1000', ri_type='R2', frq=1e9,
                             file='R2_1000MHz')

    def test_read_argfail_error_col(self):
        """The error_col arg test of the relax_data.read() user function."""

        self._check_arg_fail(self.relax_data_fns.read, RelaxIntError,
                             ['int', 'bin'], 'error_col',
                             ri_id='R2_1000', ri_type='R2', frq=1e9,
                             file='R2_1000MHz')

    def test_read_argfail_sep(self):
        """The sep arg test of the relax_data.read() user function."""

        self._check_arg_fail(self.relax_data_fns.read, RelaxStrError,
                             ['None', 'str'], 'sep',
                             ri_id='R2_1000', ri_type='R2', frq=1e9,
                             file='R2_1000MHz', data_col=0, error_col=0)

    def test_write_argfail_ri_id(self):
        """The ri_id arg test of the relax_data.write() user function."""

        self._check_arg_fail(self.relax_data_fns.write, RelaxStrError,
                             ['str'], 'ri_id')

    def test_write_argfail_file(self):
        """The file arg test of the relax_data.write() user function."""

        self._check_arg_fail(self.relax_data_fns.write, RelaxStrFileError,
                             ['file', 'str'], 'file',
                             ri_id='R2_1000')

    def test_write_argfail_dir(self):
        """The dir arg test of the relax_data.write() user function."""

        self._check_arg_fail(self.relax_data_fns.write, RelaxStrError,
                             ['None', 'str'], 'dir',
                             ri_id='R2_1000', file='a')

    def test_write_argfail_force(self):
        """The force arg test of the relax_data.write() user function."""

        self._check_arg_fail(self.relax_data_fns.write, RelaxBoolError,
                             ['bool'], 'force',
                             ri_id='R2_1000', file='a')
Exemple #24
0
class Test_structure(Structure_base_class, TestCase):
    """Unit tests for the functions of the 'prompt.structure' module."""
    def __init__(self, methodName=None):
        """Set up the test case class for the unit tests.

        @keyword methodName:    The name of the test method to execute.
        @type methodName:       str or None
        """

        # Execute the base __init__ methods.
        super(Test_structure, self).__init__(methodName)

        # Load the interpreter.
        self.interpreter = Interpreter(show_script=False,
                                       raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)

        # Alias the user function class.
        self.structure_fns = self.interpreter.structure

    def _check_arg_fail(self, fn, error, skip, arg, **fixed):
        """Check that fn rejects every disallowed DATA_TYPES value for one argument.

        Loop over all mock values in DATA_TYPES, skipping the type names listed
        in skip (these are the types the user function accepts for the
        argument), and check that passing each remaining value as the keyword
        argument arg raises the expected RelaxError subclass.

        @param fn:      The user function to test.
        @type fn:       callable
        @param error:   The expected exception class.
        @type error:    RelaxError subclass
        @param skip:    The DATA_TYPES type names which are valid for the
                        argument and hence must be skipped.
        @type skip:     list of str
        @param arg:     The name of the keyword argument being tested.
        @type arg:      str
        @keyword fixed: Any additional valid keyword arguments required by fn.
        """

        # Loop over the data types.
        for data in DATA_TYPES:
            # Skip the valid argument types.
            if data[0] in skip:
                continue

            # Build the keyword arguments, injecting the invalid value.
            kwargs = dict(fixed)
            kwargs[arg] = data[1]

            # The argument test.
            self.assertRaises(error, fn, **kwargs)

    def test_create_diff_tensor_pdb_argfail_scale(self):
        """The scale arg test of the structure.create_diff_tensor_pdb() user function."""

        self._check_arg_fail(self.structure_fns.create_diff_tensor_pdb,
                             RelaxNumError, ['float', 'bin', 'int'], 'scale')

    def test_create_diff_tensor_pdb_argfail_file(self):
        """The file arg test of the structure.create_diff_tensor_pdb() user function."""

        self._check_arg_fail(self.structure_fns.create_diff_tensor_pdb,
                             RelaxStrError, ['str'], 'file')

    def test_create_diff_tensor_pdb_argfail_dir(self):
        """The dir arg test of the structure.create_diff_tensor_pdb() user function."""

        self._check_arg_fail(self.structure_fns.create_diff_tensor_pdb,
                             RelaxNoneStrError, ['None', 'str'], 'dir')

    def test_create_diff_tensor_pdb_argfail_force(self):
        """The force arg test of the structure.create_diff_tensor_pdb() user function."""

        self._check_arg_fail(self.structure_fns.create_diff_tensor_pdb,
                             RelaxBoolError, ['bool'], 'force')

    def test_create_vector_dist_argfail_length(self):
        """The length arg test of the structure.create_vector_dist() user function."""

        self._check_arg_fail(self.structure_fns.create_vector_dist,
                             RelaxNumError, ['bin', 'int', 'float'], 'length')

    def test_create_vector_dist_argfail_file(self):
        """The file arg test of the structure.create_vector_dist() user function."""

        self._check_arg_fail(self.structure_fns.create_vector_dist,
                             RelaxStrError, ['str'], 'file')

    def test_create_vector_dist_argfail_dir(self):
        """The dir arg test of the structure.create_vector_dist() user function."""

        self._check_arg_fail(self.structure_fns.create_vector_dist,
                             RelaxNoneStrError, ['None', 'str'], 'dir')

    def test_create_vector_dist_argfail_symmetry(self):
        """The symmetry arg test of the structure.create_vector_dist() user function."""

        self._check_arg_fail(self.structure_fns.create_vector_dist,
                             RelaxBoolError, ['bool'], 'symmetry')

    def test_create_vector_dist_argfail_force(self):
        """The force arg test of the structure.create_vector_dist() user function."""

        self._check_arg_fail(self.structure_fns.create_vector_dist,
                             RelaxBoolError, ['bool'], 'force')

    def test_load_spins_argfail_spin_id(self):
        """The spin_id arg test of the structure.load_spins() user function."""

        self._check_arg_fail(self.structure_fns.load_spins,
                             RelaxNoneStrError, ['None', 'str'], 'spin_id')

    def test_read_pdb_argfail_file(self):
        """The file arg test of the structure.read_pdb() user function."""

        self._check_arg_fail(self.structure_fns.read_pdb,
                             RelaxStrError, ['str'], 'file')

    def test_read_pdb_argfail_dir(self):
        """The dir arg test of the structure.read_pdb() user function."""

        self._check_arg_fail(self.structure_fns.read_pdb,
                             RelaxNoneStrError, ['None', 'str'], 'dir',
                             file='test.pdb')

    def test_read_pdb_argfail_read_mol(self):
        """The read_mol arg test of the structure.read_pdb() user function."""

        self._check_arg_fail(self.structure_fns.read_pdb,
                             RelaxNoneIntListIntError,
                             ['None', 'bin', 'int', 'int list'], 'read_mol',
                             file='test.pdb')

    def test_read_pdb_argfail_set_mol_name(self):
        """The set_mol_name arg test of the structure.read_pdb() user function."""

        self._check_arg_fail(self.structure_fns.read_pdb,
                             RelaxNoneStrListStrError,
                             ['None', 'str', 'str list'], 'set_mol_name',
                             file='test.pdb')

    def test_read_pdb_argfail_read_model(self):
        """The read_model arg test of the structure.read_pdb() user function."""

        self._check_arg_fail(self.structure_fns.read_pdb,
                             RelaxNoneIntListIntError,
                             ['None', 'bin', 'int', 'int list'], 'read_model',
                             file='test.pdb')

    def test_read_pdb_argfail_set_model_num(self):
        """The set_model_num arg test of the structure.read_pdb() user function."""

        self._check_arg_fail(self.structure_fns.read_pdb,
                             RelaxNoneIntListIntError,
                             ['None', 'bin', 'int', 'int list'], 'set_model_num',
                             file='test.pdb')
Exemple #25
0
class Test_noe(TestCase):
    """Unit tests for the functions of the 'prompt.noe' module."""

    def __init__(self, methodName=None):
        """Set up the test case class for the system tests."""

        # Execute the base class __init__() method.
        super(Test_noe, self).__init__(methodName)

        # Set up a quiet interpreter which converts RelaxErrors into exceptions.
        self.interpreter = Interpreter(show_script=False, raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)

        # Shortcut to the noe user function class.
        self.noe_fns = self.interpreter.noe

    def test_read_restraints_argfail_file(self):
        """The file arg test of the noe.read_restraints() user function."""

        # Every data type except file and str must be rejected.
        for item in DATA_TYPES:
            if item[0] in ('file', 'str'):
                continue
            self.assertRaises(RelaxStrFileError, self.noe_fns.read_restraints, file=item[1])

    def test_read_restraints_argfail_dir(self):
        """The dir arg test of the noe.read_restraints() user function."""

        # Every data type except None and str must be rejected.
        for item in DATA_TYPES:
            if item[0] in ('None', 'str'):
                continue
            self.assertRaises(RelaxStrError, self.noe_fns.read_restraints, file='noes', dir=item[1])

    def test_read_restraints_argfail_proton1_col(self):
        """The proton1_col arg test of the noe.read_restraints() user function."""

        # Every data type except None, int, and bin must be rejected.
        for item in DATA_TYPES:
            if item[0] in ('None', 'int', 'bin'):
                continue
            self.assertRaises(RelaxIntError, self.noe_fns.read_restraints, file='noes', proton1_col=item[1])

    def test_read_restraints_argfail_proton2_col(self):
        """The proton2_col arg test of the noe.read_restraints() user function."""

        # Every data type except None, int, and bin must be rejected.
        for item in DATA_TYPES:
            if item[0] in ('None', 'int', 'bin'):
                continue
            self.assertRaises(RelaxIntError, self.noe_fns.read_restraints, file='noes', proton2_col=item[1])

    def test_read_restraints_argfail_lower_col(self):
        """The lower_col arg test of the noe.read_restraints() user function."""

        # Every data type except None, int, and bin must be rejected.
        for item in DATA_TYPES:
            if item[0] in ('None', 'int', 'bin'):
                continue
            self.assertRaises(RelaxIntError, self.noe_fns.read_restraints, file='noes', lower_col=item[1])

    def test_read_restraints_argfail_upper_col(self):
        """The upper_col arg test of the noe.read_restraints() user function."""

        # Every data type except None, int, and bin must be rejected.
        for item in DATA_TYPES:
            if item[0] in ('None', 'int', 'bin'):
                continue
            self.assertRaises(RelaxIntError, self.noe_fns.read_restraints, file='noes', upper_col=item[1])

    def test_read_restraints_argfail_sep(self):
        """The sep arg test of the noe.read_restraints() user function."""

        # Every data type except None and str must be rejected.
        for item in DATA_TYPES:
            if item[0] in ('None', 'str'):
                continue
            self.assertRaises(RelaxStrError, self.noe_fns.read_restraints, file='noes', sep=item[1])

    def test_spectrum_type_argfail_spectrum_type(self):
        """The spectrum_type arg test of the noe.spectrum_type() user function."""

        # Only a str spectrum_type value is accepted.
        for item in DATA_TYPES:
            if item[0] == 'str':
                continue
            self.assertRaises(RelaxStrError, self.noe_fns.spectrum_type, spectrum_type=item[1])

    def test_spectrum_type_argfail_spectrum_id(self):
        """The spectrum_id arg test of the noe.spectrum_type() user function."""

        # Only a str spectrum_id value is accepted.
        for item in DATA_TYPES:
            if item[0] == 'str':
                continue
            self.assertRaises(RelaxStrError, self.noe_fns.spectrum_type, spectrum_type='x', spectrum_id=item[1])
class Test_deselect(TestCase):
    """Unit tests for the functions of the 'prompt.deselect' module."""

    def __init__(self, methodName=None):
        """Set up the test case class for the system tests."""

        # Execute the base class __init__() method.
        super(Test_deselect, self).__init__(methodName)

        # Set up a quiet, non-quitting interpreter which converts RelaxErrors into exceptions.
        self.interpreter = Interpreter(show_script=False, quit=False, raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)

        # Shortcut to the deselect user function class.
        self.deselect_fns = self.interpreter.deselect

    def test_read_argfail_file(self):
        """The file arg test of the deselect.read() user function."""

        # Every data type except str and file must be rejected.
        for item in DATA_TYPES:
            if item[0] in ('str', 'file'):
                continue
            self.assertRaises(RelaxStrFileError, self.deselect_fns.read, file=item[1])

    def test_read_argfail_dir(self):
        """The dir arg test of the deselect.read() user function."""

        # Every data type except None and str must be rejected.
        for item in DATA_TYPES:
            if item[0] in ('None', 'str'):
                continue
            self.assertRaises(RelaxNoneStrError, self.deselect_fns.read, file='unresolved', dir=item[1])

    def test_read_argfail_mol_name_col(self):
        """The mol_name_col arg test of the deselect.read() user function."""

        # Every data type except None, int, and bin must be rejected.
        for item in DATA_TYPES:
            if item[0] in ('None', 'int', 'bin'):
                continue
            self.assertRaises(RelaxNoneIntError, self.deselect_fns.read, file='unresolved', mol_name_col=item[1])

    def test_read_argfail_res_num_col(self):
        """The res_num_col arg test of the deselect.read() user function."""

        # Every data type except None, int, and bin must be rejected.
        for item in DATA_TYPES:
            if item[0] in ('None', 'int', 'bin'):
                continue
            self.assertRaises(RelaxNoneIntError, self.deselect_fns.read, file='unresolved', res_num_col=item[1])

    def test_read_argfail_res_name_col(self):
        """The res_name_col arg test of the deselect.read() user function."""

        # Every data type except None, int, and bin must be rejected.
        for item in DATA_TYPES:
            if item[0] in ('None', 'int', 'bin'):
                continue
            self.assertRaises(RelaxNoneIntError, self.deselect_fns.read, file='unresolved', res_name_col=item[1])

    def test_read_argfail_spin_num_col(self):
        """The spin_num_col arg test of the deselect.read() user function."""

        # Every data type except None, int, and bin must be rejected.
        for item in DATA_TYPES:
            if item[0] in ('None', 'int', 'bin'):
                continue
            self.assertRaises(RelaxNoneIntError, self.deselect_fns.read, file='unresolved', spin_num_col=item[1])

    def test_read_argfail_spin_name_col(self):
        """The spin_name_col arg test of the deselect.read() user function."""

        # Every data type except None, int, and bin must be rejected.
        for item in DATA_TYPES:
            if item[0] in ('None', 'int', 'bin'):
                continue
            self.assertRaises(RelaxNoneIntError, self.deselect_fns.read, file='unresolved', spin_name_col=item[1])

    def test_read_argfail_sep(self):
        """The sep arg test of the deselect.read() user function."""

        # Every data type except None and str must be rejected.
        for item in DATA_TYPES:
            if item[0] in ('None', 'str'):
                continue
            self.assertRaises(RelaxNoneStrError, self.deselect_fns.read, file='unresolved', sep=item[1])

    def test_read_argfail_change_all(self):
        """The change_all arg test of the deselect.read() user function."""

        # Only a bool change_all value is accepted.
        for item in DATA_TYPES:
            if item[0] == 'bool':
                continue
            self.assertRaises(RelaxBoolError, self.deselect_fns.read, file='unresolved', change_all=item[1])

    def test_reverse_argfail_spin_id(self):
        """The spin_id arg test of the deselect.reverse() user function."""

        # Every data type except None and str must be rejected.
        for item in DATA_TYPES:
            if item[0] in ('None', 'str'):
                continue
            self.assertRaises(RelaxNoneStrError, self.deselect_fns.reverse, spin_id=item[1])

    def test_spin_argfail_spin_id(self):
        """The spin_id arg test of the deselect.spin() user function."""

        # Every data type except None and str must be rejected.
        for item in DATA_TYPES:
            if item[0] in ('None', 'str'):
                continue
            self.assertRaises(RelaxNoneStrError, self.deselect_fns.spin, spin_id=item[1])

    def test_spin_argfail_change_all(self):
        """The change_all arg test of the deselect.spin() user function."""

        # Only a bool change_all value is accepted.
        for item in DATA_TYPES:
            if item[0] == 'bool':
                continue
            self.assertRaises(RelaxBoolError, self.deselect_fns.spin, change_all=item[1])
Exemple #27
0
class Test_sequence(Sequence_base_class, TestCase):
    """Unit tests for the functions of the 'prompt.sequence' module."""

    def __init__(self, methodName=None):
        """Set up the test case class for the system tests."""

        # Execute the base class __init__() method.
        super(Test_sequence, self).__init__(methodName)

        # Set up a quiet interpreter which converts RelaxErrors into exceptions.
        self.interpreter = Interpreter(show_script=False, raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)

        # Shortcut to the sequence user function class.
        self.sequence_fns = self.interpreter.sequence

    def test_copy_argfail_pipe_from(self):
        """The pipe_from arg test of the sequence.copy() user function."""

        # Every data type except None and str must be rejected.
        for item in DATA_TYPES:
            if item[0] in ('None', 'str'):
                continue
            self.assertRaises(RelaxStrError, self.sequence_fns.copy, pipe_from=item[1])

    def test_copy_argfail_pipe_to(self):
        """The pipe_to arg test of the sequence.copy() user function."""

        # Every data type except None and str must be rejected.
        for item in DATA_TYPES:
            if item[0] in ('None', 'str'):
                continue
            self.assertRaises(RelaxStrError, self.sequence_fns.copy, pipe_to=item[1])

    def test_copy_argfail_both_pipes(self):
        """The pipe_from and pipe_to arg test of the sequence.copy() user function."""

        # Both pipe arguments cannot be left as None (the default)!
        self.assertRaises(RelaxError, self.sequence_fns.copy)

    def test_display_argfail_sep(self):
        """The proper failure of the sequence.display() user function for the sep argument."""

        # Every data type except None and str must be rejected.
        for item in DATA_TYPES:
            if item[0] in ('None', 'str'):
                continue
            self.assertRaises(RelaxStrError, self.sequence_fns.display, sep=item[1])

    def test_display_argfail_mol_name_flag(self):
        """The proper failure of the sequence.display() user function for the mol_name_flag argument."""

        # Only a bool flag value is accepted.
        for item in DATA_TYPES:
            if item[0] == 'bool':
                continue
            self.assertRaises(RelaxBoolError, self.sequence_fns.display, mol_name_flag=item[1])

    def test_display_argfail_res_num_flag(self):
        """The proper failure of the sequence.display() user function for the res_num_flag argument."""

        # Only a bool flag value is accepted.
        for item in DATA_TYPES:
            if item[0] == 'bool':
                continue
            self.assertRaises(RelaxBoolError, self.sequence_fns.display, res_num_flag=item[1])

    def test_display_argfail_res_name_flag(self):
        """The proper failure of the sequence.display() user function for the res_name_flag argument."""

        # Only a bool flag value is accepted.
        for item in DATA_TYPES:
            if item[0] == 'bool':
                continue
            self.assertRaises(RelaxBoolError, self.sequence_fns.display, res_name_flag=item[1])

    def test_display_argfail_spin_num_flag(self):
        """The proper failure of the sequence.display() user function for the spin_num_flag argument."""

        # Only a bool flag value is accepted.
        for item in DATA_TYPES:
            if item[0] == 'bool':
                continue
            self.assertRaises(RelaxBoolError, self.sequence_fns.display, spin_num_flag=item[1])

    def test_display_argfail_spin_name_flag(self):
        """The proper failure of the sequence.display() user function for the spin_name_flag argument."""

        # Only a bool flag value is accepted.
        for item in DATA_TYPES:
            if item[0] == 'bool':
                continue
            self.assertRaises(RelaxBoolError, self.sequence_fns.display, spin_name_flag=item[1])

    def test_read_argfail_file(self):
        """Test the proper failure of the sequence.read() user function for the file argument."""

        # Every data type except file and str must be rejected.
        for item in DATA_TYPES:
            if item[0] in ('file', 'str'):
                continue
            self.assertRaises(RelaxStrFileError, self.sequence_fns.read, file=item[1])

    def test_read_argfail_dir(self):
        """Test the proper failure of the sequence.read() user function for the dir argument."""

        # Every data type except None and str must be rejected.
        for item in DATA_TYPES:
            if item[0] in ('None', 'str'):
                continue
            self.assertRaises(RelaxStrError, self.sequence_fns.read, file='a', dir=item[1])

    def test_read_argfail_mol_name_col(self):
        """The proper failure of the sequence.read() user function for the mol_name_col argument."""

        # Every data type except None, int, and bin must be rejected.
        for item in DATA_TYPES:
            if item[0] in ('None', 'int', 'bin'):
                continue
            self.assertRaises(RelaxIntError, self.sequence_fns.read, file='a', mol_name_col=item[1])

    def test_read_argfail_res_num_col(self):
        """The proper failure of the sequence.read() user function for the res_num_col argument."""

        # Every data type except None, int, and bin must be rejected.
        for item in DATA_TYPES:
            if item[0] in ('None', 'int', 'bin'):
                continue
            self.assertRaises(RelaxIntError, self.sequence_fns.read, file='a', res_num_col=item[1])

    def test_read_argfail_res_name_col(self):
        """The proper failure of the sequence.read() user function for the res_name_col argument."""

        # Every data type except None, int, and bin must be rejected.
        for item in DATA_TYPES:
            if item[0] in ('None', 'int', 'bin'):
                continue
            self.assertRaises(RelaxIntError, self.sequence_fns.read, file='a', res_name_col=item[1])

    def test_read_argfail_spin_num_col(self):
        """The proper failure of the sequence.read() user function for the spin_num_col argument."""

        # Every data type except None, int, and bin must be rejected.
        for item in DATA_TYPES:
            if item[0] in ('None', 'int', 'bin'):
                continue
            self.assertRaises(RelaxIntError, self.sequence_fns.read, file='a', spin_num_col=item[1])

    def test_read_argfail_spin_name_col(self):
        """The proper failure of the sequence.read() user function for the spin_name_col argument."""

        # Every data type except None, int, and bin must be rejected.
        for item in DATA_TYPES:
            if item[0] in ('None', 'int', 'bin'):
                continue
            self.assertRaises(RelaxIntError, self.sequence_fns.read, file='a', spin_name_col=item[1])

    def test_read_argfail_sep(self):
        """The proper failure of the sequence.read() user function for the sep argument."""

        # Every data type except None and str must be rejected.
        for item in DATA_TYPES:
            if item[0] in ('None', 'str'):
                continue
            self.assertRaises(RelaxStrError, self.sequence_fns.read, file='a', sep=item[1])

    def test_write_argfail_file(self):
        """Test the proper failure of the sequence.write() user function for the file argument."""

        # Every data type except file and str must be rejected.
        for item in DATA_TYPES:
            if item[0] in ('file', 'str'):
                continue
            self.assertRaises(RelaxStrFileError, self.sequence_fns.write, file=item[1])

    def test_write_argfail_dir(self):
        """Test the proper failure of the sequence.write() user function for the dir argument."""

        # Every data type except None and str must be rejected.
        for item in DATA_TYPES:
            if item[0] in ('None', 'str'):
                continue
            self.assertRaises(RelaxStrError, self.sequence_fns.write, file='a', dir=item[1])

    def test_write_argfail_mol_name_flag(self):
        """The proper failure of the sequence.write() user function for the mol_name_flag argument."""

        # Only a bool flag value is accepted.
        for item in DATA_TYPES:
            if item[0] == 'bool':
                continue
            self.assertRaises(RelaxBoolError, self.sequence_fns.write, file='a', mol_name_flag=item[1])

    def test_write_argfail_res_num_flag(self):
        """The proper failure of the sequence.write() user function for the res_num_flag argument."""

        # Only a bool flag value is accepted.
        for item in DATA_TYPES:
            if item[0] == 'bool':
                continue
            self.assertRaises(RelaxBoolError, self.sequence_fns.write, file='a', res_num_flag=item[1])

    def test_write_argfail_res_name_flag(self):
        """The proper failure of the sequence.write() user function for the res_name_flag argument."""

        # Only a bool flag value is accepted.
        for item in DATA_TYPES:
            if item[0] == 'bool':
                continue
            self.assertRaises(RelaxBoolError, self.sequence_fns.write, file='a', res_name_flag=item[1])

    def test_write_argfail_spin_num_flag(self):
        """The proper failure of the sequence.write() user function for the spin_num_flag argument."""

        # Only a bool flag value is accepted.
        for item in DATA_TYPES:
            if item[0] == 'bool':
                continue
            self.assertRaises(RelaxBoolError, self.sequence_fns.write, file='a', spin_num_flag=item[1])

    def test_write_argfail_spin_name_flag(self):
        """The proper failure of the sequence.write() user function for the spin_name_flag argument."""

        # Only a bool flag value is accepted.
        for item in DATA_TYPES:
            if item[0] == 'bool':
                continue
            self.assertRaises(RelaxBoolError, self.sequence_fns.write, file='a', spin_name_flag=item[1])

    def test_write_argfail_sep(self):
        """The proper failure of the sequence.write() user function for the sep argument."""

        # Every data type except None and str must be rejected.
        for item in DATA_TYPES:
            if item[0] in ('None', 'str'):
                continue
            self.assertRaises(RelaxStrError, self.sequence_fns.write, file='a', sep=item[1])

    def test_write_argfail_force(self):
        """The force arg test of the sequence.write() user function."""

        # Only a bool force value is accepted.
        for item in DATA_TYPES:
            if item[0] == 'bool':
                continue
            self.assertRaises(RelaxBoolError, self.sequence_fns.write, file='a', force=item[1])
Exemple #28
0
    def __init__(self,
                 stage=1,
                 results_dir=None,
                 num_ens=10000,
                 num_models=10,
                 configs=None,
                 snapshot_dir='snapshots',
                 snapshot_min=None,
                 snapshot_max=None,
                 pseudo=None,
                 noe_file=None,
                 noe_norm=None,
                 rdc_name=None,
                 rdc_file=None,
                 rdc_spin_id1_col=None,
                 rdc_spin_id2_col=None,
                 rdc_data_col=None,
                 rdc_error_col=None,
                 bond_length=None,
                 bond_length_file=None,
                 log=None,
                 bucket_num=200,
                 lower_lim_noe=0.0,
                 upper_lim_noe=600.0,
                 lower_lim_rdc=0.0,
                 upper_lim_rdc=1.0):
        """Set up for the stereochemistry analysis.

        @keyword stage:             Stage of analysis (see the module docstring above for the options).
        @type stage:                int
        @keyword results_dir:       The optional directory to place all results files into.
        @type results_dir:          None or str
        @keyword num_ens:           Number of ensembles.
        @type num_ens:              int
        @keyword num_models:        Ensemble size.
        @type num_models:           int
        @keyword configs:           All the configurations.
        @type configs:              list of str
        @keyword snapshot_dir:      Snapshot directories (corresponding to the configurations).
        @type snapshot_dir:         list of str
        @keyword snapshot_min:      The number of the first snapshots (corresponding to the configurations).
        @type snapshot_min:         list of int
        @keyword snapshot_max:      The number of the last snapshots (corresponding to the configurations).
        @type snapshot_max:         list of int
        @keyword pseudo:            The list of pseudo-atoms.  Each element is a list of the pseudo-atom name and a list of all those atoms forming the pseudo-atom.  For example, pseudo = [["Q7", ["@H16", "@H17", "@H18"]], ["Q9", ["@H20", "@H21", "@H22"]]].
        @type pseudo:               list of list of str and list of str
        @keyword noe_file:          The name of the NOE restraint file.
        @type noe_file:             str
        @keyword noe_norm:          The NOE normalisation factor (equal to the sum of all NOEs squared).
        @type noe_norm:             float
        @keyword rdc_name:          The label for this RDC data set.
        @type rdc_name:             str
        @keyword rdc_file:          The name of the RDC file.
        @type rdc_file:             str
        @keyword rdc_spin_id1_col:  The spin ID column of the first spin in the RDC file.
        @type rdc_spin_id1_col:     None or int
        @keyword rdc_spin_id2_col:  The spin ID column of the second spin in the RDC file.
        @type rdc_spin_id2_col:     None or int
        @keyword rdc_data_col:      The data column of the RDC file.
        @type rdc_data_col:         int
        @keyword rdc_error_col:     The error column of the RDC file.
        @type rdc_error_col:        int
        @keyword bond_length:       The bond length value in meters.  This overrides the bond_length_file argument.
        @type bond_length:          float or None
        @keyword bond_length_file:  The file of bond lengths for each atom pair in meters.  The first and second columns must be the spin ID strings and the third column must contain the data.
        @type bond_length_file:     str or None
        @keyword log:               Log file output flag (only for certain stages).
        @type log:                  bool
        @keyword bucket_num:        Number of buckets for the distribution plots.
        @type bucket_num:           int
        @keyword lower_lim_noe:     Distribution plot limits.
        @type lower_lim_noe:        float
        @keyword upper_lim_noe:     Distribution plot limits.
        @type upper_lim_noe:        float
        @keyword lower_lim_rdc:     Distribution plot limits.
        @type lower_lim_rdc:        float
        @keyword upper_lim_rdc:     Distribution plot limits.
        @type upper_lim_rdc:        float
        """

        # Initial printout.
        title(file=sys.stdout, text="Stereochemistry auto-analysis")

        # Safely execute the full protocol.
        try:
            # Execution lock (released in the finally clause below).
            status.exec_lock.acquire('auto stereochem analysis',
                                     mode='auto-analysis')

            # Set up the analysis status object.
            status.init_auto_analysis('stereochem', type='stereochem')
            status.current_analysis = 'auto stereochem analysis'

            # Store all the args as instance attributes for later stages.
            self.stage = stage
            self.results_dir = results_dir
            self.num_ens = num_ens
            self.num_models = num_models
            self.configs = configs
            self.snapshot_dir = snapshot_dir
            self.snapshot_min = snapshot_min
            self.snapshot_max = snapshot_max
            self.pseudo = pseudo
            self.noe_file = noe_file
            self.noe_norm = noe_norm
            self.rdc_name = rdc_name
            self.rdc_file = rdc_file
            self.rdc_spin_id1_col = rdc_spin_id1_col
            self.rdc_spin_id2_col = rdc_spin_id2_col
            self.rdc_data_col = rdc_data_col
            self.rdc_error_col = rdc_error_col
            self.bond_length = bond_length
            self.bond_length_file = bond_length_file
            self.log = log
            self.bucket_num = bucket_num
            self.lower_lim_noe = lower_lim_noe
            self.upper_lim_noe = upper_lim_noe
            self.lower_lim_rdc = lower_lim_rdc
            self.upper_lim_rdc = upper_lim_rdc

            # Load the interpreter (quiet mode, RelaxErrors raised as exceptions).
            self.interpreter = Interpreter(show_script=False,
                                           raise_relax_error=True)
            self.interpreter.populate_self()
            self.interpreter.on(verbose=False)

            # Create the results directory.
            if self.results_dir:
                mkdir_nofail(self.results_dir)

            # Or use the current working directory.
            else:
                self.results_dir = getcwd()

            # Create a directory for log files.
            # NOTE(review): only set-up occurs in this try block — no analysis
            # stage is executed between set-up and the clean-up below; confirm
            # the stage execution is deliberately performed elsewhere.
            if self.log:
                mkdir_nofail(self.results_dir + sep + "logs")

        # Clean up.
        finally:
            # Final printout.
            title(file=sys.stdout,
                  text="Completion of the stereochemistry auto-analysis")
            print_elapsed_time(time() - status.start_time)

            # Finish and unlock execution.
            # NOTE(review): if acquire() or init_auto_analysis() raised above,
            # these clean-up lines will themselves fail — confirm acceptable.
            status.auto_analysis['stereochem'].fin = True
            status.current_analysis = None
            status.exec_lock.release()
Exemple #29
0
class Test_spectrum(TestCase):
    """Unit tests for the functions of the 'prompt.spectrum' module."""

    def __init__(self, methodName=None):
        """Initialise the test case, creating a relax interpreter instance."""

        # Execute the base class __init__ method.
        super(Test_spectrum, self).__init__(methodName)

        # Set up the relax interpreter so that errors are raised rather than printed.
        self.interpreter = Interpreter(show_script=False, raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)

        # Shortcut to the spectrum user function class.
        self.spectrum_fns = self.interpreter.spectrum

    def test_baseplane_rmsd_argfail_error(self):
        """The error arg test of the spectrum.baseplane_rmsd() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # Numeric values are valid, so skip them.
            if item[0] in ('float', 'bin', 'int'):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxNumError, self.spectrum_fns.baseplane_rmsd, error=item[1])

    def test_baseplane_rmsd_argfail_spectrum_id(self):
        """The spectrum_id arg test of the spectrum.baseplane_rmsd() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # Strings are valid, so skip them.
            if item[0] in ('str',):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxStrError, self.spectrum_fns.baseplane_rmsd, spectrum_id=item[1])

    def test_baseplane_rmsd_argfail_spin_id(self):
        """The spin_id arg test of the spectrum.baseplane_rmsd() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # None and strings are valid, so skip them.
            if item[0] in ('None', 'str'):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxStrError, self.spectrum_fns.baseplane_rmsd, spectrum_id='x', spin_id=item[1])

    def test_integration_points_argfail_N(self):
        """The N arg test of the spectrum.integration_points() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # Integers are valid, so skip them.
            if item[0] in ('bin', 'int'):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxIntError, self.spectrum_fns.integration_points, N=item[1])

    def test_integration_points_argfail_spectrum_id(self):
        """The spectrum_id arg test of the spectrum.integration_points() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # Strings are valid, so skip them.
            if item[0] in ('str',):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxStrError, self.spectrum_fns.integration_points, N=0, spectrum_id=item[1])

    def test_integration_points_argfail_spin_id(self):
        """The spin_id arg test of the spectrum.integration_points() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # None and strings are valid, so skip them.
            if item[0] in ('None', 'str'):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxStrError, self.spectrum_fns.integration_points, N=0, spectrum_id='x', spin_id=item[1])

    def test_read_intensities_argfail_file(self):
        """The file arg test of the spectrum.read_intensities() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # Files, strings, and lists thereof are valid, so skip them.
            if item[0] in ('file', 'file list', 'str', 'str list'):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxStrFileListStrFileError, self.spectrum_fns.read_intensities, file=item[1])

    def test_read_intensities_argfail_dir(self):
        """The dir arg test of the spectrum.read_intensities() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # None and strings are valid, so skip them.
            if item[0] in ('None', 'str'):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxStrError, self.spectrum_fns.read_intensities, file='a', dir=item[1])

    def test_read_intensities_argfail_spectrum_id(self):
        """The spectrum_id arg test of the spectrum.read_intensities() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # Strings and string lists are valid, so skip them.
            if item[0] in ('str', 'str list'):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxStrListStrError, self.spectrum_fns.read_intensities, file='a', spectrum_id=item[1])

    def test_read_intensities_argfail_dim(self):
        """The dim arg test of the spectrum.read_intensities() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # Integers are valid, so skip them.
            if item[0] in ('int', 'bin'):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxIntError, self.spectrum_fns.read_intensities, file='a', spectrum_id='x', dim=item[1])

    def test_read_intensities_argfail_int_col(self):
        """The int_col arg test of the spectrum.read_intensities() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # None, integers, and integer lists are valid, so skip them.
            if item[0] in ('None', 'int', 'bin', 'int list'):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxIntListIntError, self.spectrum_fns.read_intensities, file='a', spectrum_id='x', int_col=item[1])

    def test_read_intensities_argfail_int_method(self):
        """The int_method arg test of the spectrum.read_intensities() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # Strings are valid, so skip them.
            if item[0] in ('str',):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxStrError, self.spectrum_fns.read_intensities, file='a', spectrum_id='x', int_method=item[1])

    def test_read_intensities_argfail_mol_name_col(self):
        """The mol_name_col arg test of the spectrum.read_intensities() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # None and integers are valid, so skip them.
            if item[0] in ('None', 'int', 'bin'):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxIntError, self.spectrum_fns.read_intensities, file='a', spectrum_id='x', int_method='y', mol_name_col=item[1])

    def test_read_intensities_argfail_res_num_col(self):
        """The res_num_col arg test of the spectrum.read_intensities() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # None and integers are valid, so skip them.
            if item[0] in ('None', 'int', 'bin'):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxIntError, self.spectrum_fns.read_intensities, file='a', spectrum_id='x', int_method='y', res_num_col=item[1])

    def test_read_intensities_argfail_res_name_col(self):
        """The res_name_col arg test of the spectrum.read_intensities() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # None and integers are valid, so skip them.
            if item[0] in ('None', 'int', 'bin'):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxIntError, self.spectrum_fns.read_intensities, file='a', spectrum_id='x', int_method='y', res_name_col=item[1])

    def test_read_intensities_argfail_spin_num_col(self):
        """The spin_num_col arg test of the spectrum.read_intensities() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # None and integers are valid, so skip them.
            if item[0] in ('None', 'int', 'bin'):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxIntError, self.spectrum_fns.read_intensities, file='a', spectrum_id='x', int_method='y', spin_num_col=item[1])

    def test_read_intensities_argfail_spin_name_col(self):
        """The spin_name_col arg test of the spectrum.read_intensities() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # None and integers are valid, so skip them.
            if item[0] in ('None', 'int', 'bin'):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxIntError, self.spectrum_fns.read_intensities, file='a', spectrum_id='x', int_method='y', spin_name_col=item[1])

    def test_read_intensities_argfail_sep(self):
        """The sep arg test of the spectrum.read_intensities() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # None and strings are valid, so skip them.
            if item[0] in ('None', 'str'):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxStrError, self.spectrum_fns.read_intensities, file='a', spectrum_id='x', int_method='y', sep=item[1])
Exemple #30
0
class Test_select(TestCase):
    """Unit tests for the functions of the 'prompt.select' module."""

    def __init__(self, methodName=None):
        """Initialise the test case, creating a relax interpreter instance."""

        # Execute the base class __init__ method.
        super(Test_select, self).__init__(methodName)

        # Set up the relax interpreter so that errors are raised rather than printed.
        self.interpreter = Interpreter(show_script=False, raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)

        # Shortcut to the select user function class.
        self.select_fns = self.interpreter.select

    def test_read_argfail_file(self):
        """The file arg test of the select.read() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # Strings and file objects are valid, so skip them.
            if item[0] in ('str', 'file'):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxStrFileError, self.select_fns.read, file=item[1])

    def test_read_argfail_dir(self):
        """The dir arg test of the select.read() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # None and strings are valid, so skip them.
            if item[0] in ('None', 'str'):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxNoneStrError, self.select_fns.read, file='unresolved', dir=item[1])

    def test_read_argfail_mol_name_col(self):
        """The mol_name_col arg test of the select.read() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # None and integers are valid, so skip them.
            if item[0] in ('None', 'int', 'bin'):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxNoneIntError, self.select_fns.read, file='unresolved', mol_name_col=item[1])

    def test_read_argfail_res_num_col(self):
        """The res_num_col arg test of the select.read() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # None and integers are valid, so skip them.
            if item[0] in ('None', 'int', 'bin'):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxNoneIntError, self.select_fns.read, file='unresolved', res_num_col=item[1])

    def test_read_argfail_res_name_col(self):
        """The res_name_col arg test of the select.read() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # None and integers are valid, so skip them.
            if item[0] in ('None', 'int', 'bin'):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxNoneIntError, self.select_fns.read, file='unresolved', res_name_col=item[1])

    def test_read_argfail_spin_num_col(self):
        """The spin_num_col arg test of the select.read() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # None and integers are valid, so skip them.
            if item[0] in ('None', 'int', 'bin'):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxNoneIntError, self.select_fns.read, file='unresolved', spin_num_col=item[1])

    def test_read_argfail_spin_name_col(self):
        """The spin_name_col arg test of the select.read() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # None and integers are valid, so skip them.
            if item[0] in ('None', 'int', 'bin'):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxNoneIntError, self.select_fns.read, file='unresolved', spin_name_col=item[1])

    def test_read_argfail_sep(self):
        """The sep arg test of the select.read() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # None and strings are valid, so skip them.
            if item[0] in ('None', 'str'):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxNoneStrError, self.select_fns.read, file='unresolved', sep=item[1])

    def test_read_argfail_boolean(self):
        """The boolean arg test of the select.read() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # Strings are valid, so skip them.
            if item[0] in ('str',):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxStrError, self.select_fns.read, file='unresolved', boolean=item[1])

    def test_read_argfail_change_all(self):
        """The change_all arg test of the select.read() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # Booleans are valid, so skip them.
            if item[0] in ('bool',):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxBoolError, self.select_fns.read, file='unresolved', change_all=item[1])

    def test_reverse_argfail_spin_id(self):
        """The spin_id arg test of the select.reverse() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # None and strings are valid, so skip them.
            if item[0] in ('None', 'str'):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxNoneStrError, self.select_fns.reverse, spin_id=item[1])

    def test_spin_argfail_spin_id(self):
        """The spin_id arg test of the select.spin() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # None and strings are valid, so skip them.
            if item[0] in ('None', 'str'):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxNoneStrError, self.select_fns.spin, spin_id=item[1])

    def test_spin_argfail_boolean(self):
        """The boolean arg test of the select.spin() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # Strings are valid, so skip them.
            if item[0] in ('str',):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxStrError, self.select_fns.spin, boolean=item[1])

    def test_spin_argfail_change_all(self):
        """The change_all arg test of the select.spin() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # Booleans are valid, so skip them.
            if item[0] in ('bool',):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxBoolError, self.select_fns.spin, change_all=item[1])
Exemple #31
0
class Test_align_tensor(Align_tensor_base_class):
    """Unit tests for the functions of the 'prompt.align_tensor' module."""

    def __init__(self, methodName=None):
        """Initialise the test case, creating a relax interpreter instance."""

        # Execute the base class __init__ method.
        super(Test_align_tensor, self).__init__(methodName)

        # Set up the relax interpreter so that errors are raised rather than printed.
        self.interpreter = Interpreter(show_script=False, raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)

        # Shortcut to the align_tensor user function class.
        self.align_tensor_fns = self.interpreter.align_tensor

    def test_copy_argfail_tensor_from(self):
        """Failure of the tensor_from arg of the align_tensor.copy() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # None and strings are valid, so skip them.
            if item[0] in ('None', 'str'):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxNoneStrError, self.align_tensor_fns.copy, tensor_from=item[1])

    def test_copy_argfail_pipe_from(self):
        """The pipe_from arg test of the align_tensor.copy() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # None and strings are valid, so skip them.
            if item[0] in ('None', 'str'):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxNoneStrError, self.align_tensor_fns.copy, tensor_from='Pf1', pipe_from=item[1])

    def test_copy_argfail_tensor_to(self):
        """Failure of the tensor_to arg of the align_tensor.copy() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # None and strings are valid, so skip them.
            if item[0] in ('None', 'str'):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxNoneStrError, self.align_tensor_fns.copy, tensor_to=item[1])

    def test_copy_argfail_pipe_to(self):
        """The pipe_to arg test of the align_tensor.copy() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # None and strings are valid, so skip them.
            if item[0] in ('None', 'str'):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxNoneStrError, self.align_tensor_fns.copy, tensor_from='Pf1', tensor_to='Pf1', pipe_to=item[1])

    def test_copy_argfail_both_pipes(self):
        """The pipe_from and pipe_to arg test of the align_tensor.copy() user function."""

        # Leaving both pipe args at the default of None must fail.
        self.assertRaises(RelaxError, self.align_tensor_fns.copy, tensor_from='Pf1', tensor_to='Pf1')

    def test_delete_argfail_tensor(self):
        """Failure of the tensor arg of the align_tensor.delete() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # None and strings are valid, so skip them.
            if item[0] in ('None', 'str'):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxNoneStrError, self.align_tensor_fns.delete, tensor=item[1])

    def test_display_argfail_tensor(self):
        """Failure of the tensor arg of the align_tensor.display() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # None and strings are valid, so skip them.
            if item[0] in ('None', 'str'):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxNoneStrError, self.align_tensor_fns.display, tensor=item[1])

    def test_init_argfail_tensor(self):
        """Failure of the tensor arg of the align_tensor.init() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # None and strings are valid, so skip them.
            if item[0] in ('None', 'str'):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxNoneStrError, self.align_tensor_fns.init, tensor=item[1])

    def test_init_argfail_align_id(self):
        """Failure of the align_id arg of the align_tensor.init() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # Strings are valid, so skip them.
            if item[0] in ('str',):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxStrError, self.align_tensor_fns.init, align_id=item[1])

    def test_init_argfail_domain(self):
        """Failure of the domain arg of the align_tensor.init() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # None and strings are valid, so skip them.
            if item[0] in ('None', 'str'):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxNoneStrError, self.align_tensor_fns.init, align_id='Pf1', domain=item[1])

    def test_init_argfail_params(self):
        """Failure of the params arg of the align_tensor.init() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # Tuple-like arguments need closer inspection.
            if item[0] in ('None', 'tuple', 'float tuple', 'str tuple'):
                # None and 5-element tuples are valid, so skip them.
                if item[0] == 'None' or len(item[1]) == 5:
                    continue

            # Everything else must be rejected.
            self.assertRaises(RelaxNoneNumTupleNumError, self.align_tensor_fns.init, align_id='Pf1', params=item[1])

    def test_init_argfail_scale(self):
        """The scale arg test of the align_tensor.init() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # Floats are valid, so skip them.
            if item[0] in ('float',):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxFloatError, self.align_tensor_fns.init, align_id='Pf1', params=(0.0, 0.0, 0.0, 0.0, 0.0), scale=item[1])

    def test_init_argfail_angle_units(self):
        """The angle_units arg test of the align_tensor.init() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # Strings are valid, so skip them.
            if item[0] in ('str',):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxStrError, self.align_tensor_fns.init, params=(0.0, 0.0, 0.0, 0.0, 0.0), angle_units=item[1])

    def test_init_argfail_param_types(self):
        """The proper failure of the align_tensor.init() user function for the param_types argument."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # Integers are valid, so skip them.
            if item[0] in ('int', 'bin'):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxIntError, self.align_tensor_fns.init, align_id='Pf1', params=(0.0, 0.0, 0.0, 0.0, 0.0), param_types=item[1])

    def test_init_argfail_errors(self):
        """The errors arg test of the align_tensor.init() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # Booleans are valid, so skip them.
            if item[0] in ('bool',):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxBoolError, self.align_tensor_fns.init, align_id='Pf1', params=(0.0, 0.0, 0.0, 0.0, 0.0), errors=item[1])

    def test_matrix_angles_argfail_basis_set(self):
        """The proper failure of the align_tensor.matrix_angles() user function for the basis_set argument."""

        # An alignment tensor must first exist.
        align_tensor.init(align_id='a', params=(0.0, 0.0, 0.0, 0.0, 0.0))

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # Strings are valid, so skip them.
            if item[0] in ('str',):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxStrError, self.align_tensor_fns.matrix_angles, basis_set=item[1])

    def test_matrix_angles_argfail_basis_tensors(self):
        """The tensors arg unit test of the align_tensor.matrix_angles() user function."""

        # An alignment tensor must first exist.
        align_tensor.init(align_id='a', params=(0.0, 0.0, 0.0, 0.0, 0.0))

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # None and string lists are valid, so skip them.
            if item[0] in ('None', 'str list'):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxNoneListStrError, self.align_tensor_fns.matrix_angles, tensors=item[1])

    def test_reduction_argfail_full_tensor(self):
        """Failure of the full_tensor arg of the align_tensor.reduction() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # Strings are valid, so skip them.
            if item[0] in ('str',):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxStrError, self.align_tensor_fns.reduction, full_tensor=item[1])

    def test_reduction_argfail_red_tensor(self):
        """Failure of the red_tensor arg of the align_tensor.reduction() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # Strings are valid, so skip them.
            if item[0] in ('str',):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxStrError, self.align_tensor_fns.reduction, full_tensor='test', red_tensor=item[1])

    def test_set_domain_argfail_tensor(self):
        """Failure of the tensor arg of the align_tensor.set_domain() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # Strings are valid, so skip them.
            if item[0] in ('str',):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxStrError, self.align_tensor_fns.set_domain, tensor=item[1])

    def test_set_domain_argfail_domain(self):
        """Failure of the domain arg of the align_tensor.set_domain() user function."""

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # Strings are valid, so skip them.
            if item[0] in ('str',):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxStrError, self.align_tensor_fns.set_domain, domain=item[1])

    def test_svd_argfail_basis_set(self):
        """The proper failure of the align_tensor.svd() user function for the basis_set argument."""

        # An alignment tensor must first exist.
        align_tensor.init(align_id='a', params=(0.0, 0.0, 0.0, 0.0, 0.0))

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # Strings are valid, so skip them.
            if item[0] in ('str',):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxStrError, self.align_tensor_fns.svd, basis_set=item[1])

    def test_svd_argfail_basis_tensors(self):
        """The tensors arg unit test of the align_tensor.svd() user function."""

        # An alignment tensor must first exist.
        align_tensor.init(align_id='a', params=(0.0, 0.0, 0.0, 0.0, 0.0))

        # Feed every wrong data type into the user function.
        for item in DATA_TYPES:
            # None and string lists are valid, so skip them.
            if item[0] in ('None', 'str list'):
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxNoneListStrError, self.align_tensor_fns.svd, tensors=item[1])
Exemple #32
0
    def __init__(self, pipe_name=None, pipe_bundle=None, file_root='rx', results_dir=None, grid_inc=11, mc_sim_num=500, view_plots=True):
        """Perform relaxation curve fitting.

        To use this auto-analysis, a data pipe with all the required data needs to be set up.  This data pipe should contain the following:

            - All the spins loaded.
            - Unresolved spins deselected.
            - All the peak intensities loaded and relaxation delay times set.
            - Either the baseplane noise RMSD values should be set or replicated spectra loaded.

        @keyword pipe_name:     The name of the data pipe containing all of the data for the analysis.
        @type pipe_name:        str
        @keyword pipe_bundle:   The data pipe bundle to associate all spawned data pipes with.
        @type pipe_bundle:      str
        @keyword file_root:     File root of the output files.
        @type file_root:        str
        @keyword results_dir:   The directory where results files are saved.
        @type results_dir:      str
        @keyword grid_inc:      Number of grid search increments.
        @type grid_inc:         int
        @keyword mc_sim_num:    The number of Monte Carlo simulations to be used for error analysis at the end of the analysis.
        @type mc_sim_num:       int
        @keyword view_plots:    Flag to automatically view grace plots after calculation.
        @type view_plots:       bool
        """

        # Execution lock.
        status.exec_lock.acquire(pipe_bundle, mode='auto-analysis')

        # Execute the full analysis, guaranteeing that the execution lock is
        # released even if anything below raises (otherwise all subsequent
        # analyses would deadlock on the lock).
        try:
            # Store the args first so that the finally clause below can
            # safely reference self.pipe_bundle.
            self.pipe_name = pipe_name
            self.pipe_bundle = pipe_bundle
            self.file_root = file_root
            self.results_dir = results_dir
            if self.results_dir:
                self.grace_dir = results_dir + sep + 'grace'
            else:
                # No results directory given, so use a relative path.
                self.grace_dir = 'grace'
            self.mc_sim_num = mc_sim_num
            self.grid_inc = grid_inc
            self.view_plots = view_plots

            # Set up the analysis status object.
            status.init_auto_analysis(pipe_bundle, type='relax_fit')
            status.current_analysis = pipe_bundle

            # Data checks.
            self.check_vars()

            # Set the data pipe to the current data pipe.
            if self.pipe_name != cdp_name():
                switch(self.pipe_name)

            # Load the interpreter.
            self.interpreter = Interpreter(show_script=False, raise_relax_error=True)
            self.interpreter.populate_self()
            self.interpreter.on(verbose=False)

            # Execute.
            self.run()

        # Clean up.
        finally:
            # Finish and unlock execution.
            status.auto_analysis[self.pipe_bundle].fin = True
            status.current_analysis = None
            status.exec_lock.release()
Exemple #33
0
class Relax_fit:
    """The relaxation curve fitting auto-analysis protocol."""

    def __init__(self, pipe_name=None, pipe_bundle=None, file_root='rx', results_dir=None, grid_inc=11, mc_sim_num=500, view_plots=True):
        """Perform relaxation curve fitting.

        To use this auto-analysis, a data pipe with all the required data needs to be set up.  This data pipe should contain the following:

            - All the spins loaded.
            - Unresolved spins deselected.
            - All the peak intensities loaded and relaxation delay times set.
            - Either the baseplane noise RMSD values should be set or replicated spectra loaded.

        @keyword pipe_name:     The name of the data pipe containing all of the data for the analysis.
        @type pipe_name:        str
        @keyword pipe_bundle:   The data pipe bundle to associate all spawned data pipes with.
        @type pipe_bundle:      str
        @keyword file_root:     File root of the output files.
        @type file_root:        str
        @keyword results_dir:   The directory where results files are saved.
        @type results_dir:      None or str
        @keyword grid_inc:      Number of grid search increments.
        @type grid_inc:         int
        @keyword mc_sim_num:    The number of Monte Carlo simulations to be used for error analysis at the end of the analysis.
        @type mc_sim_num:       int
        @keyword view_plots:    Flag to automatically view grace plots after calculation.
        @type view_plots:       bool
        """

        # Acquire the execution lock (it is released again at the end of this method).
        status.exec_lock.acquire(pipe_bundle, mode='auto-analysis')

        # Set up the analysis status object and register this bundle as the current analysis.
        status.init_auto_analysis(pipe_bundle, type='relax_fit')
        status.current_analysis = pipe_bundle

        # Store the args.
        self.pipe_name = pipe_name
        self.pipe_bundle = pipe_bundle
        self.file_root = file_root
        self.results_dir = results_dir
        if self.results_dir:
            self.grace_dir = results_dir + sep + 'grace'
        else:
            # No results directory supplied, so the Grace files go into './grace'.
            self.grace_dir = 'grace'
        self.mc_sim_num = mc_sim_num
        self.grid_inc = grid_inc
        self.view_plots = view_plots

        # Data checks (raises an error if the pipe name is invalid).
        self.check_vars()

        # Switch to the named data pipe if it is not already the current one.
        if self.pipe_name != cdp_name():
            switch(self.pipe_name)

        # Load the interpreter for programmatic access to the user functions.
        self.interpreter = Interpreter(show_script=False, raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)

        # Execute the full protocol.
        self.run()

        # Flag the analysis as finished and unlock execution.
        status.auto_analysis[self.pipe_bundle].fin = True
        status.current_analysis = None
        status.exec_lock.release()


    def run(self):
        """Set up and run the curve-fitting.

        This performs the peak intensity error analysis, a grid search, Newton minimisation,
        Monte Carlo simulation error analysis, then writes out all parameter values, results,
        Grace plots, a grace-to-image conversion script, and finally the program state.
        """

        # Peak intensity error analysis.
        self.error_analysis()

        # Grid search over the initial parameter values.
        self.interpreter.minimise.grid_search(inc=self.grid_inc)

        # Minimise.
        self.interpreter.minimise.execute('newton', scaling=False, constraints=False)

        # Monte Carlo simulations for parameter error estimation.
        self.interpreter.monte_carlo.setup(number=self.mc_sim_num)
        self.interpreter.monte_carlo.create_data()
        self.interpreter.monte_carlo.initial_values()
        self.interpreter.minimise.execute('newton', scaling=False, constraints=False)
        self.interpreter.monte_carlo.error_analysis()

        # Determine the normalisation type and if the Iinf parameter exists.
        # Any spin with a model other than 'sat' or 'inv' switches to 'first'-point
        # normalisation and disables the Iinf outputs.
        norm_type = 'last'
        iinf = True
        for spin in spin_loop(skip_desel=True):
            if spin.model not in ['sat', 'inv']:
                norm_type = 'first'
                iinf = False
                break

        # Save the relaxation rates and other parameter values.
        self.interpreter.value.write(param='i0', file='i0.out', dir=self.results_dir, force=True)
        if iinf:
            self.interpreter.value.write(param='iinf', file='iinf.out', dir=self.results_dir, force=True)
        self.interpreter.value.write(param='rx', file=self.file_root+'.out', dir=self.results_dir, force=True)

        # Save the results.
        self.interpreter.results.write(file='results', dir=self.results_dir, force=True)

        # Create Grace plots of the data.
        self.interpreter.grace.write(y_data_type='chi2', file='chi2.agr', dir=self.grace_dir, force=True)    # Minimised chi-squared value.
        self.interpreter.grace.write(y_data_type='i0', file='i0.agr', dir=self.grace_dir, force=True)    # Initial peak intensity.
        if iinf:
            self.interpreter.grace.write(y_data_type='iinf', file='iinf.agr', dir=self.grace_dir, force=True)    # Infinite peak intensity.
        self.interpreter.grace.write(y_data_type='rx', file=self.file_root+'.agr', dir=self.grace_dir, force=True)    # Relaxation rate.
        self.interpreter.grace.write(x_data_type='relax_times', y_data_type='peak_intensity', file='intensities.agr', dir=self.grace_dir, force=True)    # Average peak intensities.
        self.interpreter.grace.write(x_data_type='relax_times', y_data_type='peak_intensity', norm_type=norm_type, norm=True, file='intensities_norm.agr', dir=self.grace_dir, force=True)    # Average peak intensities (normalised).

        # Write a python "grace to PNG/EPS/SVG..." conversion script into the Grace directory.
        file_name = "grace2images.py"
        file_path = get_file_path(file_name=file_name, dir=self.grace_dir)
        file = open_write_file(file_name=file_name, dir=self.grace_dir, force=True)

        # Write the script contents.
        script_grace2images(file=file)

        # Close the script, then change its mode to user rwx plus group/other read
        # (expanding any ~ characters in the path first).
        file.close()

        if self.grace_dir:
            dir = expanduser(self.grace_dir)
            chmod(dir + sep + file_name, S_IRWXU|S_IRGRP|S_IROTH)
        else:
            file_name = expanduser(file_name)
            chmod(file_name, S_IRWXU|S_IRGRP|S_IROTH)


        # Display the Grace plots if selected.
        if self.view_plots:
            self.interpreter.grace.view(file='chi2.agr', dir=self.grace_dir)
            self.interpreter.grace.view(file='i0.agr', dir=self.grace_dir)
            self.interpreter.grace.view(file=self.file_root+'.agr', dir=self.grace_dir)
            self.interpreter.grace.view(file='intensities.agr', dir=self.grace_dir)
            self.interpreter.grace.view(file='intensities_norm.agr', dir=self.grace_dir)

        # Save the program state.
        self.interpreter.state.save(state=self.file_root+'.save', dir=self.results_dir, force=True)


    def error_analysis(self):
        """Perform an error analysis of the peak intensities.

        This is skipped entirely if every selected spin already has a peak intensity error for
        every spectrum ID.  Otherwise, if no replicated spectra have been specified, spectra
        sharing identical relaxation times are registered as replicates before the spectrum
        error analysis is run.
        """

        # Printout.
        section(file=sys.stdout, text="Error analysis", prespace=2)

        # Check if intensity errors have already been calculated by the user.
        precalc = True
        for spin in spin_loop(skip_desel=True):
            # No error structure on this spin.
            if not hasattr(spin, 'peak_intensity_err'):
                precalc = False
                break

            # Determine if a spectrum ID is missing from the list.
            for id in cdp.spectrum_ids:
                if id not in spin.peak_intensity_err:
                    precalc = False
                    break

        # Skip the analysis if everything has been pre-calculated.
        if precalc:
            print("Skipping the error analysis as it has already been performed.")
            return

        # Check if there are replicates which the user has not specified.

        # Flag for the presence of duplicated relaxation times.
        has_dub = False

        if not hasattr(cdp, 'replicates'):
            # Collect all times, and matching spectrum ID.
            all_times = []
            all_id = []
            for spectrum_id in cdp.relax_times:
                all_times.append(cdp.relax_times[spectrum_id])
                all_id.append(spectrum_id)

            # For each time, find the indices of all spectra sharing that time.
            dublicates = [(val, [i for i in range(len(all_times)) if all_times[i] == val]) for val in all_times]

            # Loop over the list of the mapping of times and duplications.
            list_dub_mapping = []
            for i, dub in enumerate(dublicates):
                # Get the current spectrum ID.
                cur_spectrum_id = all_id[i]

                # Get the tuple of time and indexes of duplications.
                time, list_index_occur = dub

                # Collect mapping of index to id.
                id_list = []
                if len(list_index_occur) > 1:
                    # Duplications exist.
                    has_dub = True

                    for list_index in list_index_occur:
                        id_list.append(all_id[list_index])

                # Store to list
                list_dub_mapping.append((cur_spectrum_id, id_list))

        # If duplicates exist, register them as replicated spectra.
        # NOTE: has_dub can only be True when the mapping above was built, so
        # list_dub_mapping is always defined here.
        if has_dub:
            # Assign the duplicates.
            for spectrum_id, dub_pair in list_dub_mapping:
                if len(dub_pair) > 0:
                    self.interpreter.spectrum.replicated(spectrum_ids=dub_pair)

        # Run the error analysis.
        self.interpreter.spectrum.error_analysis()


    def check_vars(self):
        """Check that the user has set the variables correctly.

        @raises RelaxNoPipeError:   If no data pipe with the stored pipe name exists.
        """

        # The pipe name.
        if not has_pipe(self.pipe_name):
            raise RelaxNoPipeError(self.pipe_name)
Exemple #34
0
class Test_model_free(Model_free_base_class, TestCase):
    """Unit tests for the functions of the 'prompt.model_free' module."""

    def __init__(self, methodName=None):
        """Initialise the test case, setting up the relax interpreter."""

        # Let the parent classes initialise themselves first.
        super(Test_model_free, self).__init__(methodName)

        # Create the interpreter object used to reach the user functions.
        self.interpreter = Interpreter(show_script=False, raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)

        # Shortcut to the model_free user function class.
        self.model_free_fns = self.interpreter.model_free

    def test_create_model_argfail_model(self):
        """The model arg test of the model_free.create_model() user function."""

        # Everything except a string must be rejected.
        for data in DATA_TYPES:
            if data[0] in ['str']:
                continue
            self.assertRaises(RelaxStrError, self.model_free_fns.create_model, model=data[1])

    def test_create_model_argfail_equation(self):
        """The equation arg test of the model_free.create_model() user function."""

        # Everything except a string must be rejected.
        for data in DATA_TYPES:
            if data[0] in ['str']:
                continue
            self.assertRaises(RelaxStrError, self.model_free_fns.create_model, equation=data[1])

    def test_create_model_argfail_params(self):
        """The params arg test of the model_free.create_model() user function."""

        # Everything except a list of strings must be rejected.
        for data in DATA_TYPES:
            if data[0] in ['str list']:
                continue
            self.assertRaises(RelaxListStrError, self.model_free_fns.create_model, model='test', equation='test', params=data[1])

    def test_create_model_argfail_spin_id(self):
        """The spin_id arg test of the model_free.create_model() user function."""

        # Everything except None or a string must be rejected.
        for data in DATA_TYPES:
            if data[0] in ['None', 'str']:
                continue
            self.assertRaises(RelaxNoneStrError, self.model_free_fns.create_model, model='test', equation='test', params=['test'], spin_id=data[1])

    def test_remove_tm_argfail_spin_id(self):
        """The spin_id arg test of the model_free.remove_tm() user function."""

        # Everything except None or a string must be rejected.
        for data in DATA_TYPES:
            if data[0] in ['None', 'str']:
                continue
            self.assertRaises(RelaxNoneStrError, self.model_free_fns.remove_tm, spin_id=data[1])

    def test_select_model_argfail_model(self):
        """The model arg test of the model_free.select_model() user function."""

        # Everything except a string must be rejected.
        for data in DATA_TYPES:
            if data[0] in ['str']:
                continue
            self.assertRaises(RelaxStrError, self.model_free_fns.select_model, model=data[1])

    def test_select_model_argfail_spin_id(self):
        """The spin_id arg test of the model_free.select_model() user function."""

        # Everything except None or a string must be rejected.
        for data in DATA_TYPES:
            if data[0] in ['None', 'str']:
                continue
            self.assertRaises(RelaxNoneStrError, self.model_free_fns.select_model, model='test', spin_id=data[1])
Exemple #35
0
class Test_noe(TestCase):
    """Unit tests for the functions of the 'prompt.noe' module."""

    def __init__(self, methodName=None):
        """Initialise the test case, setting up the relax interpreter."""

        # Let the parent class initialise itself first.
        super(Test_noe, self).__init__(methodName)

        # Create the interpreter object used to reach the user functions.
        self.interpreter = Interpreter(show_script=False, raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)

        # Shortcut to the noe user function class.
        self.noe_fns = self.interpreter.noe

    def test_read_restraints_argfail_file(self):
        """The file arg test of the noe.read_restraints() user function."""

        # Everything except a string must be rejected.
        for data in DATA_TYPES:
            if data[0] in ['str']:
                continue
            self.assertRaises(RelaxStrError, self.noe_fns.read_restraints, file=data[1])

    def test_read_restraints_argfail_dir(self):
        """The dir arg test of the noe.read_restraints() user function."""

        # Everything except None or a string must be rejected.
        for data in DATA_TYPES:
            if data[0] in ['None', 'str']:
                continue
            self.assertRaises(RelaxNoneStrError, self.noe_fns.read_restraints, file='noes', dir=data[1])

    def test_read_restraints_argfail_proton1_col(self):
        """The proton1_col arg test of the noe.read_restraints() user function."""

        # Everything except None or an integer (int or bin) must be rejected.
        for data in DATA_TYPES:
            if data[0] in ['None', 'int', 'bin']:
                continue
            self.assertRaises(RelaxNoneIntError, self.noe_fns.read_restraints, file='noes', proton1_col=data[1])

    def test_read_restraints_argfail_proton2_col(self):
        """The proton2_col arg test of the noe.read_restraints() user function."""

        # Everything except None or an integer (int or bin) must be rejected.
        for data in DATA_TYPES:
            if data[0] in ['None', 'int', 'bin']:
                continue
            self.assertRaises(RelaxNoneIntError, self.noe_fns.read_restraints, file='noes', proton2_col=data[1])

    def test_read_restraints_argfail_lower_col(self):
        """The lower_col arg test of the noe.read_restraints() user function."""

        # Everything except None or an integer (int or bin) must be rejected.
        for data in DATA_TYPES:
            if data[0] in ['None', 'int', 'bin']:
                continue
            self.assertRaises(RelaxNoneIntError, self.noe_fns.read_restraints, file='noes', lower_col=data[1])

    def test_read_restraints_argfail_upper_col(self):
        """The upper_col arg test of the noe.read_restraints() user function."""

        # Everything except None or an integer (int or bin) must be rejected.
        for data in DATA_TYPES:
            if data[0] in ['None', 'int', 'bin']:
                continue
            self.assertRaises(RelaxNoneIntError, self.noe_fns.read_restraints, file='noes', upper_col=data[1])

    def test_read_restraints_argfail_sep(self):
        """The sep arg test of the noe.read_restraints() user function."""

        # Everything except None or a string must be rejected.
        for data in DATA_TYPES:
            if data[0] in ['None', 'str']:
                continue
            self.assertRaises(RelaxNoneStrError, self.noe_fns.read_restraints, file='noes', sep=data[1])

    def test_spectrum_type_argfail_spectrum_type(self):
        """The spectrum_type arg test of the noe.spectrum_type() user function."""

        # Everything except a string must be rejected.
        for data in DATA_TYPES:
            if data[0] in ['str']:
                continue
            self.assertRaises(RelaxStrError, self.noe_fns.spectrum_type, spectrum_type=data[1])

    def test_spectrum_type_argfail_spectrum_id(self):
        """The spectrum_id arg test of the noe.spectrum_type() user function."""

        # Everything except a string must be rejected.
        for data in DATA_TYPES:
            if data[0] in ['str']:
                continue
            self.assertRaises(RelaxStrError, self.noe_fns.spectrum_type, spectrum_type='x', spectrum_id=data[1])
class Test_minimisation(Minimisation_base_class, TestCase):
    """Unit tests for the functions of the 'prompt.minimisation' module."""

    def __init__(self, methodName=None):
        """Initialise the test case, setting up the relax interpreter."""

        # Let the parent classes initialise themselves first.
        super(Test_minimisation, self).__init__(methodName)

        # Create the interpreter object used to reach the user functions.
        self.interpreter = Interpreter(show_script=False, quit=False, raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)

        # Gather the minimisation-related user functions into a single container.
        self.minimisation_fns = Container()
        self.minimisation_fns.calc = self.interpreter.calc
        self.minimisation_fns.grid_search = self.interpreter.grid_search
        self.minimisation_fns.minimise = self.interpreter.minimise

    def test_calc_argfail_verbosity(self):
        """The verbosity arg test of the calc() user function."""

        # Everything except an integer (int or bin) must be rejected.
        for data in DATA_TYPES:
            if data[0] in ['int', 'bin']:
                continue
            self.assertRaises(RelaxIntError, self.minimisation_fns.calc, verbosity=data[1])

    def test_grid_search_argfail_lower(self):
        """The lower arg test of the grid_search() user function."""

        # Everything except None or a list of numbers must be rejected.
        for data in DATA_TYPES:
            if data[0] in ['None', 'int list', 'float list', 'number list']:
                continue
            self.assertRaises(RelaxNoneListNumError, self.minimisation_fns.grid_search, lower=data[1])

    def test_grid_search_argfail_upper(self):
        """The upper arg test of the grid_search() user function."""

        # Everything except None or a list of numbers must be rejected.
        for data in DATA_TYPES:
            if data[0] in ['None', 'int list', 'float list', 'number list']:
                continue
            self.assertRaises(RelaxNoneListNumError, self.minimisation_fns.grid_search, upper=data[1])

    def test_grid_search_argfail_inc(self):
        """The inc arg test of the grid_search() user function."""

        # Everything except an integer or a list of integers/Nones must be rejected.
        for data in DATA_TYPES:
            if data[0] in ['bin', 'int', 'int list', 'none list']:
                continue
            self.assertRaises(RelaxIntListIntError, self.minimisation_fns.grid_search, inc=data[1])

    def test_grid_search_argfail_constraints(self):
        """The constraints arg test of the grid_search() user function."""

        # Everything except a boolean must be rejected.
        for data in DATA_TYPES:
            if data[0] in ['bool']:
                continue
            self.assertRaises(RelaxBoolError, self.minimisation_fns.grid_search, constraints=data[1])

    def test_grid_search_argfail_verbosity(self):
        """The verbosity arg test of the grid_search() user function."""

        # Everything except an integer (int or bin) must be rejected.
        for data in DATA_TYPES:
            if data[0] in ['int', 'bin']:
                continue
            self.assertRaises(RelaxIntError, self.minimisation_fns.grid_search, verbosity=data[1])

    def test_minimise_argfail_bad_keyword(self):
        """The test of a bad keyword argument in the minimise() user function."""

        # An unknown keyword must fail regardless of the value supplied.
        for data in DATA_TYPES:
            self.assertRaises(RelaxError, self.minimisation_fns.minimise, 'Newton', step_tol=data[1])

    def test_minimise_argfail_min_algor(self):
        """The min_algor arg test of the minimise() user function."""

        # Everything except a string must be rejected.
        for data in DATA_TYPES:
            if data[0] in ['str']:
                continue
            self.assertRaises(RelaxStrError, self.minimisation_fns.minimise, data[1])

    def test_minimise_argfail_line_search(self):
        """The line_search arg test of the minimise() user function."""

        # Everything except None or a string must be rejected.
        for data in DATA_TYPES:
            if data[0] in ['None', 'str']:
                continue
            self.assertRaises(RelaxNoneStrError, self.minimisation_fns.minimise, 'Newton', line_search=data[1])

    def test_minimise_argfail_hessian_mod(self):
        """The hessian_mod arg test of the minimise() user function."""

        # Everything except None or a string must be rejected.
        for data in DATA_TYPES:
            if data[0] in ['None', 'str']:
                continue
            self.assertRaises(RelaxNoneStrError, self.minimisation_fns.minimise, 'Newton', hessian_mod=data[1])

    def test_minimise_argfail_hessian_type(self):
        """The hessian_type arg test of the minimise() user function."""

        # Everything except None or a string must be rejected.
        for data in DATA_TYPES:
            if data[0] in ['None', 'str']:
                continue
            self.assertRaises(RelaxNoneStrError, self.minimisation_fns.minimise, 'Newton', hessian_type=data[1])

    def test_minimise_argfail_func_tol(self):
        """The func_tol arg test of the minimise() user function."""

        # Everything except a number (float, bin or int) must be rejected.
        for data in DATA_TYPES:
            if data[0] in ['float', 'bin', 'int']:
                continue
            self.assertRaises(RelaxNumError, self.minimisation_fns.minimise, 'Newton', func_tol=data[1])

    def test_minimise_argfail_grad_tol(self):
        """The grad_tol arg test of the minimise() user function."""

        # Everything except None or a number must be rejected.
        for data in DATA_TYPES:
            if data[0] in ['None', 'float', 'bin', 'int']:
                continue
            self.assertRaises(RelaxNoneNumError, self.minimisation_fns.minimise, 'Newton', grad_tol=data[1])

    def test_minimise_argfail_max_iter(self):
        """The max_iter arg test of the minimise() user function."""

        # Everything except an integer (bin or int) must be rejected.
        for data in DATA_TYPES:
            if data[0] in ['bin', 'int']:
                continue
            self.assertRaises(RelaxIntError, self.minimisation_fns.minimise, 'Newton', max_iter=data[1])

    def test_minimise_argfail_constraints(self):
        """The constraints arg test of the minimise() user function."""

        # Everything except a boolean must be rejected.
        for data in DATA_TYPES:
            if data[0] in ['bool']:
                continue
            self.assertRaises(RelaxBoolError, self.minimisation_fns.minimise, 'Newton', constraints=data[1])

    def test_minimise_argfail_scaling(self):
        """The scaling arg test of the minimise() user function."""

        # Everything except a boolean must be rejected.
        for data in DATA_TYPES:
            if data[0] in ['bool']:
                continue
            self.assertRaises(RelaxBoolError, self.minimisation_fns.minimise, 'Newton', scaling=data[1])

    def test_minimise_argfail_verbosity(self):
        """The verbosity arg test of the minimise() user function."""

        # Everything except an integer (bin or int) must be rejected.
        for data in DATA_TYPES:
            if data[0] in ['bin', 'int']:
                continue
            self.assertRaises(RelaxIntError, self.minimisation_fns.minimise, 'Newton', verbosity=data[1])
Exemple #37
0
class Test_diffusion_tensor(Diffusion_tensor_base_class, TestCase):
    """Unit tests for the functions of the 'prompt.diffusion_tensor' module."""

    def __init__(self, methodName=None):
        """Initialise the test case, setting up the relax interpreter."""

        # Let the parent classes initialise themselves first.
        super(Test_diffusion_tensor, self).__init__(methodName)

        # Create the interpreter object used to reach the user functions.
        self.interpreter = Interpreter(show_script=False, raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)

        # Shortcut to the diffusion_tensor user function class.
        self.diffusion_tensor_fns = self.interpreter.diffusion_tensor

    def test_copy_argfail_pipe_from(self):
        """The pipe_from arg test of the diffusion_tensor.copy() user function."""

        # Everything except None or a string must be rejected.
        for data in DATA_TYPES:
            if data[0] in ['None', 'str']:
                continue
            self.assertRaises(RelaxNoneStrError, self.diffusion_tensor_fns.copy, pipe_from=data[1])

    def test_copy_argfail_pipe_to(self):
        """The pipe_to arg test of the diffusion_tensor.copy() user function."""

        # Everything except None or a string must be rejected.
        for data in DATA_TYPES:
            if data[0] in ['None', 'str']:
                continue
            self.assertRaises(RelaxNoneStrError, self.diffusion_tensor_fns.copy, pipe_to=data[1])

    def test_copy_argfail_both_pipes(self):
        """The pipe_from and pipe_to arg test of the diffusion_tensor.copy() user function."""

        # Test that both cannot be None (the default)!
        self.assertRaises(RelaxError, self.diffusion_tensor_fns.copy)

    def test_init_argfail_params(self):
        """The params arg test of diffusion_tensor.init() user function."""

        for data in DATA_TYPES:
            # None and single numbers are valid - skip them.
            if data[0] in ['None', 'int', 'bin', 'float']:
                continue

            # Tuples of the correct length (4 or 6 elements) are valid too.
            if data[0] in ['tuple', 'float tuple', 'str tuple'] and len(data[1]) in [4, 6]:
                continue

            # Everything else must be rejected.
            self.assertRaises(RelaxNoneNumTupleNumError, self.diffusion_tensor_fns.init, params=data[1])

    def test_init_argfail_time_scale(self):
        """The time_scale arg test of the diffusion_tensor.init() user function."""

        # Everything except a number (bin, int or float) must be rejected.
        for data in DATA_TYPES:
            if data[0] in ['bin', 'int', 'float']:
                continue
            self.assertRaises(RelaxNumError, self.diffusion_tensor_fns.init, params=1e-9, time_scale=data[1])

    def test_init_argfail_d_scale(self):
        """The d_scale arg test of the diffusion_tensor.init() user function."""

        # Everything except a number (bin, int or float) must be rejected.
        for data in DATA_TYPES:
            if data[0] in ['bin', 'int', 'float']:
                continue
            self.assertRaises(RelaxNumError, self.diffusion_tensor_fns.init, params=1e-9, d_scale=data[1])

    def test_init_argfail_angle_units(self):
        """The angle_units arg test of the diffusion_tensor.init() user function."""

        # Everything except a string must be rejected.
        for data in DATA_TYPES:
            if data[0] in ['str']:
                continue
            self.assertRaises(RelaxStrError, self.diffusion_tensor_fns.init, params=1e-9, angle_units=data[1])

    def test_init_argfail_param_types(self):
        """The param_types arg test of the diffusion_tensor.init() user function."""

        # Everything except an integer (int or bin) must be rejected.
        for data in DATA_TYPES:
            if data[0] in ['int', 'bin']:
                continue
            self.assertRaises(RelaxIntError, self.diffusion_tensor_fns.init, params=1e-9, param_types=data[1])

    def test_init_argfail_spheroid_type(self):
        """The spheroid_type arg test of the diffusion_tensor.init() user function."""

        # Everything except None or a string must be rejected.
        for data in DATA_TYPES:
            if data[0] in ['None', 'str']:
                continue
            self.assertRaises(RelaxNoneStrError, self.diffusion_tensor_fns.init, params=1e-9, spheroid_type=data[1])

    def test_init_argfail_fixed(self):
        """The fixed arg test of the diffusion_tensor.init() user function."""

        # Everything except a boolean must be rejected.
        for data in DATA_TYPES:
            if data[0] in ['bool']:
                continue
            self.assertRaises(RelaxBoolError, self.diffusion_tensor_fns.init, params=1e-9, fixed=data[1])
class Stereochem_analysis:
    """Class for performing the relative stereochemistry analysis."""

    def __init__(self, stage=1, results_dir=None, num_ens=10000, num_models=10, configs=None, snapshot_dir='snapshots', snapshot_min=None, snapshot_max=None, pseudo=None, noe_file=None, noe_norm=None, rdc_name=None, rdc_file=None, rdc_spin_id1_col=None, rdc_spin_id2_col=None, rdc_data_col=None, rdc_error_col=None, bond_length=None, bond_length_file=None, log=None, bucket_num=200, lower_lim_noe=0.0, upper_lim_noe=600.0, lower_lim_rdc=0.0, upper_lim_rdc=1.0):
        """Set up for the stereochemistry analysis.

        All arguments are simply stored on the instance; the relax interpreter
        is loaded, the results directory is created (defaulting to the current
        working directory), and an optional 'logs' subdirectory is set up.
        The actual analysis stages are executed later via run().

        @keyword stage:             Stage of analysis (see the module docstring above for the options).
        @type stage:                int
        @keyword results_dir:       The optional directory to place all results files into.
        @type results_dir:          None or str
        @keyword num_ens:           Number of ensembles.
        @type num_ens:              int
        @keyword num_models:        Ensemble size.
        @type num_models:           int
        @keyword configs:           All the configurations.
        @type configs:              list of str
        @keyword snapshot_dir:      Snapshot directories (corresponding to the configurations).
        @type snapshot_dir:         list of str
        @keyword snapshot_min:      The number of the first snapshots (corresponding to the configurations).
        @type snapshot_min:         list of int
        @keyword snapshot_max:      The number of the last snapshots (corresponding to the configurations).
        @type snapshot_max:         list of int
        @keyword pseudo:            The list of pseudo-atoms.  Each element is a list of the pseudo-atom name and a list of all those atoms forming the pseudo-atom.  For example, pseudo = [["Q7", ["@H16", "@H17", "@H18"]], ["Q9", ["@H20", "@H21", "@H22"]]].
        @type pseudo:               list of list of str and list of str
        @keyword noe_file:          The name of the NOE restraint file.
        @type noe_file:             str
        @keyword noe_norm:          The NOE normalisation factor (equal to the sum of all NOEs squared).
        @type noe_norm:             float
        @keyword rdc_name:          The label for this RDC data set.
        @type rdc_name:             str
        @keyword rdc_file:          The name of the RDC file.
        @type rdc_file:             str
        @keyword rdc_spin_id1_col:  The spin ID column of the first spin in the RDC file.
        @type rdc_spin_id1_col:     None or int
        @keyword rdc_spin_id2_col:  The spin ID column of the second spin in the RDC file.
        @type rdc_spin_id2_col:     None or int
        @keyword rdc_data_col:      The data column of the RDC file.
        @type rdc_data_col:         int
        @keyword rdc_error_col:     The error column of the RDC file.
        @type rdc_error_col:        int
        @keyword bond_length:       The bond length value in meters.  This overrides the bond_length_file argument.
        @type bond_length:          float or None
        @keyword bond_length_file:  The file of bond lengths for each atom pair in meters.  The first and second columns must be the spin ID strings and the third column must contain the data.
        @type bond_length_file:     float or None
        @keyword log:               Log file output flag (only for certain stages).
        @type log:                  bool
        @keyword bucket_num:        Number of buckets for the distribution plots.
        @type bucket_num:           int
        @keyword lower_lim_noe:     Distribution plot limits.
        @type lower_lim_noe:        float
        @keyword upper_lim_noe:     Distribution plot limits.
        @type upper_lim_noe:        float
        @keyword lower_lim_rdc:     Distribution plot limits.
        @type lower_lim_rdc:        float
        @keyword upper_lim_rdc:     Distribution plot limits.
        @type upper_lim_rdc:        float
        """

        # Execution lock.
        status.exec_lock.acquire('auto stereochem analysis', mode='auto-analysis')

        # Set up the analysis status object.
        status.init_auto_analysis('stereochem', type='stereochem')
        status.current_analysis = 'auto stereochem analysis'

        # Store all the args.
        self.stage = stage
        self.results_dir = results_dir
        self.num_ens = num_ens
        self.num_models = num_models
        self.configs = configs
        self.snapshot_dir = snapshot_dir
        self.snapshot_min = snapshot_min
        self.snapshot_max = snapshot_max
        self.pseudo = pseudo
        self.noe_file = noe_file
        self.noe_norm = noe_norm
        self.rdc_name = rdc_name
        self.rdc_file = rdc_file
        self.rdc_spin_id1_col = rdc_spin_id1_col
        self.rdc_spin_id2_col = rdc_spin_id2_col
        self.rdc_data_col = rdc_data_col
        self.rdc_error_col = rdc_error_col
        self.bond_length = bond_length
        self.bond_length_file = bond_length_file
        self.log = log
        self.bucket_num = bucket_num
        self.lower_lim_noe = lower_lim_noe
        self.upper_lim_noe = upper_lim_noe
        self.lower_lim_rdc = lower_lim_rdc
        self.upper_lim_rdc = upper_lim_rdc

        # Load the interpreter.
        self.interpreter = Interpreter(show_script=False, quit=False, raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)

        # Create the results directory.
        if self.results_dir:
            mkdir_nofail(self.results_dir)

        # Or use the current working directory.
        else:
            self.results_dir = getcwd()

        # Create a directory for log files.
        if self.log:
            mkdir_nofail(self.results_dir + sep + "logs")

        # Finish and unlock execution.
        # NOTE(review): the 'fin' flag is raised here at the end of setup,
        # before run() has executed any analysis stage - presumably only the
        # setup phase is tracked by the status object, but this should be
        # confirmed against the status module's semantics.
        status.auto_analysis['stereochem'].fin = True
        status.current_analysis = None
        status.exec_lock.release()


    def run(self):
        """Execute the given stage of the analysis."""

        # Store the original STDOUT.
        self.stdout_orig = sys.stdout

        # Sampling of snapshots.
        if self.stage == 1:
            self.sample()

        # NOE violation analysis.
        elif self.stage == 2:
            self.noe_viol()

        # Ensemble superimposition.
        elif self.stage == 3:
            self.superimpose()

        # RDC Q-factor analysis.
        elif self.stage == 4:
            self.rdc_analysis()

        # Grace plot creation.
        elif self.stage == 5:
            self.grace_plots()

        # Final combined Q ordering.
        elif self.stage == 6:
            self.combined_q()

        # Unknown stage.
        else:
            raise RelaxError("The stage number %s is unknown." % self.stage)

        # Restore STDOUT.
        sys.stdout = self.stdout_orig


    def combined_q(self):
        """Calculate the combined Q-factor.

        The combined Q is defined as::

            Q_total^2 = Q_NOE^2 + Q_RDC^2,

        and the NOE Q-factor as::

            Q^2 = U / sum(NOE_i^2),

        where U is the quadratic flat bottom well potential - the NOE violation in Angstrom^2.
        """

        # Checks.
        if not access(self.results_dir+sep+"NOE_viol_" + self.configs[0] + "_sorted", F_OK):
            raise RelaxError("The NOE analysis has not been performed, cannot find the file '%s'." % self.results_dir+sep+"NOE_viol_" + self.configs[0] + "_sorted")
        if not access(self.results_dir+sep+"Q_factors_" + self.configs[0] + "_sorted", F_OK):
            raise RelaxError("The RDC analysis has not been performed, cannot find the file '%s'." % self.results_dir+sep+"Q_factors_" + self.configs[0] + "_sorted")

        # Loop over the configurations.
        for i in range(len(self.configs)):
            # Print out.
            print("Creating the combined Q-factor file for configuration '%s'." % self.configs[i])

            # Open the NOE results file and read the data.
            file = open(self.results_dir+sep+"NOE_viol_" + self.configs[i])
            noe_lines = file.readlines()
            file.close()

            # Open the RDC results file and read the data.
            file = open(self.results_dir+sep+"Q_factors_" + self.configs[i])
            rdc_lines = file.readlines()
            file.close()

            # The combined Q-factor file.
            out = open(self.results_dir+sep+"Q_total_%s" % self.configs[i], 'w')
            out_sorted = open(self.results_dir+sep+"Q_total_%s_sorted" % self.configs[i], 'w')

            # Loop over the data (skipping the header line).
            data = []
            for j in range(1, len(noe_lines)):
                # Split the lines.
                ens = int(noe_lines[j].split()[0])
                noe_viol = float(noe_lines[j].split()[1])
                q_rdc = float(rdc_lines[j].split()[1])

                # The NOE Q-factor.
                q_noe = sqrt(noe_viol/self.noe_norm)

                # Combined Q.
                q = sqrt(q_noe**2 + q_rdc**2)

                # Write out the unsorted list.
                out.write("%-20i%20.15f\n" % (ens, q))

                # Store the values.
                data.append([q, ens])

            # Sort the combined Q.
            data.sort()

            # Write the data.
            for i in range(len(data)):
                out_sorted.write("%-20i%20.15f\n" % (data[i][1], data[i][0]))

            # Close the files.
            out.close()
            out_sorted.close()


    def generate_distribution(self, values, lower=0.0, upper=200.0, inc=None):
        """Create the distribution data structure."""

        # The bin width.
        bin_width = (upper - lower)/float(inc)

        # Init the dist object.
        dist = []
        for i in range(inc):
            dist.append([bin_width*i+lower, 0])

        # Loop over the values.
        for val in values:
            # The bin.
            bin = int((val - lower)/bin_width)

            # Outside of the limits.
            if bin < 0 or bin >= inc:
                print("Outside of the limits: '%s'" % val)
                continue

            # Increment the count.
            dist[bin][1] = dist[bin][1] + 1

        # Convert the counts to frequencies.
        total_pr = 0.0
        for i in range(inc):
            dist[i][1] = dist[i][1] / float(len(values))
            total_pr = total_pr + dist[i][1]

        print("Total Pr: %s" % total_pr)

        # Return the dist.
        return dist


    def grace_plots(self):
        """Generate grace plots of the results.

        Depending on which result files are present in the results directory,
        this creates Grace '.agr' files for the NOE violation curves and
        distributions, the RDC Q-factor curves and distributions, and the
        NOE-RDC correlation plots.  write_xy_header() and write_xy_data() are
        presumably relax's Grace output helpers - confirm against the module
        imports (outside this view).
        """

        # The number of configs.
        n = len(self.configs)

        # The colours for the different configs.
        defaults = [4, 2]    # Blue and red.
        colours = []
        for i in range(n):
            # Default colours.
            if i < len(defaults):
                colours.append(defaults[i])

            # Otherwise black!
            else:
                colours.append(0)

        # The ensemble number text, built with thousands separators (e.g. 10000 -> '10,000').
        ens_text = ''
        dividers = [1e15, 1e12, 1e9, 1e6, 1e3, 1]
        num_ens = self.num_ens
        for i in range(len(dividers)):
            # The number of whole multiples of this divider.
            num = int(num_ens / dividers[i])

            # The text ('000' pads a zero group once the leading group has been emitted).
            if num:
                text = repr(num)
            elif not num and ens_text:
                text = '000'
            else:
                continue

            # Update the text.
            ens_text = ens_text + text

            # A comma.
            if i < len(dividers)-1:
                ens_text = ens_text + ','

            # Remove the front part of the number.
            num_ens = num_ens - dividers[i]*num

        # Subtitle for all graphs.
        subtitle = '%s ensembles of %s' % (ens_text, self.num_models)

        # NOE violations (only if the stage 2 results exist).
        if access(self.results_dir+sep+"NOE_viol_" + self.configs[0] + "_sorted", F_OK):
            # Print out.
            print("Generating NOE violation Grace plots.")

            # Open the output files.
            grace_curve = open(self.results_dir+sep+"NOE_viol_curve.agr", 'w')
            grace_dist = open(self.results_dir+sep+"NOE_viol_dist.agr", 'w')

            # Loop over the configurations.
            data = []
            dist = []
            for i in range(n):
                # Open the results file and read the data.
                file = open(self.results_dir+sep+"NOE_viol_" + self.configs[i] + "_sorted")
                lines = file.readlines()
                file.close()

                # Add a new graph set.
                data.append([])

                # Loop over the ensembles and extract the NOE violation.
                noe_viols = []
                for j in range(1, len(lines)):
                    # Extract the violation.
                    viol = float(lines[j].split()[1])
                    noe_viols.append(viol)

                    # Add to the data structure.
                    data[i].append([j, viol])

                # Calculate the R distribution.
                dist.append(self.generate_distribution(noe_viols, inc=self.bucket_num, upper=self.upper_lim_noe, lower=self.lower_lim_noe))

            # Headers.
            write_xy_header(file=grace_curve, title='NOE violation comparison', subtitle=subtitle, sets=[n], set_names=[self.configs], set_colours=[colours], symbols=[[0]*n], axis_labels=[['Ensemble (sorted)', 'NOE violation (Angstrom\\S2\\N)']], legend_pos=[[0.3, 0.8]])
            write_xy_header(file=grace_dist, title='NOE violation comparison', subtitle=subtitle, sets=[n], set_names=[self.configs], set_colours=[colours], symbols=[[1]*n], symbol_sizes=[[0.5]*n], linestyle=[[3]*n], axis_labels=[['NOE violation (Angstrom\\S2\\N)', 'Frequency']], legend_pos=[[1.1, 0.8]])

            # Write the data.
            write_xy_data([data], file=grace_curve, graph_type='xy')
            write_xy_data([dist], file=grace_dist, graph_type='xy')

            # Close the files.
            grace_curve.close()
            grace_dist.close()

        # RDC Q-factors (only if the stage 4 results exist).
        if access(self.results_dir+sep+"Q_factors_" + self.configs[0] + "_sorted", F_OK):
            # Print out.
            print("Generating RDC Q-factor Grace plots.")

            # Open the Grace output files.
            grace_curve = open(self.results_dir+sep+"RDC_%s_curve.agr" % self.rdc_name, 'w')
            grace_dist = open(self.results_dir+sep+"RDC_%s_dist.agr" % self.rdc_name, 'w')

            # Loop over the configurations.
            data = []
            dist = []
            for i in range(n):
                # Open the results file and read the data.
                file = open(self.results_dir+sep+"Q_factors_" + self.configs[i] + "_sorted")
                lines = file.readlines()
                file.close()

                # Add a new graph set.
                data.append([])

                # Loop over the Q-factors.
                values = []
                for j in range(1, len(lines)):
                    # Extract the violation.
                    value = float(lines[j].split()[1])
                    values.append(value)

                    # Add to the data structure.
                    data[i].append([j, value])

                # Calculate the R distribution.
                dist.append(self.generate_distribution(values, inc=self.bucket_num, upper=self.upper_lim_rdc, lower=self.lower_lim_rdc))

            # Headers.
            write_xy_header(file=grace_curve, title='%s RDC Q-factor comparison' % self.rdc_name, subtitle=subtitle, sets=[n], set_names=[self.configs], set_colours=[colours], symbols=[[0]*n], axis_labels=[['Ensemble (sorted)', '%s RDC Q-factor (pales format)' % self.rdc_name]], legend_pos=[[0.3, 0.8]])
            write_xy_header(file=grace_dist, title='%s RDC Q-factor comparison' % self.rdc_name, subtitle=subtitle, sets=[n], set_names=[self.configs], set_colours=[colours], symbols=[[1]*n], symbol_sizes=[[0.5]*n], linestyle=[[3]*n], axis_labels=[['%s RDC Q-factor (pales format)' % self.rdc_name, 'Frequency']], legend_pos=[[1.1, 0.8]])

            # Write the data.
            write_xy_data([data], file=grace_curve, graph_type='xy')
            write_xy_data([dist], file=grace_dist, graph_type='xy')

            # Close the files.
            grace_curve.close()
            grace_dist.close()

        # NOE-RDC correlation plots (only if both sets of results exist).
        if access(self.results_dir+sep+"NOE_viol_" + self.configs[0] + "_sorted", F_OK) and access(self.results_dir+sep+"Q_factors_" + self.configs[0] + "_sorted", F_OK):
            # Print out.
            print("Generating NOE-RDC correlation Grace plots.")

            # Open the Grace output files.
            grace_file = open(self.results_dir+sep+"correlation_plot.agr", 'w')
            grace_file_scaled = open(self.results_dir+sep+"correlation_plot_scaled.agr", 'w')

            # Grace data.
            data = []
            data_scaled = []
            for i in range(len(self.configs)):
                # Open the NOE results file and read the data.
                file = open(self.results_dir+sep+"NOE_viol_" + self.configs[i])
                noe_lines = file.readlines()
                file.close()

                # Add a new graph set.
                data.append([])
                data_scaled.append([])

                # Open the RDC results file and read the data.
                file = open(self.results_dir+sep+"Q_factors_" + self.configs[i])
                rdc_lines = file.readlines()
                file.close()

                # Loop over the data.
                for j in range(1, len(noe_lines)):
                    # Split the lines.
                    noe_viol = float(noe_lines[j].split()[1])
                    q_factor = float(rdc_lines[j].split()[1])

                    # Add the xy pair.
                    data[i].append([noe_viol, q_factor])
                    data_scaled[i].append([sqrt(noe_viol/self.noe_norm), q_factor])

            # Write the data.
            # NOTE(review): unlike the two sections above, grace_file and
            # grace_file_scaled are never closed here - the data may only be
            # flushed at interpreter exit.  Verify whether this is relied upon.
            write_xy_header(file=grace_file, title='Correlation plot - %s RDC vs. NOE' % self.rdc_name, subtitle=subtitle, sets=[n], set_names=[self.configs], set_colours=[colours], symbols=[[9]*n], symbol_sizes=[[0.24]*n], linetype=[[0]*n], axis_labels=[['NOE violation (Angstrom\\S2\\N)', '%s RDC Q-factor (pales format)' % self.rdc_name]], legend_pos=[[1.1, 0.8]])
            write_xy_header(file=grace_file_scaled, title='Correlation plot - %s RDC vs. NOE Q-factor' % self.rdc_name, subtitle=subtitle, sets=[n], set_names=[self.configs], set_colours=[colours], symbols=[[9]*n], symbol_sizes=[[0.24]*n], linetype=[[0]*n], axis_labels=[['Normalised NOE violation (Q = sqrt(U / \\xS\\f{}NOE\\si\\N\\S2\\N))', '%s RDC Q-factor (pales format)' % self.rdc_name]], legend_pos=[[1.1, 0.8]])
            write_xy_data([data], file=grace_file, graph_type='xy')
            write_xy_data([data_scaled], file=grace_file_scaled, graph_type='xy')


    def noe_viol(self):
        """NOE violation calculations.

        For each configuration, every ensemble is loaded in turn, the NOE
        restraint violations are summed via the relax interpreter, and the
        results are written both unsorted ('NOE_viol_<config>') and sorted by
        violation ('NOE_viol_<config>_sorted') into the results directory.
        'cdp' is presumably relax's current data pipe global - confirm against
        the module imports.
        """

        # Redirect STDOUT to a log file.
        if self.log:
            sys.stdout = open(self.results_dir+sep+"logs" + sep + "NOE_viol.log", 'w')

        # Create a directory for the save files.
        # NOTE(review): 'dir' shadows the builtin of the same name.
        dir = self.results_dir + sep + "NOE_results"
        mkdir_nofail(dir=dir)

        # Loop over the configurations.
        for config in self.configs:
            # Print out.
            print("\n"*10 + "# Set up for config " + config + " #" + "\n")

            # Open the results file.
            # NOTE(review): 'NOE_volation' is a typo for 'NOE_violation', but
            # it is part of the output file format, so it is kept as-is.  The
            # two file handles are also never explicitly closed.
            out = open(self.results_dir+sep+"NOE_viol_" + config, 'w')
            out_sorted = open(self.results_dir+sep+"NOE_viol_" + config + "_sorted", 'w')
            out.write("%-20s%20s\n" % ("# Ensemble", "NOE_volation"))
            out_sorted.write("%-20s%20s\n" % ("# Ensemble", "NOE_volation"))

            # Create the data pipe.
            self.interpreter.pipe.create("noe_viol_%s" % config, "N-state")

            # Read the first structure.
            self.interpreter.structure.read_pdb("ensembles" + sep + config + "0.pdb", dir=self.results_dir, set_mol_name=config, set_model_num=list(range(1, self.num_models+1)))

            # Load all protons as the sequence.
            self.interpreter.structure.load_spins("@H*", ave_pos=False)

            # Create the pseudo-atoms.
            for i in range(len(self.pseudo)):
                self.interpreter.spin.create_pseudo(spin_name=self.pseudo[i][0], members=self.pseudo[i][1], averaging="linear")
            self.interpreter.sequence.display()

            # Read the NOE list.
            self.interpreter.noe.read_restraints(file=self.noe_file)

            # Set up the N-state model.
            self.interpreter.n_state_model.select_model(model="fixed")

            # Print out.
            print("\n"*2 + "# Set up complete #" + "\n"*10)

            # Loop over each ensemble.
            noe_viol = []
            for ens in range(self.num_ens):
                # Print out the ensemble to both the log and screen.
                if self.log:
                    sys.stdout.write(config + repr(ens) + "\n")
                sys.stderr.write(config + repr(ens) + "\n")

                # Delete the old structures and rename the molecule.
                self.interpreter.structure.delete()

                # Read the ensemble.
                self.interpreter.structure.read_pdb("ensembles" + sep + config + repr(ens) + ".pdb", dir=self.results_dir, set_mol_name=config, set_model_num=list(range(1, self.num_models+1)))

                # Get the atomic positions.
                self.interpreter.structure.get_pos(ave_pos=False)

                # Calculate the average NOE potential.
                self.interpreter.calc()

                # Sum the violations (the third element of each quad_pot entry).
                cdp.sum_viol = 0.0
                for i in range(len(cdp.ave_dist)):
                    if cdp.quad_pot[i][2]:
                        cdp.sum_viol = cdp.sum_viol + cdp.quad_pot[i][2]

                # Write out the NOE violation.
                # NOTE(review): the field width here (%30.15f) differs from the
                # %20.15f used for the sorted file below - presumably cosmetic.
                noe_viol.append([cdp.sum_viol, ens])
                out.write("%-20i%30.15f\n" % (ens, cdp.sum_viol))

                # Save the state.
                self.interpreter.results.write(file="%s_results_%s" % (config, ens), dir=dir, force=True)

            # Sort the NOE violations.
            noe_viol.sort()

            # Write the data.
            for i in range(len(noe_viol)):
                out_sorted.write("%-20i%20.15f\n" % (noe_viol[i][1], noe_viol[i][0]))


    def rdc_analysis(self):
        """Perform the RDC part of the analysis.

        For each configuration, every superimposed ensemble is loaded, the
        N-state model is fitted to the RDC data, and the Q-factors are written
        both unsorted ('Q_factors_<config>') and sorted
        ('Q_factors_<config>_sorted') into the results directory.  'cdp' is
        presumably relax's current data pipe global - confirm against the
        module imports.
        """

        # Redirect STDOUT to a log file.
        if self.log:
            sys.stdout = open(self.results_dir+sep+"logs" + sep + "RDC_%s_analysis.log" % self.rdc_name, 'w')

        # The dipolar constant (zero when no fixed bond length is supplied).
        d = 0.0
        if self.bond_length != None:
            d = 3.0 / (2.0*pi) * dipolar_constant(g13C, g1H, self.bond_length)

        # Create a directory for the save files.
        dir = self.results_dir + sep + "RDC_%s_results" % self.rdc_name
        mkdir_nofail(dir=dir)

        # Loop over the configurations.
        for config in self.configs:
            # Print out.
            print("\n"*10 + "# Set up for config " + config + " #" + "\n")

            # Open the results files.
            # NOTE(review): these handles are never explicitly closed.
            out = open(self.results_dir+sep+"Q_factors_" + config, 'w')
            out_sorted = open(self.results_dir+sep+"Q_factors_" + config + "_sorted", 'w')
            out.write("%-20s%20s%20s\n" % ("# Ensemble", "RDC_Q_factor(pales)", "RDC_Q_factor(standard)"))
            out_sorted.write("%-20s%20s\n" % ("# Ensemble", "RDC_Q_factor(pales)"))

            # Create the data pipe.
            self.interpreter.pipe.create("rdc_analysis_%s" % config, "N-state")

            # Read the first structure.
            self.interpreter.structure.read_pdb("ensembles_superimposed" + sep + config + "0.pdb", dir=self.results_dir, set_mol_name=config, set_model_num=list(range(1, self.num_models+1)))

            # Load all spins as the sequence.
            self.interpreter.structure.load_spins(ave_pos=False)

            # Create the pseudo-atoms.
            for i in range(len(self.pseudo)):
                self.interpreter.spin.create_pseudo(spin_name=self.pseudo[i][0], members=self.pseudo[i][1], averaging="linear")
            self.interpreter.sequence.display()

            # Read the RDC data.
            # NOTE(review): the RDC file name is also used as the alignment ID
            # (align_id=self.rdc_file) - presumably intentional as a unique
            # label, but worth confirming.
            self.interpreter.rdc.read(align_id=self.rdc_file, file=self.rdc_file, spin_id1_col=self.rdc_spin_id1_col, spin_id2_col=self.rdc_spin_id2_col, data_col=self.rdc_data_col, error_col=self.rdc_error_col)

            # Define the magnetic dipole-dipole relaxation interaction.
            if self.bond_length != None:
                self.interpreter.interatom.set_dist(spin_id1='@C*', spin_id2='@H*', ave_dist=self.bond_length)
                self.interpreter.interatom.set_dist(spin_id1='@C*', spin_id2='@Q*', ave_dist=self.bond_length)
            else:
                self.interpreter.interatom.read_dist(file=self.bond_length_file, spin_id1_col=1, spin_id2_col=2, data_col=3)

            # Set the nuclear isotope.
            self.interpreter.spin.isotope(isotope='13C', spin_id='@C*')
            self.interpreter.spin.isotope(isotope='1H', spin_id='@H*')
            self.interpreter.spin.isotope(isotope='1H', spin_id='@Q*')

            # Set up the model.
            self.interpreter.n_state_model.select_model(model="fixed")

            # Print out.
            print("\n"*2 + "# Set up complete #" + "\n"*10)

            # Loop over each ensemble.
            q_factors = []
            for ens in range(self.num_ens):
                # Print out the ensemble to both the log and screen.
                if self.log:
                    sys.stdout.write(config + repr(ens) + "\n")
                sys.stderr.write(config + repr(ens) + "\n")

                # Delete the old structures.
                self.interpreter.structure.delete()

                # Read the ensemble.
                self.interpreter.structure.read_pdb("ensembles_superimposed" + sep + config + repr(ens) + ".pdb", dir=self.results_dir, set_mol_name=config, set_model_num=list(range(1, self.num_models+1)))

                # Get the positional information, then load the CH vectors.
                self.interpreter.structure.get_pos(ave_pos=False)
                if self.bond_length != None:
                    self.interpreter.interatom.set_dist(spin_id1='@C*', spin_id2='@H*', ave_dist=self.bond_length)
                else:
                    self.interpreter.interatom.read_dist(file=self.bond_length_file, spin_id1_col=1, spin_id2_col=2, data_col=3)
                self.interpreter.interatom.unit_vectors(ave=False)

                # Minimisation.
                #grid_search(inc=4)
                self.interpreter.minimise("simplex", constraints=False)

                # Store and write out the Q-factors.
                q_factors.append([cdp.q_rdc, ens])
                out.write("%-20i%20.15f%20.15f\n" % (ens, cdp.q_rdc, cdp.q_rdc_norm2))

                # Calculate the alignment tensor in Hz, and store it for reference.
                cdp.align_tensor_Hz = d * cdp.align_tensors[0].A
                cdp.align_tensor_Hz_5D = d * cdp.align_tensors[0].A_5D

                # Save the state.
                self.interpreter.results.write(file="%s_results_%s" % (config, ens), dir=dir, force=True)

            # Sort the Q-factors.
            q_factors.sort()

            # Write the data.
            for i in range(len(q_factors)):
                out_sorted.write("%-20i%20.15f\n" % (q_factors[i][1], q_factors[i][0]))


    def sample(self):
        """Generate the ensembles by random sampling of the snapshots.

        For each configuration, num_ens ensemble PDB files are written into
        the 'ensembles' subdirectory of the results directory.  Each ensemble
        consists of num_models randomly chosen snapshot PDB files concatenated
        after a 'REM Structures:' header line.
        """

        # Make sure the ensemble directory exists.
        mkdir_nofail(dir=self.results_dir + sep + "ensembles")

        # Loop over the configurations.
        for index in range(len(self.configs)):
            config = self.configs[index]

            # Loop over each ensemble.
            for ens in range(self.num_ens):
                # Randomly select one snapshot number per model.
                rand = [randint(self.snapshot_min[index], self.snapshot_max[index]) for _ in range(self.num_models)]

                # Print out.
                print("Generating ensemble %s%s from structures %s." % (config, ens, rand))

                # The ensemble output file.
                file_name = "ensembles" + sep + config + repr(ens) + ".pdb"
                out = open(self.results_dir+sep+file_name, 'w')

                # Header recording which snapshots were used.
                out.write("REM Structures: " + repr(rand) + "\n")

                # Concatenate the selected snapshot files.
                for snapshot in rand:
                    snap_name = self.snapshot_dir[index] + sep + config + repr(snapshot) + ".pdb"
                    out.write(open(snap_name).read())

                # Close the file.
                out.close()


    def superimpose(self):
        """Superimpose the ensembles using fit to first in Molmol.

        Each ensemble PDB file is piped through an external 'molmol' process
        for fitting, then reloaded in relax to rewrite the proton atom names
        before the superimposed file is saved.

        NOTE(review): the configurations are hard-coded as ["R", "S"] rather
        than taken from self.configs, and the output directory is created
        relative to the current working directory rather than
        self.results_dir - confirm whether both are intentional.
        """

        # Create the output directory.
        mkdir_nofail("ensembles_superimposed")

        # Logging turned on.
        if self.log:
            log = open(self.results_dir+sep+"logs" + sep + "superimpose_molmol.stderr", 'w')
            sys.stdout = open(self.results_dir+sep+"logs" + sep + "superimpose.log", 'w')

        # Loop over S and R.
        for config in ["R", "S"]:
            # Loop over each ensemble.
            for ens in range(self.num_ens):
                # The file names.
                file_in = "ensembles" + sep + config + repr(ens) + ".pdb"
                file_out = "ensembles_superimposed" + sep + config + repr(ens) + ".pdb"

                # Print out.
                sys.stderr.write("Superimposing %s with Molmol, output to %s.\n" % (file_in, file_out))
                if self.log:
                    log.write("\n\n\nSuperimposing %s with Molmol, output to %s.\n" % (file_in, file_out))

                # Failure handling (if a failure occurred and this is rerun, skip all existing files).
                if access(self.results_dir+sep+file_out, F_OK):
                    continue

                # Open the Molmol pipe.
                # NOTE(review): the stdin writes below pass str objects - on
                # Python 3 a pipe opened without universal_newlines/text mode
                # expects bytes, so this looks Python 2 era; confirm.
                pipe = Popen("molmol -t -f -", shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=False)

                # Init all.
                pipe.stdin.write("InitAll yes\n")

                # Read the PDB.
                pipe.stdin.write("ReadPdb " + self.results_dir+sep+file_in + "\n")

                # Fitting to mean.
                pipe.stdin.write("Fit to_first 'selected'\n")
                pipe.stdin.write("Fit to_mean 'selected'\n")

                # Write the result.
                pipe.stdin.write("WritePdb " + self.results_dir+sep+file_out + "\n")

                # End Molmol.
                pipe.stdin.close()

                # Get STDOUT and STDERR.
                sys.stdout.write(pipe.stdout.read())
                if self.log:
                    log.write(pipe.stderr.read())

                # Close the pipe.
                pipe.stdout.close()
                pipe.stderr.close()

                # Open the superimposed file in relax.
                self.interpreter.reset()
                self.interpreter.pipe.create('out', 'N-state')
                self.interpreter.structure.read_pdb(file_out)

                # Fix the retarded MOLMOL proton naming.
                for model in cdp.structure.structural_data:
                    # Alias.
                    mol = model.mol[0]

                    # Loop over all atoms, rotating the leading character of each proton name to the end.
                    for i in range(len(mol.atom_name)):
                        # A proton.
                        if search('H', mol.atom_name[i]):
                            mol.atom_name[i] = mol.atom_name[i][1:] + mol.atom_name[i][0]

                # Replace the superimposed file.
                self.interpreter.structure.write_pdb(config + repr(ens) + ".pdb", dir=self.results_dir+sep+"ensembles_superimposed", force=True)
Exemple #39
0
class Stereochem_analysis:
    """Class for performing the relative stereochemistry analysis."""
    def __init__(self,
                 stage=1,
                 results_dir=None,
                 num_ens=10000,
                 num_models=10,
                 configs=None,
                 snapshot_dir='snapshots',
                 snapshot_min=None,
                 snapshot_max=None,
                 pseudo=None,
                 noe_file=None,
                 noe_norm=None,
                 rdc_name=None,
                 rdc_file=None,
                 rdc_spin_id1_col=None,
                 rdc_spin_id2_col=None,
                 rdc_data_col=None,
                 rdc_error_col=None,
                 bond_length=None,
                 bond_length_file=None,
                 log=None,
                 bucket_num=200,
                 lower_lim_noe=0.0,
                 upper_lim_noe=600.0,
                 lower_lim_rdc=0.0,
                 upper_lim_rdc=1.0):
        """Set up for the stereochemistry analysis.

        All arguments are stored on self, the relax interpreter is loaded, and
        the results/log directories are created.  Execution is serialised via
        the global execution lock.

        @keyword stage:             Stage of analysis (see the module docstring above for the options).
        @type stage:                int
        @keyword results_dir:       The optional directory to place all results files into.
        @type results_dir:          None or str
        @keyword num_ens:           Number of ensembles.
        @type num_ens:              int
        @keyword num_models:        Ensemble size.
        @type num_models:           int
        @keyword configs:           All the configurations.
        @type configs:              list of str
        @keyword snapshot_dir:      Snapshot directories (corresponding to the configurations).
        @type snapshot_dir:         list of str
        @keyword snapshot_min:      The number of the first snapshots (corresponding to the configurations).
        @type snapshot_min:         list of int
        @keyword snapshot_max:      The number of the last snapshots (corresponding to the configurations).
        @type snapshot_max:         list of int
        @keyword pseudo:            The list of pseudo-atoms.  Each element is a list of the pseudo-atom name and a list of all those atoms forming the pseudo-atom.  For example, pseudo = [["Q7", ["@H16", "@H17", "@H18"]], ["Q9", ["@H20", "@H21", "@H22"]]].
        @type pseudo:               list of list of str and list of str
        @keyword noe_file:          The name of the NOE restraint file.
        @type noe_file:             str
        @keyword noe_norm:          The NOE normalisation factor (equal to the sum of all NOEs squared).
        @type noe_norm:             float
        @keyword rdc_name:          The label for this RDC data set.
        @type rdc_name:             str
        @keyword rdc_file:          The name of the RDC file.
        @type rdc_file:             str
        @keyword rdc_spin_id1_col:  The spin ID column of the first spin in the RDC file.
        @type rdc_spin_id1_col:     None or int
        @keyword rdc_spin_id2_col:  The spin ID column of the second spin in the RDC file.
        @type rdc_spin_id2_col:     None or int
        @keyword rdc_data_col:      The data column of the RDC file.
        @type rdc_data_col:         int
        @keyword rdc_error_col:     The error column of the RDC file.
        @type rdc_error_col:        int
        @keyword bond_length:       The bond length value in meters.  This overrides the bond_length_file argument.
        @type bond_length:          float or None
        @keyword bond_length_file:  The file of bond lengths for each atom pair in meters.  The first and second columns must be the spin ID strings and the third column must contain the data.
        @type bond_length_file:     str or None
        @keyword log:               Log file output flag (only for certain stages).
        @type log:                  bool
        @keyword bucket_num:        Number of buckets for the distribution plots.
        @type bucket_num:           int
        @keyword lower_lim_noe:     Distribution plot limits.
        @type lower_lim_noe:        float
        @keyword upper_lim_noe:     Distribution plot limits.
        @type upper_lim_noe:        float
        @keyword lower_lim_rdc:     Distribution plot limits.
        @type lower_lim_rdc:        float
        @keyword upper_lim_rdc:     Distribution plot limits.
        @type upper_lim_rdc:        float
        """

        # Initial printout.
        title(file=sys.stdout, text="Stereochemistry auto-analysis")

        # Safely execute the full protocol.
        try:
            # Execution lock.
            status.exec_lock.acquire('auto stereochem analysis',
                                     mode='auto-analysis')

            # Set up the analysis status object.
            status.init_auto_analysis('stereochem', type='stereochem')
            status.current_analysis = 'auto stereochem analysis'

            # Store all the args.
            self.stage = stage
            self.results_dir = results_dir
            self.num_ens = num_ens
            self.num_models = num_models
            self.configs = configs
            self.snapshot_dir = snapshot_dir
            self.snapshot_min = snapshot_min
            self.snapshot_max = snapshot_max
            self.pseudo = pseudo
            self.noe_file = noe_file
            self.noe_norm = noe_norm
            self.rdc_name = rdc_name
            self.rdc_file = rdc_file
            self.rdc_spin_id1_col = rdc_spin_id1_col
            self.rdc_spin_id2_col = rdc_spin_id2_col
            self.rdc_data_col = rdc_data_col
            self.rdc_error_col = rdc_error_col
            self.bond_length = bond_length
            self.bond_length_file = bond_length_file
            self.log = log
            self.bucket_num = bucket_num
            self.lower_lim_noe = lower_lim_noe
            self.upper_lim_noe = upper_lim_noe
            self.lower_lim_rdc = lower_lim_rdc
            self.upper_lim_rdc = upper_lim_rdc

            # Load the interpreter.
            self.interpreter = Interpreter(show_script=False,
                                           raise_relax_error=True)
            self.interpreter.populate_self()
            self.interpreter.on(verbose=False)

            # Create the results directory.
            if self.results_dir:
                mkdir_nofail(self.results_dir)

            # Or use the current working directory.
            else:
                self.results_dir = getcwd()

            # Create a directory for log files.
            if self.log:
                mkdir_nofail(self.results_dir + sep + "logs")

        # Clean up.
        # NOTE(review): this finally block runs even when the setup above
        # raised - in that case status.auto_analysis['stereochem'] may not
        # exist and the lock may not be held; confirm against the status
        # object's semantics.
        finally:
            # Final printout.
            title(file=sys.stdout,
                  text="Completion of the stereochemistry auto-analysis")
            print_elapsed_time(time() - status.start_time)

            # Finish and unlock execution.
            status.auto_analysis['stereochem'].fin = True
            status.current_analysis = None
            status.exec_lock.release()

    def run(self):
        """Execute the given stage of the analysis."""

        # Store the original STDOUT.
        self.stdout_orig = sys.stdout

        # Sampling of snapshots.
        if self.stage == 1:
            self.sample()

        # NOE violation analysis.
        elif self.stage == 2:
            self.noe_viol()

        # Ensemble superimposition.
        elif self.stage == 3:
            self.superimpose()

        # RDC Q factor analysis.
        elif self.stage == 4:
            self.rdc_analysis()

        # Grace plot creation.
        elif self.stage == 5:
            self.grace_plots()

        # Final combined Q ordering.
        elif self.stage == 6:
            self.combined_q()

        # Unknown stage.
        else:
            raise RelaxError("The stage number %s is unknown." % self.stage)

        # Restore STDOUT.
        sys.stdout = self.stdout_orig

    def combined_q(self):
        """Calculate the combined Q factor.

        The combined Q is defined as::

            Q_total^2 = Q_NOE^2 + Q_RDC^2,

        and the NOE Q factor as::

            Q^2 = U / sum(NOE_i^2),

        where U is the quadratic flat bottom well potential - the NOE violation in Angstrom^2.
        """

        # Checks.
        if not access(
                self.results_dir + sep + "NOE_viol_" + self.configs[0] +
                "_sorted", F_OK):
            raise RelaxError(
                "The NOE analysis has not been performed, cannot find the file '%s'."
                % self.results_dir + sep + "NOE_viol_" + self.configs[0] +
                "_sorted")
        if not access(
                self.results_dir + sep + "Q_factors_" + self.configs[0] +
                "_sorted", F_OK):
            raise RelaxError(
                "The RDC analysis has not been performed, cannot find the file '%s'."
                % self.results_dir + sep + "Q_factors_" + self.configs[0] +
                "_sorted")

        # Loop over the configurations.
        for i in range(len(self.configs)):
            # Print out.
            print(
                "Creating the combined Q factor file for configuration '%s'." %
                self.configs[i])

            # Open the NOE results file and read the data.
            file = open(self.results_dir + sep + "NOE_viol_" + self.configs[i])
            noe_lines = file.readlines()
            file.close()

            # Open the RDC results file and read the data.
            file = open(self.results_dir + sep + "Q_factors_" +
                        self.configs[i])
            rdc_lines = file.readlines()
            file.close()

            # The combined Q factor file.
            out = open(self.results_dir + sep + "Q_total_%s" % self.configs[i],
                       'w')
            out_sorted = open(
                self.results_dir + sep + "Q_total_%s_sorted" % self.configs[i],
                'w')

            # Loop over the data (skipping the header line).
            data = []
            for j in range(1, len(noe_lines)):
                # Split the lines.
                ens = int(noe_lines[j].split()[0])
                noe_viol = float(noe_lines[j].split()[1])
                q_rdc = float(rdc_lines[j].split()[1])

                # The NOE Q factor.
                q_noe = sqrt(noe_viol / self.noe_norm)

                # Combined Q.
                q = sqrt(q_noe**2 + q_rdc**2)

                # Write out the unsorted list.
                out.write("%-20i%20.15f\n" % (ens, q))

                # Store the values.
                data.append([q, ens])

            # Sort the combined Q.
            data.sort()

            # Write the data.
            for i in range(len(data)):
                out_sorted.write("%-20i%20.15f\n" % (data[i][1], data[i][0]))

            # Close the files.
            out.close()
            out_sorted.close()

    def generate_distribution(self, values, lower=0.0, upper=200.0, inc=None):
        """Create the distribution data structure.

        The [lower, upper] range is split into inc equal buckets, each value
        is binned, and the counts are converted into frequencies (values
        falling outside the range are reported and skipped, but still count
        towards the normalisation).  Each element of the returned list is a
        [left bucket edge, frequency] pair.
        """

        # Width of a single histogram bucket.
        width = (upper - lower) / float(inc)

        # Initialise every bucket as a [left edge, count] pair.
        dist = [[width * k + lower, 0] for k in range(inc)]

        # Bin each value, skipping anything outside the range.
        for val in values:
            index = int((val - lower) / width)
            if not 0 <= index < inc:
                print("Outside of the limits: '%s'" % val)
                continue
            dist[index][1] += 1

        # Normalise the counts into frequencies.
        total_pr = 0.0
        denom = float(len(values))
        for entry in dist:
            entry[1] = entry[1] / denom
            total_pr += entry[1]

        print("Total Pr: %s" % total_pr)

        # Return the distribution.
        return dist

    def grace_plots(self):
        """Generate grace plots of the results.

        Three groups of plots are produced, each only if its input files
        exist in self.results_dir:

            - NOE violation curves and distributions (needs "NOE_viol_*_sorted").
            - RDC Q factor curves and distributions (needs "Q_factors_*_sorted").
            - NOE vs. RDC correlation plots (needs both of the above).

        NOTE(review): write_xy_header() and write_xy_data() presumably come
        from relax's plotting library - confirm their signatures against that
        API.
        """

        # The number of configs.
        n = len(self.configs)

        # The colours for the different configs.
        defaults = [4, 2]  # Blue and red.
        colours = []
        for i in range(n):
            # Default colours.
            if i < len(defaults):
                colours.append(defaults[i])

            # Otherwise black!
            else:
                colours.append(0)

        # The ensemble number text.  This builds a comma-separated
        # representation of self.num_ens (e.g. 10000 -> '10,000') by peeling
        # off three-digit groups from the largest divider down.
        ens_text = ''
        dividers = [1e15, 1e12, 1e9, 1e6, 1e3, 1]
        num_ens = self.num_ens
        for i in range(len(dividers)):
            # The number.
            num = int(num_ens / dividers[i])

            # The text ('000' pads interior zero groups once a leading group
            # has been emitted).
            if num:
                text = repr(num)
            elif not num and ens_text:
                text = '000'
            else:
                continue

            # Update the text.
            ens_text = ens_text + text

            # A comma.
            if i < len(dividers) - 1:
                ens_text = ens_text + ','

            # Remove the front part of the number.
            num_ens = num_ens - dividers[i] * num

        # Subtitle for all graphs.
        subtitle = '%s ensembles of %s' % (ens_text, self.num_models)

        # NOE violations.
        if access(
                self.results_dir + sep + "NOE_viol_" + self.configs[0] +
                "_sorted", F_OK):
            # Print out.
            print("Generating NOE violation Grace plots.")

            # Open the output files.
            grace_curve = open(self.results_dir + sep + "NOE_viol_curve.agr",
                               'w')
            grace_dist = open(self.results_dir + sep + "NOE_viol_dist.agr",
                              'w')

            # Loop over the configurations.
            data = []
            dist = []
            for i in range(n):
                # Open the results file and read the data.
                file = open(self.results_dir + sep + "NOE_viol_" +
                            self.configs[i] + "_sorted")
                lines = file.readlines()
                file.close()

                # Add a new graph set.
                data.append([])

                # Loop over the ensembles and extract the NOE violation
                # (skipping the header line at index 0).
                noe_viols = []
                for j in range(1, len(lines)):
                    # Extract the violation.
                    viol = float(lines[j].split()[1])
                    noe_viols.append(viol)

                    # Add to the data structure.
                    data[i].append([j, viol])

                # Calculate the R distribution.
                dist.append(
                    self.generate_distribution(noe_viols,
                                               inc=self.bucket_num,
                                               upper=self.upper_lim_noe,
                                               lower=self.lower_lim_noe))

            # Headers.
            write_xy_header(format='grace',
                            file=grace_curve,
                            title='NOE violation comparison',
                            subtitle=subtitle,
                            sets=[n],
                            set_names=[self.configs],
                            set_colours=[colours],
                            symbols=[[0] * n],
                            axis_labels=[[
                                'Ensemble (sorted)',
                                'NOE violation (Angstrom\\S2\\N)'
                            ]],
                            legend_pos=[[0.3, 0.8]])
            write_xy_header(
                format='grace',
                file=grace_dist,
                title='NOE violation comparison',
                subtitle=subtitle,
                sets=[n],
                set_names=[self.configs],
                set_colours=[colours],
                symbols=[[1] * n],
                symbol_sizes=[[0.5] * n],
                linestyle=[[3] * n],
                axis_labels=[['NOE violation (Angstrom\\S2\\N)', 'Frequency']],
                legend_pos=[[1.1, 0.8]])

            # Write the data.
            write_xy_data(format='grace',
                          data=[data],
                          file=grace_curve,
                          graph_type='xy')
            write_xy_data(format='grace',
                          data=[dist],
                          file=grace_dist,
                          graph_type='xy')

            # Close the files.
            grace_curve.close()
            grace_dist.close()

        # RDC Q factors.
        if access(
                self.results_dir + sep + "Q_factors_" + self.configs[0] +
                "_sorted", F_OK):
            # Print out.
            print("Generating RDC Q factor Grace plots.")

            # Open the Grace output files.
            grace_curve = open(
                self.results_dir + sep + "RDC_%s_curve.agr" % self.rdc_name,
                'w')
            grace_dist = open(
                self.results_dir + sep + "RDC_%s_dist.agr" % self.rdc_name,
                'w')

            # Loop over the configurations.
            data = []
            dist = []
            for i in range(n):
                # Open the results file and read the data.
                file = open(self.results_dir + sep + "Q_factors_" +
                            self.configs[i] + "_sorted")
                lines = file.readlines()
                file.close()

                # Add a new graph set.
                data.append([])

                # Loop over the Q factors (skipping the header line).
                values = []
                for j in range(1, len(lines)):
                    # Extract the violation.
                    value = float(lines[j].split()[1])
                    values.append(value)

                    # Add to the data structure.
                    data[i].append([j, value])

                # Calculate the R distribution.
                dist.append(
                    self.generate_distribution(values,
                                               inc=self.bucket_num,
                                               upper=self.upper_lim_rdc,
                                               lower=self.lower_lim_rdc))

            # Headers.
            write_xy_header(format='grace',
                            file=grace_curve,
                            title='%s RDC Q factor comparison' % self.rdc_name,
                            subtitle=subtitle,
                            sets=[n],
                            set_names=[self.configs],
                            set_colours=[colours],
                            symbols=[[0] * n],
                            axis_labels=[[
                                'Ensemble (sorted)',
                                '%s RDC Q factor (pales format)' %
                                self.rdc_name
                            ]],
                            legend_pos=[[0.3, 0.8]])
            write_xy_header(format='grace',
                            file=grace_dist,
                            title='%s RDC Q factor comparison' % self.rdc_name,
                            subtitle=subtitle,
                            sets=[n],
                            set_names=[self.configs],
                            set_colours=[colours],
                            symbols=[[1] * n],
                            symbol_sizes=[[0.5] * n],
                            linestyle=[[3] * n],
                            axis_labels=[[
                                '%s RDC Q factor (pales format)' %
                                self.rdc_name, 'Frequency'
                            ]],
                            legend_pos=[[1.1, 0.8]])

            # Write the data.
            write_xy_data(format='grace',
                          data=[data],
                          file=grace_curve,
                          graph_type='xy')
            write_xy_data(format='grace',
                          data=[dist],
                          file=grace_dist,
                          graph_type='xy')

            # Close the files.
            grace_curve.close()
            grace_dist.close()

        # NOE-RDC correlation plots (both analyses must have been run).
        if access(
                self.results_dir + sep + "NOE_viol_" + self.configs[0] +
                "_sorted", F_OK) and access(
                    self.results_dir + sep + "Q_factors_" + self.configs[0] +
                    "_sorted", F_OK):
            # Print out.
            print("Generating NOE-RDC correlation Grace plots.")

            # Open the Grace output files.
            grace_file = open(self.results_dir + sep + "correlation_plot.agr",
                              'w')
            grace_file_scaled = open(
                self.results_dir + sep + "correlation_plot_scaled.agr", 'w')

            # Grace data.  The unsorted files are used here so that NOE and
            # RDC rows correspond to the same ensemble.
            data = []
            data_scaled = []
            for i in range(len(self.configs)):
                # Open the NOE results file and read the data.
                file = open(self.results_dir + sep + "NOE_viol_" +
                            self.configs[i])
                noe_lines = file.readlines()
                file.close()

                # Add a new graph set.
                data.append([])
                data_scaled.append([])

                # Open the RDC results file and read the data.
                file = open(self.results_dir + sep + "Q_factors_" +
                            self.configs[i])
                rdc_lines = file.readlines()
                file.close()

                # Loop over the data.
                for j in range(1, len(noe_lines)):
                    # Split the lines.
                    noe_viol = float(noe_lines[j].split()[1])
                    q_factor = float(rdc_lines[j].split()[1])

                    # Add the xy pair (the scaled variant converts the NOE
                    # violation into an NOE Q factor via self.noe_norm).
                    data[i].append([noe_viol, q_factor])
                    data_scaled[i].append(
                        [sqrt(noe_viol / self.noe_norm), q_factor])

            # Write the data.
            write_xy_header(
                format='grace',
                file=grace_file,
                title='Correlation plot - %s RDC vs. NOE' % self.rdc_name,
                subtitle=subtitle,
                sets=[n],
                set_names=[self.configs],
                set_colours=[colours],
                symbols=[[9] * n],
                symbol_sizes=[[0.24] * n],
                linetype=[[0] * n],
                axis_labels=[[
                    'NOE violation (Angstrom\\S2\\N)',
                    '%s RDC Q factor (pales format)' % self.rdc_name
                ]],
                legend_pos=[[1.1, 0.8]])
            write_xy_header(
                format='grace',
                file=grace_file_scaled,
                title='Correlation plot - %s RDC vs. NOE Q factor' %
                self.rdc_name,
                subtitle=subtitle,
                sets=[n],
                set_names=[self.configs],
                set_colours=[colours],
                symbols=[[9] * n],
                symbol_sizes=[[0.24] * n],
                linetype=[[0] * n],
                axis_labels=[[
                    'Normalised NOE violation (Q = sqrt(U / \\xS\\f{}NOE\\si\\N\\S2\\N))',
                    '%s RDC Q factor (pales format)' % self.rdc_name
                ]],
                legend_pos=[[1.1, 0.8]])
            write_xy_data(format='grace',
                          data=[data],
                          file=grace_file,
                          graph_type='xy')
            write_xy_data(format='grace',
                          data=[data_scaled],
                          file=grace_file_scaled,
                          graph_type='xy')

    def noe_viol(self):
        """NOE violation calculations.

        For each configuration, every ensemble PDB file is loaded in turn,
        the fixed N-state model NOE potential is calculated, and the summed
        quadratic flat-bottom well violations are written to the
        "NOE_viol_<config>" file (ensemble order) and the
        "NOE_viol_<config>_sorted" file (sorted by violation).  A results
        file per ensemble is also saved into "NOE_results".
        """

        # Redirect STDOUT to a log file.
        if self.log:
            sys.stdout = open(
                self.results_dir + sep + "logs" + sep + "NOE_viol.log", 'w')

        # Create a directory for the save files.
        # (The name 'dir' shadows the builtin.)
        dir = self.results_dir + sep + "NOE_results"
        mkdir_nofail(dir=dir)

        # Loop over the configurations.
        for config in self.configs:
            # Print out.
            print("\n" * 10 + "# Set up for config " + config + " #" + "\n")

            # Open the results file.
            # NOTE(review): 'out' and 'out_sorted' are never explicitly
            # closed - their contents are only flushed on garbage collection.
            # The "NOE_volation" typo is part of the emitted file format and
            # is preserved.
            out = open(self.results_dir + sep + "NOE_viol_" + config, 'w')
            out_sorted = open(
                self.results_dir + sep + "NOE_viol_" + config + "_sorted", 'w')
            out.write("%-20s%20s\n" % ("# Ensemble", "NOE_volation"))
            out_sorted.write("%-20s%20s\n" % ("# Ensemble", "NOE_volation"))

            # Create the data pipe.
            self.interpreter.pipe.create("noe_viol_%s" % config, "N-state")

            # Read the first structure.
            self.interpreter.structure.read_pdb(
                "ensembles" + sep + config + "0.pdb",
                dir=self.results_dir,
                set_mol_name=config,
                set_model_num=list(range(1, self.num_models + 1)))

            # Load all protons as the sequence.
            self.interpreter.structure.load_spins("@H*", ave_pos=False)

            # Create the pseudo-atoms.
            for i in range(len(self.pseudo)):
                self.interpreter.spin.create_pseudo(
                    spin_name=self.pseudo[i][0],
                    members=self.pseudo[i][1],
                    averaging="linear")
            self.interpreter.sequence.display()

            # Read the NOE list.
            self.interpreter.noe.read_restraints(file=self.noe_file)

            # Set up the N-state model.
            self.interpreter.n_state_model.select_model(model="fixed")

            # Print out.
            print("\n" * 2 + "# Set up complete #" + "\n" * 10)

            # Loop over each ensemble.
            noe_viol = []
            for ens in range(self.num_ens):
                # Print out the ensemble to both the log and screen.
                if self.log:
                    sys.stdout.write(config + repr(ens) + "\n")
                sys.stderr.write(config + repr(ens) + "\n")

                # Delete the old structures and rename the molecule.
                self.interpreter.structure.delete()

                # Read the ensemble.
                self.interpreter.structure.read_pdb(
                    "ensembles" + sep + config + repr(ens) + ".pdb",
                    dir=self.results_dir,
                    set_mol_name=config,
                    set_model_num=list(range(1, self.num_models + 1)))

                # Get the atomic positions.
                self.interpreter.structure.get_pos(ave_pos=False)

                # Calculate the average NOE potential.
                self.interpreter.minimise.calculate()

                # Sum the violations.
                # NOTE(review): 'cdp' is presumably the relax current data
                # pipe global - confirm.  Only restraints with a non-zero
                # quadratic potential term contribute.
                cdp.sum_viol = 0.0
                for i in range(len(cdp.ave_dist)):
                    if cdp.quad_pot[i][2]:
                        cdp.sum_viol = cdp.sum_viol + cdp.quad_pot[i][2]

                # Write out the NOE violation.
                noe_viol.append([cdp.sum_viol, ens])
                out.write("%-20i%30.15f\n" % (ens, cdp.sum_viol))

                # Save the state.
                self.interpreter.results.write(file="%s_results_%s" %
                                               (config, ens),
                                               dir=dir,
                                               force=True)

            # Sort the NOE violations.
            noe_viol.sort()

            # Write the data.
            for i in range(len(noe_viol)):
                out_sorted.write("%-20i%20.15f\n" %
                                 (noe_viol[i][1], noe_viol[i][0]))

    def rdc_analysis(self):
        """Perform the RDC part of the analysis."""

        # Redirect STDOUT to a log file.
        if self.log:
            sys.stdout = open(
                self.results_dir + sep + "logs" + sep +
                "RDC_%s_analysis.log" % self.rdc_name, 'w')

        # The dipolar constant.
        d = 0.0
        if self.bond_length != None:
            d = 3.0 / (2.0 * pi) * dipolar_constant(
                periodic_table.gyromagnetic_ratio('13C'),
                periodic_table.gyromagnetic_ratio('1H'), self.bond_length)

        # Create a directory for the save files.
        dir = self.results_dir + sep + "RDC_%s_results" % self.rdc_name
        mkdir_nofail(dir=dir)

        # Loop over the configurations.
        for config in self.configs:
            # Print out.
            print("\n" * 10 + "# Set up for config " + config + " #" + "\n")

            # Open the results files.
            out = open(self.results_dir + sep + "Q_factors_" + config, 'w')
            out_sorted = open(
                self.results_dir + sep + "Q_factors_" + config + "_sorted",
                'w')
            out.write("%-20s%20s%20s\n" % ("# Ensemble", "RDC_Q_factor(pales)",
                                           "RDC_Q_factor(standard)"))
            out_sorted.write("%-20s%20s\n" %
                             ("# Ensemble", "RDC_Q_factor(pales)"))

            # Create the data pipe.
            self.interpreter.pipe.create("rdc_analysis_%s" % config, "N-state")

            # Read the first structure.
            self.interpreter.structure.read_pdb(
                "ensembles_superimposed" + sep + config + "0.pdb",
                dir=self.results_dir,
                set_mol_name=config,
                set_model_num=list(range(1, self.num_models + 1)))

            # Load all spins as the sequence.
            self.interpreter.structure.load_spins(ave_pos=False)

            # Create the pseudo-atoms.
            for i in range(len(self.pseudo)):
                self.interpreter.spin.create_pseudo(
                    spin_name=self.pseudo[i][0],
                    members=self.pseudo[i][1],
                    averaging="linear")
            self.interpreter.sequence.display()

            # Read the RDC data.
            self.interpreter.rdc.read(align_id=self.rdc_file,
                                      file=self.rdc_file,
                                      spin_id1_col=self.rdc_spin_id1_col,
                                      spin_id2_col=self.rdc_spin_id2_col,
                                      data_col=self.rdc_data_col,
                                      error_col=self.rdc_error_col)

            # Define the magnetic dipole-dipole relaxation interaction.
            if self.bond_length != None:
                self.interpreter.interatom.set_dist(spin_id1='@C*',
                                                    spin_id2='@H*',
                                                    ave_dist=self.bond_length)
                self.interpreter.interatom.set_dist(spin_id1='@C*',
                                                    spin_id2='@Q*',
                                                    ave_dist=self.bond_length)
            else:
                self.interpreter.interatom.read_dist(
                    file=self.bond_length_file,
                    spin_id1_col=1,
                    spin_id2_col=2,
                    data_col=3)

            # Set the nuclear isotope.
            self.interpreter.spin.isotope(isotope='13C', spin_id='@C*')
            self.interpreter.spin.isotope(isotope='1H', spin_id='@H*')
            self.interpreter.spin.isotope(isotope='1H', spin_id='@Q*')

            # Set up the model.
            self.interpreter.n_state_model.select_model(model="fixed")

            # Print out.
            print("\n" * 2 + "# Set up complete #" + "\n" * 10)

            # Loop over each ensemble.
            q_factors = []
            for ens in range(self.num_ens):
                # Print out the ensemble to both the log and screen.
                if self.log:
                    sys.stdout.write(config + repr(ens) + "\n")
                sys.stderr.write(config + repr(ens) + "\n")

                # Delete the old structures.
                self.interpreter.structure.delete()

                # Read the ensemble.
                self.interpreter.structure.read_pdb(
                    "ensembles_superimposed" + sep + config + repr(ens) +
                    ".pdb",
                    dir=self.results_dir,
                    set_mol_name=config,
                    set_model_num=list(range(1, self.num_models + 1)))

                # Get the positional information, then load the CH vectors.
                self.interpreter.structure.get_pos(ave_pos=False)
                if self.bond_length != None:
                    self.interpreter.interatom.set_dist(
                        spin_id1='@C*',
                        spin_id2='@H*',
                        ave_dist=self.bond_length)
                else:
                    self.interpreter.interatom.read_dist(
                        file=self.bond_length_file,
                        spin_id1_col=1,
                        spin_id2_col=2,
                        data_col=3)
                self.interpreter.interatom.unit_vectors(ave=False)

                # Minimisation.
                #minimise.grid_search(inc=4)
                self.interpreter.minimise.execute("simplex", constraints=False)

                # Store and write out the Q factors.
                q_factors.append([cdp.q_rdc_norm_squared_sum, ens])
                out.write("%-20i%20.15f%20.15f\n" %
                          (ens, cdp.q_rdc_norm_squared_sum,
                           cdp.q_rdc_norm_squared_sum))

                # Calculate the alignment tensor in Hz, and store it for reference.
                cdp.align_tensor_Hz = d * cdp.align_tensors[0].A
                cdp.align_tensor_Hz_5D = d * cdp.align_tensors[0].A_5D

                # Save the state.
                self.interpreter.results.write(file="%s_results_%s" %
                                               (config, ens),
                                               dir=dir,
                                               force=True)

            # Sort the NOE violations.
            q_factors.sort()

            # Write the data.
            for i in range(len(q_factors)):
                out_sorted.write("%-20i%20.15f\n" %
                                 (q_factors[i][1], q_factors[i][0]))

    def sample(self):
        """Build the ensembles by randomly sampling the MD snapshots.

        For every configuration and every ensemble, a set of snapshot indices
        is drawn at random and the matching snapshot PDB files are
        concatenated into a single ensemble PDB file under the 'ensembles'
        results sub-directory.
        """

        # Make sure the output directory for the ensembles exists.
        mkdir_nofail(dir=self.results_dir + sep + "ensembles")

        # One pass per configuration.
        for conf_index in range(len(self.configs)):
            # Alias the configuration label.
            config = self.configs[conf_index]

            # One pass per ensemble.
            for ens in range(self.num_ens):
                # Draw one random snapshot index per model.
                rand = [randint(self.snapshot_min[conf_index], self.snapshot_max[conf_index]) for _ in range(self.num_models)]

                # User feedback.
                print("Generating ensemble %s%s from structures %s." %
                      (config, ens, rand))

                # Assemble the relative output file name.
                file_name = "ensembles" + sep + config + repr(ens) + ".pdb"

                # Create the ensemble file.
                out = open(self.results_dir + sep + file_name, 'w')

                # Record which snapshots were used.
                out.write("REM Structures: " + repr(rand) + "\n")

                # Concatenate the randomly chosen snapshot files.
                for j in range(self.num_models):
                    # Path to the chosen snapshot.
                    rand_name = self.snapshot_dir[conf_index] + sep + config + repr(rand[j]) + ".pdb"

                    # Append its contents.
                    out.write(open(rand_name).read())

                # Finalise the ensemble file.
                out.close()

    def superimpose(self):
        """Superimpose the ensembles using fit to first in Molmol.

        Each ensemble PDB file from the 'ensembles' directory is piped through
        an external Molmol process ('molmol -t -f -'), fitted, and written out
        to 'ensembles_superimposed'.  The superimposed file is then re-read
        into relax, the Molmol proton naming is corrected, and the file is
        rewritten in place.
        """

        # Create the output directory.
        mkdir_nofail("ensembles_superimposed")

        # Logging turned on.  NOTE(review): sys.stdout is globally redirected
        # to the log file here and is not restored afterwards — all subsequent
        # stdout output from this process goes to the log; confirm intended.
        if self.log:
            log = open(
                self.results_dir + sep + "logs" + sep +
                "superimpose_molmol.stderr", 'w')
            sys.stdout = open(
                self.results_dir + sep + "logs" + sep + "superimpose.log", 'w')

        # Loop over S and R.
        for config in ["R", "S"]:
            # Loop over each ensemble.
            for ens in range(self.num_ens):
                # The file names (input ensemble and superimposed output).
                file_in = "ensembles" + sep + config + repr(ens) + ".pdb"
                file_out = "ensembles_superimposed" + sep + config + repr(
                    ens) + ".pdb"

                # Print out (stderr so it is visible even when stdout is
                # redirected to the log file).
                sys.stderr.write(
                    "Superimposing %s with Molmol, output to %s.\n" %
                    (file_in, file_out))
                if self.log:
                    log.write(
                        "\n\n\nSuperimposing %s with Molmol, output to %s.\n" %
                        (file_in, file_out))

                # Failure handling (if a failure occurred and this is rerun, skip all existing files).
                if access(self.results_dir + sep + file_out, F_OK):
                    continue

                # Open the Molmol pipe.  NOTE(review): stdin/stdout/stderr are
                # written/read as str below — this assumes Python 2 or
                # text-mode pipes; confirm behaviour under Python 3 where PIPE
                # streams are binary by default.
                pipe = Popen("molmol -t -f -",
                             shell=True,
                             stdin=PIPE,
                             stdout=PIPE,
                             stderr=PIPE,
                             close_fds=False)

                # Init all (reset Molmol state before loading).
                pipe.stdin.write("InitAll yes\n")

                # Read the PDB.
                pipe.stdin.write("ReadPdb " + self.results_dir + sep +
                                 file_in + "\n")

                # Fitting:  first to the first model, then to the mean
                # structure (both commands are sent, in this order).
                pipe.stdin.write("Fit to_first 'selected'\n")
                pipe.stdin.write("Fit to_mean 'selected'\n")

                # Write the result.
                pipe.stdin.write("WritePdb " + self.results_dir + sep +
                                 file_out + "\n")

                # End Molmol (closing stdin signals EOF so Molmol executes and
                # exits).
                pipe.stdin.close()

                # Get STDOUT and STDERR (full reads; also drains the pipes so
                # the child can terminate).
                sys.stdout.write(pipe.stdout.read())
                if self.log:
                    log.write(pipe.stderr.read())

                # Close the pipe.  NOTE(review): pipe.wait() is never called,
                # so the child's exit status is not collected — confirm this
                # is acceptable.
                pipe.stdout.close()
                pipe.stderr.close()

                # Open the superimposed file in relax (fresh data pipe per
                # ensemble).
                self.interpreter.reset()
                self.interpreter.pipe.create('out', 'N-state')
                self.interpreter.structure.read_pdb(file_out)

                # Fix the retarded MOLMOL proton naming.
                for model in cdp.structure.structural_data:
                    # Alias.  NOTE(review): only the first molecule of each
                    # model is corrected — assumes single-molecule models.
                    mol = model.mol[0]

                    # Loop over all atoms, moving the leading character of any
                    # atom name containing 'H' to the end (e.g. '1HB' -> 'HB1').
                    for i in range(len(mol.atom_name)):
                        # A proton.
                        if search('H', mol.atom_name[i]):
                            mol.atom_name[
                                i] = mol.atom_name[i][1:] + mol.atom_name[i][0]

                # Replace the superimposed file.
                self.interpreter.structure.write_pdb(
                    config + repr(ens) + ".pdb",
                    dir=self.results_dir + sep + "ensembles_superimposed",
                    force=True)
    def __init__(self, pipe_name=None, pipe_bundle=None, results_dir=None, write_results_dir=None, diff_model=None, mf_models=None, local_tm_models=None, grid_inc=11, diff_tensor_grid_inc=None, min_algor='newton', mc_sim_num=500, max_iter=None, user_fns=None, conv_loop=True):
        """Perform the full model-free analysis protocol of d'Auvergne and Gooley, 2008b.

        @keyword pipe_name:             The name of the data pipe containing the sequence info.  This data pipe should have all values set including the CSA value, the bond length, the heteronucleus name and proton name.  It should also have all relaxation data loaded.
        @type pipe_name:                str
        @keyword pipe_bundle:           The data pipe bundle to associate all spawned data pipes with.
        @type pipe_bundle:              str
        @keyword results_dir:           The directory where optimisation results will read from.  Results will also be saved to this directory if the write_results_dir argument is not given.
        @type results_dir:              str
        @keyword write_results_dir:     The directory where optimisation results will be saved in.  If None, it will default to the value of the results_dir argument.  This is mainly used for debugging.
        @type write_results_dir:        str or None
        @keyword diff_model:            The global diffusion model to optimise.  This can be one of 'local_tm', 'sphere', 'oblate', 'prolate', 'ellipsoid', or 'final'.  If all or a subset of these are supplied as a list, then these will be automatically looped over and calculated.
        @type diff_model:               str or list of str
        @keyword mf_models:             The model-free models.  If None, this defaults to ['m0', 'm1', 'm2', 'm3', 'm4', 'm5', 'm6', 'm7', 'm8', 'm9'].
        @type mf_models:                list of str or None
        @keyword local_tm_models:       The model-free models.  If None, this defaults to ['tm0', 'tm1', 'tm2', 'tm3', 'tm4', 'tm5', 'tm6', 'tm7', 'tm8', 'tm9'].
        @type local_tm_models:          list of str or None
        @keyword grid_inc:              The grid search size (the number of increments per dimension).
        @type grid_inc:                 int
        @keyword diff_tensor_grid_inc:  A list of grid search sizes for the optimisation of the sphere, prolate spheroid, oblate spheroid, and ellipsoid.  If None, this defaults to {'sphere': 11, 'prolate': 11, 'oblate': 11, 'ellipsoid': 6}.
        @type diff_tensor_grid_inc:     dict of int or None
        @keyword min_algor:             The minimisation algorithm (in most cases this should not be changed).
        @type min_algor:                str
        @keyword mc_sim_num:            The number of Monte Carlo simulations to be used for error analysis at the end of the analysis.
        @type mc_sim_num:               int
        @keyword max_iter:              The maximum number of iterations for the global iteration.  Set to None, then the algorithm iterates until convergence.
        @type max_iter:                 int or None.
        @keyword user_fns:              A dictionary of replacement user functions.  These will overwrite the standard user functions.  The key should be the name of the user function or user function class and the value should be the function or class instance.
        @type user_fns:                 dict
        @keyword conv_loop:             Automatic looping over all rounds until convergence.
        @type conv_loop:                bool
        """

        # Resolve the mutable keyword argument defaults.  A None sentinel is
        # used in the signature to avoid the shared mutable default argument
        # pitfall, while keeping the original call semantics intact.
        if mf_models is None:
            mf_models = ['m0', 'm1', 'm2', 'm3', 'm4', 'm5', 'm6', 'm7', 'm8', 'm9']
        if local_tm_models is None:
            local_tm_models = ['tm0', 'tm1', 'tm2', 'tm3', 'tm4', 'tm5', 'tm6', 'tm7', 'tm8', 'tm9']
        if diff_tensor_grid_inc is None:
            diff_tensor_grid_inc = {'sphere': 11, 'prolate': 11, 'oblate': 11, 'ellipsoid': 6}

        # Execution lock.
        status.exec_lock.acquire(pipe_bundle, mode='auto-analysis')

        # Store the args.
        self.pipe_name = pipe_name
        self.pipe_bundle = pipe_bundle
        self.mf_models = mf_models
        self.local_tm_models = local_tm_models
        self.grid_inc = grid_inc
        self.diff_tensor_grid_inc = diff_tensor_grid_inc
        self.min_algor = min_algor
        self.mc_sim_num = mc_sim_num
        self.max_iter = max_iter
        self.conv_loop = conv_loop

        # The model-free data pipe names.
        self.mf_model_pipes = [self.name_pipe(model) for model in self.mf_models]
        self.local_tm_model_pipes = [self.name_pipe(model) for model in self.local_tm_models]

        # The diffusion models (normalise a single model to a one-element list).
        if isinstance(diff_model, list):
            self.diff_model_list = diff_model
        else:
            self.diff_model_list = [diff_model]

        # Project directory (i.e. directory containing the model-free model results and the newly generated files)
        if results_dir:
            self.results_dir = results_dir + sep
        else:
            self.results_dir = getcwd() + sep
        if write_results_dir:
            self.write_results_dir = write_results_dir + sep
        else:
            self.write_results_dir = self.results_dir

        # Data checks.
        self.check_vars()

        # Set the data pipe to the current data pipe.
        if self.pipe_name != cdp_name():
            switch(self.pipe_name)

        # Some info for the status.
        self.status_setup()

        # Load the interpreter.
        self.interpreter = Interpreter(show_script=False, quit=False, raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)

        # Replacement user functions.
        if user_fns:
            for name in user_fns:
                setattr(self.interpreter, name, user_fns[name])

        # Execute the protocol.
        try:
            # Loop over the models.
            for self.diff_model in self.diff_model_list:
                # Wait a little while between diffusion models.
                sleep(1)

                # Set the global model name.
                status.auto_analysis[self.pipe_bundle].diff_model = self.diff_model

                # Initialise the convergence data structures.
                self.conv_data = Container()
                self.conv_data.chi2 = []
                self.conv_data.models = []
                self.conv_data.diff_vals = []
                if self.diff_model == 'sphere':
                    self.conv_data.diff_params = ['tm']
                elif self.diff_model == 'oblate' or self.diff_model == 'prolate':
                    self.conv_data.diff_params = ['tm', 'Da', 'theta', 'phi']
                elif self.diff_model == 'ellipsoid':
                    self.conv_data.diff_params = ['tm', 'Da', 'Dr', 'alpha', 'beta', 'gamma']
                self.conv_data.spin_ids = []
                self.conv_data.mf_params = []
                self.conv_data.mf_vals = []

                # Execute the analysis for each diffusion model.
                self.execute()

        # Clean up.
        finally:
            # Finish and unlock execution.
            status.auto_analysis[self.pipe_bundle].fin = True
            status.current_analysis = None
            status.exec_lock.release()
Exemple #41
0
class Test_relax_disp(TestCase):
    """Unit tests for the functions of the 'prompt.relax_disp' module."""

    def __init__(self, methodName=None):
        """Set up the test case class for the system tests."""

        # Execute the base __init__ methods.
        super(Test_relax_disp, self).__init__(methodName)

        # Load the interpreter.
        self.interpreter = Interpreter(show_script=False,
                                       raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)

        # Alias the user function class.
        self.relax_disp_fns = self.interpreter.relax_disp

    def test_relax_cpmg_setup_argfail_cpmg_frq(self):
        """The cpmg_frq arg test of the relax_disp.cpmg_setup() user function."""

        # Loop over the data types.
        for data in DATA_TYPES:
            # Catch the float, int and None arguments, and skip them.
            if data[0] == 'float' or data[0] == 'int' or data[0] == 'None':
                continue

            # The argument test.  This must be inside the loop so every
            # remaining data type is exercised, not just the last one.
            self.assertRaises(RelaxNoneNumError,
                              self.relax_disp_fns.cpmg_setup,
                              spectrum_id='test',
                              cpmg_frq=data[1])

    def test_relax_cpmg_setup_argfail_spectrum_id(self):
        """The spectrum_id arg test of the relax_disp.cpmg_setup() user function."""

        # Loop over the data types.
        for data in DATA_TYPES:
            # Catch the str arguments, and skip them.
            if data[0] == 'str':
                continue

            # The argument test (inside the loop, so all data types are tested).
            self.assertRaises(RelaxStrError,
                              self.relax_disp_fns.cpmg_setup,
                              spectrum_id=data[1])

    def test_relax_exp_type_argfail_exp_type(self):
        """The exp_type arg test of the relax_disp.exp_type() user function."""

        # Loop over the data types.
        for data in DATA_TYPES:
            # Catch the str arguments, and skip them.
            if data[0] == 'str':
                continue

            # The argument test (inside the loop, so all data types are tested).
            self.assertRaises(RelaxStrError,
                              self.relax_disp_fns.exp_type,
                              exp_type=data[1])

    def test_relax_select_model_argfail_model(self):
        """The model arg test of the relax_disp.select_model() user function."""

        # Loop over the data types.
        for data in DATA_TYPES:
            # Catch the str arguments, and skip them.
            if data[0] == 'str':
                continue

            # The argument test (inside the loop, so all data types are tested).
            self.assertRaises(RelaxStrError,
                              self.relax_disp_fns.select_model,
                              model=data[1])
Exemple #42
0
class Test_molecule(Molecule_base_class, TestCase):
    """Unit tests for the functions of the 'prompt.molecule' module."""

    def __init__(self, methodName=None):
        """Set up the test case class for the system tests."""

        # Run the base class initialisation.
        super(Test_molecule, self).__init__(methodName)

        # Set up a relax interpreter instance for the tests.
        self.interpreter = Interpreter(show_script=False,
                                       raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)

        # Shortcut to the molecule user function class.
        self.molecule_fns = self.interpreter.molecule

    def test_copy_argfail_pipe_from(self):
        """Test the proper failure of the molecule.copy() user function for the pipe_from argument."""

        # All data types other than None and str must trigger the error.
        for data in DATA_TYPES:
            if data[0] not in ('None', 'str'):
                self.assertRaises(RelaxNoneStrError,
                                  self.molecule_fns.copy,
                                  pipe_from=data[1],
                                  mol_from='#Old mol',
                                  mol_to='#Old mol')

    def test_copy_argfail_mol_from(self):
        """Test the proper failure of the molecule.copy() user function for the mol_from argument."""

        # All data types other than str must trigger the error.
        for data in DATA_TYPES:
            if data[0] != 'str':
                self.assertRaises(RelaxStrError,
                                  self.molecule_fns.copy,
                                  mol_from=data[1],
                                  mol_to='#Old mol')

    def test_copy_argfail_pipe_to(self):
        """Test the proper failure of the molecule.copy() user function for the pipe_to argument."""

        # All data types other than None and str must trigger the error.
        for data in DATA_TYPES:
            if data[0] not in ('None', 'str'):
                self.assertRaises(RelaxNoneStrError,
                                  self.molecule_fns.copy,
                                  pipe_to=data[1],
                                  mol_from='#Old mol',
                                  mol_to='#New mol2')

    def test_copy_argfail_mol_to(self):
        """Test the proper failure of the molecule.copy() user function for the mol_to argument."""

        # Initialise the required data.
        self.setup_data()

        # All data types other than None and str must trigger the error.
        for data in DATA_TYPES:
            if data[0] not in ('None', 'str'):
                self.assertRaises(RelaxNoneStrError,
                                  self.molecule_fns.copy,
                                  mol_from='#Old mol',
                                  mol_to=data[1])

    def test_create_argfail_mol_name(self):
        """Test the proper failure of the molecule.create() user function for the mol_name argument."""

        # All data types other than str must trigger the error.
        for data in DATA_TYPES:
            if data[0] != 'str':
                self.assertRaises(RelaxStrError,
                                  self.molecule_fns.create,
                                  mol_name=data[1])

    def test_delete_argfail_mol_id(self):
        """Test the proper failure of the molecule.delete() user function for the mol_id argument."""

        # All data types other than str must trigger the error.
        for data in DATA_TYPES:
            if data[0] != 'str':
                self.assertRaises(RelaxStrError,
                                  self.molecule_fns.delete,
                                  mol_id=data[1])

    def test_display_argfail_mol_id(self):
        """Test the proper failure of the molecule.display() user function for the mol_id argument."""

        # All data types other than None and str must trigger the error.
        for data in DATA_TYPES:
            if data[0] not in ('None', 'str'):
                self.assertRaises(RelaxNoneStrError,
                                  self.molecule_fns.display,
                                  mol_id=data[1])

    def test_name_argfail_mol_id(self):
        """Test the proper failure of the molecule.name() user function for the mol_id argument."""

        # All data types other than None and str must trigger the error.
        for data in DATA_TYPES:
            if data[0] not in ('None', 'str'):
                self.assertRaises(RelaxNoneStrError,
                                  self.molecule_fns.name,
                                  mol_id=data[1])

    def test_name_argfail_name(self):
        """Test the proper failure of the molecule.name() user function for the name argument."""

        # All data types other than str must trigger the error.
        for data in DATA_TYPES:
            if data[0] != 'str':
                self.assertRaises(RelaxStrError,
                                  self.molecule_fns.name,
                                  name=data[1])
class Test_n_state_model(N_state_model_base_class, TestCase):
    """Unit tests for the functions of the 'prompt.n_state_model' module."""

    def __init__(self, methodName=None):
        """Set up the test case class for the system tests."""

        # Run the base class initialisation.
        super(Test_n_state_model, self).__init__(methodName)

        # Set up a relax interpreter instance for the tests.
        self.interpreter = Interpreter(show_script=False, raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)

        # Shortcut to the N-state model user function class.
        self.n_state_model_fns = self.interpreter.n_state_model


    def test_CoM_argfail_pivot_point(self):
        """The pivot_point arg test of the n_state_model.CoM() user function."""

        # All data types other than 3-element numeric lists must trigger the error.
        for data in DATA_TYPES:
            # Valid case:  an int, float, or number list of length 3 (the
            # length check only runs for list types, preserving short-circuit).
            if data[0] in ('int list', 'float list', 'number list') and len(data[1]) == 3:
                continue

            # The argument test.
            self.assertRaises(RelaxListNumError, self.n_state_model_fns.CoM, pivot_point=data[1])


    def test_CoM_argfail_centre(self):
        """The centre arg test of the n_state_model.CoM() user function."""

        # All data types other than None and 3-element numeric lists must trigger the error.
        for data in DATA_TYPES:
            # Valid case:  None, or an int, float, or number list of length 3.
            if data[0] == 'None' or (data[0] in ('int list', 'float list', 'number list') and len(data[1]) == 3):
                continue

            # The argument test.
            self.assertRaises(RelaxNoneListNumError, self.n_state_model_fns.CoM, centre=data[1])


    def test_cone_pdb_argfail_cone_type(self):
        """The cone_type arg test of the n_state_model.cone_pdb() user function."""

        # All data types other than str must trigger the error.
        for data in DATA_TYPES:
            if data[0] != 'str':
                self.assertRaises(RelaxStrError, self.n_state_model_fns.cone_pdb, cone_type=data[1])


    def test_cone_pdb_argfail_scale(self):
        """The scale arg test of the n_state_model.cone_pdb() user function."""

        # All data types other than float, bin and int must trigger the error.
        for data in DATA_TYPES:
            if data[0] not in ('float', 'bin', 'int'):
                self.assertRaises(RelaxNumError, self.n_state_model_fns.cone_pdb, cone_type='', scale=data[1])


    def test_cone_pdb_argfail_file(self):
        """The file arg test of the n_state_model.cone_pdb() user function."""

        # All data types other than str must trigger the error.
        for data in DATA_TYPES:
            if data[0] != 'str':
                self.assertRaises(RelaxStrError, self.n_state_model_fns.cone_pdb, cone_type='', file=data[1])


    def test_cone_pdb_argfail_dir(self):
        """The dir arg test of the n_state_model.cone_pdb() user function."""

        # All data types other than None and str must trigger the error.
        for data in DATA_TYPES:
            if data[0] not in ('None', 'str'):
                self.assertRaises(RelaxNoneStrError, self.n_state_model_fns.cone_pdb, cone_type='', dir=data[1])


    def test_cone_pdb_argfail_force(self):
        """The force arg test of the n_state_model.cone_pdb() user function."""

        # All data types other than bool must trigger the error.
        for data in DATA_TYPES:
            if data[0] != 'bool':
                self.assertRaises(RelaxBoolError, self.n_state_model_fns.cone_pdb, cone_type='', force=data[1])


    def test_number_of_states_argfail_N(self):
        """Failure of the N arg of the n_state_model.number_of_states() user function."""

        # All data types other than bin and int must trigger the error.
        for data in DATA_TYPES:
            if data[0] not in ('bin', 'int'):
                self.assertRaises(RelaxIntError, self.n_state_model_fns.number_of_states, N=data[1])


    def test_ref_domain_argfail_ref(self):
        """Failure of the ref arg of the n_state_model.ref_domain() user function."""

        # All data types other than str must trigger the error.
        for data in DATA_TYPES:
            if data[0] != 'str':
                self.assertRaises(RelaxStrError, self.n_state_model_fns.ref_domain, ref=data[1])


    def test_select_model_argfail_model(self):
        """Failure of the model arg of the n_state_model.select_model() user function."""

        # All data types other than str must trigger the error.
        for data in DATA_TYPES:
            if data[0] != 'str':
                self.assertRaises(RelaxStrError, self.n_state_model_fns.select_model, model=data[1])
class Test_structure(Structure_base_class, TestCase):
    """Unit tests for the functions of the 'prompt.structure' module."""

    def __init__(self, methodName=None):
        """Set up the test case class for the system tests."""

        # Initialise the parent classes first.
        super(Test_structure, self).__init__(methodName)

        # Build a relax interpreter which raises RelaxErrors instead of exiting.
        self.interpreter = Interpreter(show_script=False, quit=False, raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)

        # Shortcut to the structure user function class.
        self.structure_fns = self.interpreter.structure


    def _check_arg_types(self, error, fn, arg, allowed, **fixed):
        """Assert that fn raises error for every DATA_TYPES value outside the allowed set.

        @param error:   The expected RelaxError subclass.
        @param fn:      The user function to exercise.
        @param arg:     The name of the keyword argument under test.
        @param allowed: The DATA_TYPES type names which are valid and hence skipped.
        @param fixed:   Additional keyword arguments passed unchanged on each call.
        """

        for dtype in DATA_TYPES:
            # Valid types are not expected to fail.
            if dtype[0] in allowed:
                continue

            # All other values must be rejected with the given error.
            kwargs = dict(fixed)
            kwargs[arg] = dtype[1]
            self.assertRaises(error, fn, **kwargs)


    def test_create_diff_tensor_pdb_argfail_scale(self):
        """The scale arg test of the structure.create_diff_tensor_pdb() user function."""

        self._check_arg_types(RelaxNumError, self.structure_fns.create_diff_tensor_pdb, 'scale', ('float', 'bin', 'int'))


    def test_create_diff_tensor_pdb_argfail_file(self):
        """The file arg test of the structure.create_diff_tensor_pdb() user function."""

        self._check_arg_types(RelaxStrError, self.structure_fns.create_diff_tensor_pdb, 'file', ('str',))


    def test_create_diff_tensor_pdb_argfail_dir(self):
        """The dir arg test of the structure.create_diff_tensor_pdb() user function."""

        self._check_arg_types(RelaxNoneStrError, self.structure_fns.create_diff_tensor_pdb, 'dir', ('None', 'str'))


    def test_create_diff_tensor_pdb_argfail_force(self):
        """The force arg test of the structure.create_diff_tensor_pdb() user function."""

        self._check_arg_types(RelaxBoolError, self.structure_fns.create_diff_tensor_pdb, 'force', ('bool',))


    def test_create_vector_dist_argfail_length(self):
        """The length arg test of the structure.create_vector_dist() user function."""

        self._check_arg_types(RelaxNumError, self.structure_fns.create_vector_dist, 'length', ('bin', 'int', 'float'))


    def test_create_vector_dist_argfail_file(self):
        """The file arg test of the structure.create_vector_dist() user function."""

        self._check_arg_types(RelaxStrError, self.structure_fns.create_vector_dist, 'file', ('str',))


    def test_create_vector_dist_argfail_dir(self):
        """The dir arg test of the structure.create_vector_dist() user function."""

        self._check_arg_types(RelaxNoneStrError, self.structure_fns.create_vector_dist, 'dir', ('None', 'str'))


    def test_create_vector_dist_argfail_symmetry(self):
        """The symmetry arg test of the structure.create_vector_dist() user function."""

        self._check_arg_types(RelaxBoolError, self.structure_fns.create_vector_dist, 'symmetry', ('bool',))


    def test_create_vector_dist_argfail_force(self):
        """The force arg test of the structure.create_vector_dist() user function."""

        self._check_arg_types(RelaxBoolError, self.structure_fns.create_vector_dist, 'force', ('bool',))


    def test_load_spins_argfail_spin_id(self):
        """The spin_id arg test of the structure.load_spins() user function."""

        self._check_arg_types(RelaxNoneStrError, self.structure_fns.load_spins, 'spin_id', ('None', 'str'))


    def test_read_pdb_argfail_file(self):
        """The file arg test of the structure.read_pdb() user function."""

        self._check_arg_types(RelaxStrError, self.structure_fns.read_pdb, 'file', ('str',))


    def test_read_pdb_argfail_dir(self):
        """The dir arg test of the structure.read_pdb() user function."""

        self._check_arg_types(RelaxNoneStrError, self.structure_fns.read_pdb, 'dir', ('None', 'str'), file='test.pdb')


    def test_read_pdb_argfail_read_mol(self):
        """The read_mol arg test of the structure.read_pdb() user function."""

        self._check_arg_types(RelaxNoneIntListIntError, self.structure_fns.read_pdb, 'read_mol', ('None', 'bin', 'int', 'int list'), file='test.pdb')


    def test_read_pdb_argfail_set_mol_name(self):
        """The set_mol_name arg test of the structure.read_pdb() user function."""

        self._check_arg_types(RelaxNoneStrListStrError, self.structure_fns.read_pdb, 'set_mol_name', ('None', 'str', 'str list'), file='test.pdb')


    def test_read_pdb_argfail_read_model(self):
        """The read_model arg test of the structure.read_pdb() user function."""

        self._check_arg_types(RelaxNoneIntListIntError, self.structure_fns.read_pdb, 'read_model', ('None', 'bin', 'int', 'int list'), file='test.pdb')


    def test_read_pdb_argfail_set_model_num(self):
        """The set_model_num arg test of the structure.read_pdb() user function."""

        self._check_arg_types(RelaxNoneIntListIntError, self.structure_fns.read_pdb, 'set_model_num', ('None', 'bin', 'int', 'int list'), file='test.pdb')
class Test_align_tensor(Align_tensor_base_class):
    """Unit tests for the functions of the 'prompt.align_tensor' module."""

    def __init__(self, methodName=None):
        """Set up the test case class for the system tests."""

        # Initialise the parent class first.
        super(Test_align_tensor, self).__init__(methodName)

        # Build a relax interpreter which raises RelaxErrors instead of exiting.
        self.interpreter = Interpreter(show_script=False, quit=False, raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)

        # Shortcut to the align_tensor user function class.
        self.align_tensor_fns = self.interpreter.align_tensor


    def _check_arg_types(self, error, fn, arg, allowed, **fixed):
        """Assert that fn raises error for every DATA_TYPES value outside the allowed set.

        @param error:   The expected RelaxError subclass.
        @param fn:      The user function to exercise.
        @param arg:     The name of the keyword argument under test.
        @param allowed: The DATA_TYPES type names which are valid and hence skipped.
        @param fixed:   Additional keyword arguments passed unchanged on each call.
        """

        for dtype in DATA_TYPES:
            # Valid types are not expected to fail.
            if dtype[0] in allowed:
                continue

            # All other values must be rejected with the given error.
            kwargs = dict(fixed)
            kwargs[arg] = dtype[1]
            self.assertRaises(error, fn, **kwargs)


    def test_copy_argfail_tensor_from(self):
        """Failure of the tensor_from arg of the align_tensor.copy() user function."""

        self._check_arg_types(RelaxStrError, self.align_tensor_fns.copy, 'tensor_from', ('str',))


    def test_copy_argfail_pipe_from(self):
        """The pipe_from arg test of the align_tensor.copy() user function."""

        self._check_arg_types(RelaxNoneStrError, self.align_tensor_fns.copy, 'pipe_from', ('None', 'str'), tensor_from='Pf1')


    def test_copy_argfail_tensor_to(self):
        """Failure of the tensor_to arg of the align_tensor.copy() user function."""

        self._check_arg_types(RelaxStrError, self.align_tensor_fns.copy, 'tensor_to', ('str',))


    def test_copy_argfail_pipe_to(self):
        """The pipe_to arg test of the align_tensor.copy() user function."""

        self._check_arg_types(RelaxNoneStrError, self.align_tensor_fns.copy, 'pipe_to', ('None', 'str'), tensor_from='Pf1', tensor_to='Pf1')


    def test_copy_argfail_both_pipes(self):
        """The pipe_from and pipe_to arg test of the align_tensor.copy() user function."""

        # Leaving both pipes at their None defaults must fail.
        self.assertRaises(RelaxError, self.align_tensor_fns.copy, tensor_from='Pf1', tensor_to='Pf1')


    def test_delete_argfail_tensor(self):
        """Failure of the tensor arg of the align_tensor.delete() user function."""

        self._check_arg_types(RelaxNoneStrError, self.align_tensor_fns.delete, 'tensor', ('None', 'str'))


    def test_display_argfail_tensor(self):
        """Failure of the tensor arg of the align_tensor.display() user function."""

        self._check_arg_types(RelaxNoneStrError, self.align_tensor_fns.display, 'tensor', ('None', 'str'))


    def test_init_argfail_tensor(self):
        """Failure of the tensor arg of the align_tensor.init() user function."""

        self._check_arg_types(RelaxNoneStrError, self.align_tensor_fns.init, 'tensor', ('None', 'str'))


    def test_init_argfail_align_id(self):
        """Failure of the align_id arg of the align_tensor.init() user function."""

        self._check_arg_types(RelaxStrError, self.align_tensor_fns.init, 'align_id', ('str',))


    def test_init_argfail_domain(self):
        """Failure of the domain arg of the align_tensor.init() user function."""

        self._check_arg_types(RelaxNoneStrError, self.align_tensor_fns.init, 'domain', ('None', 'str'), align_id='Pf1')


    def test_init_argfail_params(self):
        """Failure of the params arg of the align_tensor.init() user function."""

        # Only tuples of length 5 are valid; every other value must fail.
        for dtype in DATA_TYPES:
            if dtype[0] in ('tuple', 'float tuple', 'str tuple') and len(dtype[1]) == 5:
                continue
            self.assertRaises(RelaxTupleNumError, self.align_tensor_fns.init, align_id='Pf1', params=dtype[1])


    def test_init_argfail_scale(self):
        """The scale arg test of the align_tensor.init() user function."""

        self._check_arg_types(RelaxFloatError, self.align_tensor_fns.init, 'scale', ('float',), align_id='Pf1', params=(0.0, 0.0, 0.0, 0.0, 0.0))


    def test_init_argfail_angle_units(self):
        """The angle_units arg test of the align_tensor.init() user function."""

        self._check_arg_types(RelaxStrError, self.align_tensor_fns.init, 'angle_units', ('str',), params=(0.0, 0.0, 0.0, 0.0, 0.0))


    def test_init_argfail_param_types(self):
        """The proper failure of the align_tensor.init() user function for the param_types argument."""

        self._check_arg_types(RelaxIntError, self.align_tensor_fns.init, 'param_types', ('int', 'bin'), align_id='Pf1', params=(0.0, 0.0, 0.0, 0.0, 0.0))


    def test_init_argfail_errors(self):
        """The errors arg test of the align_tensor.init() user function."""

        self._check_arg_types(RelaxBoolError, self.align_tensor_fns.init, 'errors', ('bool',), align_id='Pf1', params=(0.0, 0.0, 0.0, 0.0, 0.0))


    def test_matrix_angles_argfail_basis_set(self):
        """The proper failure of the align_tensor.matrix_angles() user function for the basis_set argument."""

        # An alignment tensor is required before the angles can be calculated.
        align_tensor.init(align_id='a', params=(0.0, 0.0, 0.0, 0.0, 0.0))

        self._check_arg_types(RelaxIntError, self.align_tensor_fns.matrix_angles, 'basis_set', ('int', 'bin'))


    def test_matrix_angles_argfail_basis_tensors(self):
        """The tensors arg unit test of the align_tensor.matrix_angles() user function."""

        # An alignment tensor is required before the angles can be calculated.
        align_tensor.init(align_id='a', params=(0.0, 0.0, 0.0, 0.0, 0.0))

        self._check_arg_types(RelaxNoneListStrError, self.align_tensor_fns.matrix_angles, 'tensors', ('None', 'str list'))


    def test_reduction_argfail_full_tensor(self):
        """Failure of the full_tensor arg of the align_tensor.reduction() user function."""

        self._check_arg_types(RelaxStrError, self.align_tensor_fns.reduction, 'full_tensor', ('str',))


    def test_reduction_argfail_red_tensor(self):
        """Failure of the red_tensor arg of the align_tensor.reduction() user function."""

        self._check_arg_types(RelaxStrError, self.align_tensor_fns.reduction, 'red_tensor', ('str',), full_tensor='test')


    def test_set_domain_argfail_tensor(self):
        """Failure of the tensor arg of the align_tensor.set_domain() user function."""

        self._check_arg_types(RelaxStrError, self.align_tensor_fns.set_domain, 'tensor', ('str',))


    def test_set_domain_argfail_domain(self):
        """Failure of the domain arg of the align_tensor.set_domain() user function."""

        self._check_arg_types(RelaxStrError, self.align_tensor_fns.set_domain, 'domain', ('str',))


    def test_svd_argfail_basis_set(self):
        """The proper failure of the align_tensor.svd() user function for the basis_set argument."""

        # An alignment tensor is required before the SVD can be performed.
        align_tensor.init(align_id='a', params=(0.0, 0.0, 0.0, 0.0, 0.0))

        self._check_arg_types(RelaxIntError, self.align_tensor_fns.svd, 'basis_set', ('int', 'bin'))


    def test_svd_argfail_basis_tensors(self):
        """The tensors arg unit test of the align_tensor.svd() user function."""

        # An alignment tensor is required before the SVD can be performed.
        align_tensor.init(align_id='a', params=(0.0, 0.0, 0.0, 0.0, 0.0))

        self._check_arg_types(RelaxNoneListStrError, self.align_tensor_fns.svd, 'tensors', ('None', 'str list'))
Exemple #46
0
###############################################################################

"""Functions for the local tm grid optimisation tests."""

# Python module imports.
from math import pi
from os import sep
from re import search

# relax module imports.
from prompt.interpreter import Interpreter
from status import Status; status = Status()


# Initialise the relax prompt interpreter for direct user function access:
# no script echoing, no exit on error, and RelaxErrors raised as real
# Python exceptions rather than being printed.
interpreter = Interpreter(show_script=False, quit=False, raise_relax_error=True)
# Attach the user function classes to the interpreter object itself —
# presumably making them callable as interpreter.<class>.<fn>; verify in prompt.interpreter.
interpreter.populate_self()
# Switch the interpreter on so that the user functions below can execute.
interpreter.on()


def create_sequence(num_res):
    """Generate the required sequence.

    @param num_res:     The total number of residues to create.
    @type num_res:      int
    """

    # Create the molecule.
    interpreter.molecule.create(mol_name='Polycarbonate')

    # Create the spins and residues.
Exemple #47
0
class Test_dasha(TestCase):
    """Unit tests for the functions of the 'prompt.dasha' module."""

    def __init__(self, methodName=None):
        """Set up the test case class for the system tests."""

        # Initialise the parent class first.
        super(Test_dasha, self).__init__(methodName)

        # Build a relax interpreter which raises RelaxErrors instead of exiting.
        self.interpreter = Interpreter(show_script=False,
                                       raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)

        # Shortcut to the dasha user function class.
        self.dasha_fns = self.interpreter.dasha

    def _check_arg_types(self, error, fn, arg, allowed):
        """Assert that fn raises error for every DATA_TYPES value outside the allowed set.

        @param error:   The expected RelaxError subclass.
        @param fn:      The user function to exercise.
        @param arg:     The name of the keyword argument under test.
        @param allowed: The DATA_TYPES type names which are valid and hence skipped.
        """

        for dtype in DATA_TYPES:
            # Valid types are not expected to fail; everything else must raise.
            if dtype[0] not in allowed:
                self.assertRaises(error, fn, **{arg: dtype[1]})

    def test_create_argfail_algor(self):
        """Failure of the algor arg of the dasha.create() user function."""

        self._check_arg_types(RelaxStrError, self.dasha_fns.create, 'algor', ('str',))

    def test_create_argfail_dir(self):
        """Failure of the dir arg of the dasha.create() user function."""

        self._check_arg_types(RelaxNoneStrError, self.dasha_fns.create, 'dir', ('None', 'str'))

    def test_create_argfail_force(self):
        """The force arg test of the dasha.create() user function."""

        self._check_arg_types(RelaxBoolError, self.dasha_fns.create, 'force', ('bool',))

    def test_execute_argfail_dir(self):
        """Failure of the dir arg of the dasha.execute() user function."""

        self._check_arg_types(RelaxNoneStrError, self.dasha_fns.execute, 'dir', ('None', 'str'))

    def test_execute_argfail_force(self):
        """The force arg test of the dasha.execute() user function."""

        self._check_arg_types(RelaxBoolError, self.dasha_fns.execute, 'force', ('bool',))

    def test_execute_argfail_binary(self):
        """Failure of the binary arg of the dasha.execute() user function."""

        self._check_arg_types(RelaxStrError, self.dasha_fns.execute, 'binary', ('str',))

    def test_extract_argfail_dir(self):
        """Failure of the dir arg of the dasha.extract() user function."""

        self._check_arg_types(RelaxNoneStrError, self.dasha_fns.extract, 'dir', ('None', 'str'))
class dAuvergne_protocol:
    """The model-free auto-analysis."""

    # Some class variables.
    opt_func_tol = 1e-25
    opt_max_iterations = int(1e7)

    def __init__(self, pipe_name=None, pipe_bundle=None, results_dir=None, write_results_dir=None, diff_model=None, mf_models=None, local_tm_models=None, grid_inc=11, diff_tensor_grid_inc=None, min_algor='newton', mc_sim_num=500, max_iter=None, user_fns=None, conv_loop=True):
        """Perform the full model-free analysis protocol of d'Auvergne and Gooley, 2008b.

        @keyword pipe_name:             The name of the data pipe containing the sequence info.  This data pipe should have all values set including the CSA value, the bond length, the heteronucleus name and proton name.  It should also have all relaxation data loaded.
        @type pipe_name:                str
        @keyword pipe_bundle:           The data pipe bundle to associate all spawned data pipes with.
        @type pipe_bundle:              str
        @keyword results_dir:           The directory where optimisation results will read from.  Results will also be saved to this directory if the write_results_dir argument is not given.
        @type results_dir:              str
        @keyword write_results_dir:     The directory where optimisation results will be saved in.  If None, it will default to the value of the results_dir argument.  This is mainly used for debugging.
        @type write_results_dir:        str or None
        @keyword diff_model:            The global diffusion model to optimise.  This can be one of 'local_tm', 'sphere', 'oblate', 'prolate', 'ellipsoid', or 'final'.  If all or a subset of these are supplied as a list, then these will be automatically looped over and calculated.
        @type diff_model:               str or list of str
        @keyword mf_models:             The model-free models.  If None, this defaults to ['m0', ..., 'm9'].
        @type mf_models:                list of str or None
        @keyword local_tm_models:       The model-free models.  If None, this defaults to ['tm0', ..., 'tm9'].
        @type local_tm_models:          list of str or None
        @keyword grid_inc:              The grid search size (the number of increments per dimension).
        @type grid_inc:                 int
        @keyword diff_tensor_grid_inc:  A dictionary of grid search sizes for the optimisation of the sphere, prolate spheroid, oblate spheroid, and ellipsoid.  If None, this defaults to {'sphere': 11, 'prolate': 11, 'oblate': 11, 'ellipsoid': 6}.
        @type diff_tensor_grid_inc:     dict of int or None
        @keyword min_algor:             The minimisation algorithm (in most cases this should not be changed).
        @type min_algor:                str
        @keyword mc_sim_num:            The number of Monte Carlo simulations to be used for error analysis at the end of the analysis.
        @type mc_sim_num:               int
        @keyword max_iter:              The maximum number of iterations for the global iteration.  Set to None, then the algorithm iterates until convergence.
        @type max_iter:                 int or None.
        @keyword user_fns:              A dictionary of replacement user functions.  These will overwrite the standard user functions.  The key should be the name of the user function or user function class and the value should be the function or class instance.
        @type user_fns:                 dict
        @keyword conv_loop:             Automatic looping over all rounds until convergence.
        @type conv_loop:                bool
        """

        # Expand the None sentinels into fresh default objects (avoids the
        # shared mutable default argument pitfall - each instance gets its own
        # lists and dictionary).
        if mf_models is None:
            mf_models = ['m0', 'm1', 'm2', 'm3', 'm4', 'm5', 'm6', 'm7', 'm8', 'm9']
        if local_tm_models is None:
            local_tm_models = ['tm0', 'tm1', 'tm2', 'tm3', 'tm4', 'tm5', 'tm6', 'tm7', 'tm8', 'tm9']
        if diff_tensor_grid_inc is None:
            diff_tensor_grid_inc = {'sphere': 11, 'prolate': 11, 'oblate': 11, 'ellipsoid': 6}

        # Execution lock.
        status.exec_lock.acquire(pipe_bundle, mode='auto-analysis')

        # Store the args.
        self.pipe_name = pipe_name
        self.pipe_bundle = pipe_bundle
        self.mf_models = mf_models
        self.local_tm_models = local_tm_models
        self.grid_inc = grid_inc
        self.diff_tensor_grid_inc = diff_tensor_grid_inc
        self.min_algor = min_algor
        self.mc_sim_num = mc_sim_num
        self.max_iter = max_iter
        self.conv_loop = conv_loop

        # The model-free data pipe names.
        self.mf_model_pipes = []
        for i in range(len(self.mf_models)):
            self.mf_model_pipes.append(self.name_pipe(self.mf_models[i]))
        self.local_tm_model_pipes = []
        for i in range(len(self.local_tm_models)):
            self.local_tm_model_pipes.append(self.name_pipe(self.local_tm_models[i]))

        # The diffusion models.
        if isinstance(diff_model, list):
            self.diff_model_list = diff_model
        else:
            self.diff_model_list = [diff_model]

        # Project directory (i.e. directory containing the model-free model results and the newly generated files)
        if results_dir:
            self.results_dir = results_dir + sep
        else:
            self.results_dir = getcwd() + sep
        if write_results_dir:
            self.write_results_dir = write_results_dir + sep
        else:
            self.write_results_dir = self.results_dir

        # Data checks.
        self.check_vars()

        # Set the data pipe to the current data pipe.
        if self.pipe_name != cdp_name():
            switch(self.pipe_name)

        # Some info for the status.
        self.status_setup()

        # Load the interpreter.
        self.interpreter = Interpreter(show_script=False, quit=False, raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)

        # Replacement user functions.
        if user_fns:
            for name in user_fns:
                setattr(self.interpreter, name, user_fns[name])

        # Execute the protocol.
        try:
            # Loop over the models.
            for self.diff_model in self.diff_model_list:
                # Wait a little while between diffusion models.
                sleep(1)

                # Set the global model name.
                status.auto_analysis[self.pipe_bundle].diff_model = self.diff_model

                # Initialise the convergence data structures.
                self.conv_data = Container()
                self.conv_data.chi2 = []
                self.conv_data.models = []
                self.conv_data.diff_vals = []
                if self.diff_model == 'sphere':
                    self.conv_data.diff_params = ['tm']
                elif self.diff_model == 'oblate' or self.diff_model == 'prolate':
                    self.conv_data.diff_params = ['tm', 'Da', 'theta', 'phi']
                elif self.diff_model == 'ellipsoid':
                    self.conv_data.diff_params = ['tm', 'Da', 'Dr', 'alpha', 'beta', 'gamma']
                self.conv_data.spin_ids = []
                self.conv_data.mf_params = []
                self.conv_data.mf_vals = []

                # Execute the analysis for each diffusion model.
                self.execute()

        # Clean up.
        finally:
            # Finish and unlock execution.
            status.auto_analysis[self.pipe_bundle].fin = True
            status.current_analysis = None
            status.exec_lock.release()


    def check_vars(self):
        """Check that the user has set the variables correctly.

        @raises RelaxError:             If any user variable is of the wrong type or value.
        @raises RelaxNoSequenceError:   If no molecule/residue/spin sequence data exists.
        @raises RelaxNoRiError:         If no relaxation data is loaded.
        @raises RelaxNoValueError:      If required per-spin or interatomic values are missing.
        """

        # The pipe bundle.
        if not isinstance(self.pipe_bundle, str):
            raise RelaxError("The pipe bundle name '%s' is invalid." % self.pipe_bundle)

        # The diff model.
        valid_models = ['local_tm', 'sphere', 'oblate', 'prolate', 'ellipsoid', 'final']
        for i in range(len(self.diff_model_list)):
            if self.diff_model_list[i] not in valid_models:
                raise RelaxError("The diff_model value '%s' is incorrectly set.  It must be one of %s." % (self.diff_model_list[i], valid_models))

        # Model-free models.
        mf_models = ['m0', 'm1', 'm2', 'm3', 'm4', 'm5', 'm6', 'm7', 'm8', 'm9']
        local_tm_models = ['tm0', 'tm1', 'tm2', 'tm3', 'tm4', 'tm5', 'tm6', 'tm7', 'tm8', 'tm9']
        if not isinstance(self.mf_models, list):
            raise RelaxError("The self.mf_models user variable must be a list.")
        if not isinstance(self.local_tm_models, list):
            raise RelaxError("The self.local_tm_models user variable must be a list.")
        for i in range(len(self.mf_models)):
            if self.mf_models[i] not in mf_models:
                raise RelaxError("The self.mf_models user variable '%s' is incorrectly set.  It must be one of %s." % (self.mf_models, mf_models))
        for i in range(len(self.local_tm_models)):
            if self.local_tm_models[i] not in local_tm_models:
                raise RelaxError("The self.local_tm_models user variable '%s' is incorrectly set.  It must be one of %s." % (self.local_tm_models, local_tm_models))

        # Sequence data.
        if not exists_mol_res_spin_data():
            raise RelaxNoSequenceError(self.pipe_name)

        # Relaxation data.
        # Bug fix: the original raised RelaxNoRiError(ri_id) with 'ri_id'
        # undefined, producing a NameError instead of the intended error.
        if not hasattr(cdp, 'ri_ids') or len(cdp.ri_ids) == 0:
            raise RelaxNoRiError("all")

        # Insufficient data.
        if len(cdp.ri_ids) <= 3:
            raise RelaxError("Insufficient relaxation data, 4 or more data sets are essential for the execution of this script.")

        # Spin vars.
        for spin, spin_id in spin_loop(return_id=True):
            # Skip deselected spins.
            if not spin.select:
                continue

            # Test if the isotope type has been set.
            if not hasattr(spin, 'isotope') or spin.isotope is None:
                raise RelaxNoValueError("nuclear isotope type", spin_id=spin_id)

            # Skip spins with no relaxation data.
            if not hasattr(spin, 'ri_data') or spin.ri_data is None:
                continue

            # Test if the CSA value has been set.
            if not hasattr(spin, 'csa') or spin.csa is None:
                raise RelaxNoValueError("CSA", spin_id=spin_id)

        # Interatomic vars.
        for interatom in interatomic_loop():
            # Get the corresponding spins.
            spin1 = return_spin(interatom.spin_id1)
            spin2 = return_spin(interatom.spin_id2)

            # Skip deselected spins.
            if not spin1.select or not spin2.select:
                continue

            # Test if the interatomic distance has been set.
            if not hasattr(interatom, 'r') or interatom.r is None:
                raise RelaxNoValueError("interatomic distance", spin_id=interatom.spin_id1, spin_id2=interatom.spin_id2)

        # Min vars.
        if not isinstance(self.grid_inc, int):
            raise RelaxError("The grid_inc user variable '%s' is incorrectly set.  It should be an integer." % self.grid_inc)
        if not isinstance(self.diff_tensor_grid_inc, dict):
            raise RelaxError("The diff_tensor_grid_inc user variable '%s' is incorrectly set.  It should be a dictionary." % self.diff_tensor_grid_inc)
        for tensor in ['sphere', 'prolate', 'oblate', 'ellipsoid']:
            if tensor not in self.diff_tensor_grid_inc:
                raise RelaxError("The diff_tensor_grid_inc user variable '%s' is incorrectly set.  It should contain the '%s' key." % (self.diff_tensor_grid_inc, tensor))
            if not isinstance(self.diff_tensor_grid_inc[tensor], int):
                raise RelaxError("The diff_tensor_grid_inc user variable '%s' is incorrectly set.  The value corresponding to the key '%s' should be an integer." % (self.diff_tensor_grid_inc, tensor))
        if not isinstance(self.min_algor, str):
            raise RelaxError("The min_algor user variable '%s' is incorrectly set.  It should be a string." % self.min_algor)
        if not isinstance(self.mc_sim_num, int):
            raise RelaxError("The mc_sim_num user variable '%s' is incorrectly set.  It should be an integer." % self.mc_sim_num)

        # Looping.
        if not isinstance(self.conv_loop, bool):
            raise RelaxError("The conv_loop user variable '%s' is incorrectly set.  It should be one of the booleans True or False." % self.conv_loop)


    def convergence(self):
        """Test for the convergence of the global model.

        The chi-squared value, the model-free model composition, the diffusion
        tensor parameters, and the model-free parameters of the current round
        are compared against every previous round.  Convergence requires an
        exact match (IEEE-754 identity for the floats) with one of them.

        @return:    True if the global model has converged (or the maximum
                    iteration count was hit), False otherwise.
        @rtype:     bool
        """

        # Print out.
        print("\n\n\n")
        print("#####################")
        print("# Convergence tests #")
        print("#####################\n")

        # Maximum number of iterations reached.
        if self.max_iter and self.round > self.max_iter:
            print("Maximum number of global iterations reached.  Terminating the protocol before convergence has been reached.")
            return True

        # Store the data of the current data pipe.
        self.conv_data.chi2.append(cdp.chi2)

        # Create a string representation of the model-free models of the current data pipe.
        curr_models = ''
        for spin in spin_loop():
            if hasattr(spin, 'model'):
                if not spin.model == 'None':
                    curr_models = curr_models + spin.model
        self.conv_data.models.append(curr_models)

        # Store the diffusion tensor parameters.
        self.conv_data.diff_vals.append([])
        for param in self.conv_data.diff_params:
            # Get the parameter values.
            self.conv_data.diff_vals[-1].append(getattr(cdp.diff_tensor, param))

        # Store the model-free parameters.
        self.conv_data.mf_vals.append([])
        self.conv_data.mf_params.append([])
        self.conv_data.spin_ids.append([])
        for spin, spin_id in spin_loop(return_id=True):
            # Skip spin systems with no 'params' object.
            if not hasattr(spin, 'params'):
                continue

            # Add the spin ID, parameters, and empty value list.
            self.conv_data.spin_ids[-1].append(spin_id)
            self.conv_data.mf_params[-1].append([])
            self.conv_data.mf_vals[-1].append([])

            # Loop over the parameters.
            for j in range(len(spin.params)):
                # Get the parameters and values.
                self.conv_data.mf_params[-1][-1].append(spin.params[j])
                self.conv_data.mf_vals[-1][-1].append(getattr(spin, spin.params[j].lower()))

        # No need for tests.
        if self.round == 1:
            print("First round of optimisation, skipping the convergence tests.\n\n\n")
            return False

        # Loop over the iterations.
        converged = False
        for i in range(self.start_round, self.round - 1):
            # Print out.
            print("\n\n\n# Comparing the current iteration to iteration %i.\n" % (i+1))

            # Index.
            index = i - self.start_round

            # Chi-squared test.
            print("Chi-squared test:")
            print("    chi2 (iter %i):  %s" % (i+1, self.conv_data.chi2[index]))
            print("        (as an IEEE-754 byte array:  %s)" % floatAsByteArray(self.conv_data.chi2[index]))
            print("    chi2 (iter %i):  %s" % (self.round, self.conv_data.chi2[-1]))
            print("        (as an IEEE-754 byte array:  %s)" % floatAsByteArray(self.conv_data.chi2[-1]))
            print("    chi2 (difference):  %s" % (self.conv_data.chi2[index] - self.conv_data.chi2[-1]))
            if self.conv_data.chi2[index] == self.conv_data.chi2[-1]:
                print("    The chi-squared value has converged.\n")
            else:
                print("    The chi-squared value has not converged.\n")
                continue

            # Identical model-free model test.
            print("Identical model-free models test:")
            if self.conv_data.models[index] == self.conv_data.models[-1]:
                print("    The model-free models have converged.\n")
            else:
                print("    The model-free models have not converged.\n")
                continue

            # Identical diffusion tensor parameter value test.
            print("Identical diffusion tensor parameter test:")
            params_converged = True
            for k in range(len(self.conv_data.diff_params)):
                # Test if not identical.
                if self.conv_data.diff_vals[index][k] != self.conv_data.diff_vals[-1][k]:
                    # Bug fix: the original printed the stale loop variable
                    # 'param' (left over from the storage loop above) rather
                    # than the parameter which actually differs.
                    print("    Parameter:   %s" % self.conv_data.diff_params[k])
                    print("    Value (iter %i):  %s" % (i+1, self.conv_data.diff_vals[index][k]))
                    print("        (as an IEEE-754 byte array:  %s)" % floatAsByteArray(self.conv_data.diff_vals[index][k]))
                    print("    Value (iter %i):  %s" % (self.round, self.conv_data.diff_vals[-1][k]))
                    print("        (as an IEEE-754 byte array:  %s)" % floatAsByteArray(self.conv_data.diff_vals[-1][k]))
                    print("    The diffusion parameters have not converged.\n")
                    params_converged = False
                    break
            if not params_converged:
                continue
            print("    The diffusion tensor parameters have converged.\n")

            # Identical model-free parameter value test.
            print("\nIdentical model-free parameter test:")
            if len(self.conv_data.spin_ids[index]) != len(self.conv_data.spin_ids[-1]):
                print("    Different number of spins.")
                continue
            for j in range(len(self.conv_data.spin_ids[-1])):
                # Loop over the parameters.
                for k in range(len(self.conv_data.mf_params[-1][j])):
                    # Test if not identical.
                    if self.conv_data.mf_vals[index][j][k] != self.conv_data.mf_vals[-1][j][k]:
                        print("    Spin ID:     %s" % self.conv_data.spin_ids[-1][j])
                        print("    Parameter:   %s" % self.conv_data.mf_params[-1][j][k])
                        print("    Value (iter %i): %s" % (i+1, self.conv_data.mf_vals[index][j][k]))
                        print("        (as an IEEE-754 byte array:  %s)" % floatAsByteArray(self.conv_data.mf_vals[index][j][k]))
                        print("    Value (iter %i): %s" % (self.round, self.conv_data.mf_vals[-1][j][k]))
                        # Bug fix: the original printed the byte array of the
                        # old value twice; this is the current round's value.
                        print("        (as an IEEE-754 byte array:  %s)" % floatAsByteArray(self.conv_data.mf_vals[-1][j][k]))
                        print("    The model-free parameters have not converged.\n")
                        params_converged = False
                        break

                # Bug fix: the inner break only exits the parameter loop -
                # also exit the spin loop once a mismatch has been found.
                if not params_converged:
                    break
            if not params_converged:
                continue
            print("    The model-free parameters have converged.\n")

            # Convergence.
            converged = True
            break


        # Final printout.
        ##################

        print("\nConvergence:")
        if converged:
            # Update the status.
            status.auto_analysis[self.pipe_bundle].convergence = True

            # Print out.
            print("    [ Yes ]")

            # Return the termination condition.
            return True
        else:
            # Print out.
            print("    [ No ]")

            # Return False to not terminate.
            return False


    def determine_rnd(self, model=None):
        """Function for returning the name of next round of optimisation."""

        # Get a list of all files in the directory model.  If no directory exists, set the round to 'init' or 0.
        try:
            dir_list = listdir(self.results_dir+sep+model)
        except:
            return 0

        # Set the round to 'init' or 0 if there is no directory called 'init'.
        if 'init' not in dir_list:
            return 0

        # Create a list of all files which begin with 'round_'.
        rnd_dirs = []
        for file in dir_list:
            if search('^round_', file):
                rnd_dirs.append(file)

        # Create a sorted list of integer round numbers.
        numbers = []
        for dir in rnd_dirs:
            try:
                numbers.append(int(dir[6:]))
            except:
                pass
        numbers.sort()

        # No directories beginning with 'round_' exist, set the round to 1.
        if not len(numbers):
            return 1

        # The highest number.
        max_round = numbers[-1]

        # Check that the opt/results file exists for the round (working backwards).
        for i in range(max_round, -1, -1):
            # Assume the round is complete.
            complete_round = i

            # The file root.
            file_root = self.results_dir + sep + model + sep + "round_%i" % i + sep + 'opt' + sep + 'results'

            # Stop looping when the opt/results file is found.
            if access(file_root + '.bz2', F_OK):
                break
            if access(file_root + '.gz', F_OK):
                break
            if access(file_root, F_OK):
                break

        # No round, so assume the initial state.
        if complete_round == 0:
            return 0

        # Determine the number for the next round (add 1 to the highest completed round).
        return complete_round + 1


    def execute(self):
        """Execute the protocol for the current diffusion model.

        Dispatches on self.diff_model: the 'local_tm' model (MI), the global
        diffusion models 'sphere'/'prolate'/'oblate'/'ellipsoid' (MII-MV,
        iterated until convergence), or the 'final' model selection plus
        Monte Carlo error analysis.  Any other value raises a RelaxError.
        """

        # MI - Local tm.
        ################

        if self.diff_model == 'local_tm':
            # Base directory to place files into.
            self.base_dir = self.results_dir+'local_tm'+sep

            # Sequential optimisation of all model-free models (function must be modified to suit).
            self.multi_model(local_tm=True)

            # Model selection.
            self.model_selection(modsel_pipe=self.name_pipe('aic'), dir=self.base_dir + 'aic')


        # Diffusion models MII to MV.
        #############################

        elif self.diff_model == 'sphere' or self.diff_model == 'prolate' or self.diff_model == 'oblate' or self.diff_model == 'ellipsoid':
            # No local_tm directory!  The MI results are a prerequisite for MII-MV.
            dir_list = listdir(self.results_dir)
            if 'local_tm' not in dir_list:
                raise RelaxError("The local_tm model must be optimised first.")

            # The initial round of optimisation - not zero if calculations were interrupted.
            self.start_round = self.determine_rnd(model=self.diff_model)

            # Loop until convergence if conv_loop is set, otherwise just loop once.
            # This looping could be made much cleaner by removing the dependence on the determine_rnd() function.
            while True:
                # Determine which round of optimisation to do (init, round_1, round_2, etc).
                self.round = self.determine_rnd(model=self.diff_model)
                status.auto_analysis[self.pipe_bundle].round = self.round

                # Initial round of optimisation for diffusion models MII to MV.
                if self.round == 0:
                    # Base directory to place files into.
                    self.base_dir = self.results_dir+self.diff_model+sep+'init'+sep

                    # Run name.
                    name = self.name_pipe(self.diff_model)

                    # Create the data pipe (deleting the old one if it exists).
                    if has_pipe(name):
                        self.interpreter.pipe.delete(name)
                    self.interpreter.pipe.create(name, 'mf', bundle=self.pipe_bundle)

                    # Load the local tm diffusion model MI results.
                    self.interpreter.results.read(file='results', dir=self.results_dir+'local_tm'+sep+'aic')

                    # Remove the tm parameter (the local tm values are replaced by the global tensor).
                    self.interpreter.model_free.remove_tm()

                    # Add an arbitrary diffusion tensor which will be optimised.
                    # The grid increment count comes from the per-tensor dictionary.
                    if self.diff_model == 'sphere':
                        self.interpreter.diffusion_tensor.init(10e-9, fixed=False)
                        inc = self.diff_tensor_grid_inc['sphere']
                    elif self.diff_model == 'prolate':
                        self.interpreter.diffusion_tensor.init((10e-9, 0, 0, 0), spheroid_type='prolate', fixed=False)
                        inc = self.diff_tensor_grid_inc['prolate']
                    elif self.diff_model == 'oblate':
                        self.interpreter.diffusion_tensor.init((10e-9, 0, 0, 0), spheroid_type='oblate', fixed=False)
                        inc = self.diff_tensor_grid_inc['oblate']
                    elif self.diff_model == 'ellipsoid':
                        self.interpreter.diffusion_tensor.init((10e-09, 0, 0, 0, 0, 0), fixed=False)
                        inc = self.diff_tensor_grid_inc['ellipsoid']

                    # Minimise just the diffusion tensor (all spin parameters are fixed).
                    self.interpreter.fix('all_spins')
                    self.interpreter.grid_search(inc=inc)
                    self.interpreter.minimise(self.min_algor, func_tol=self.opt_func_tol, max_iter=self.opt_max_iterations)

                    # Write the results.
                    self.interpreter.results.write(file='results', dir=self.base_dir, force=True)


                # Normal round of optimisation for diffusion models MII to MV.
                else:
                    # Base directory to place files into.
                    self.base_dir = self.results_dir+self.diff_model + sep+'round_'+repr(self.round)+sep

                    # Load the optimised diffusion tensor from the previous round (or 'init' for round 1).
                    self.load_tensor()

                    # Sequential optimisation of all model-free models (function must be modified to suit).
                    self.multi_model()

                    # Model selection.
                    self.model_selection(modsel_pipe=self.name_pipe('aic'), dir=self.base_dir + 'aic')

                    # Final optimisation of all diffusion and model-free parameters (release everything).
                    self.interpreter.fix('all', fixed=False)

                    # Minimise all parameters.
                    self.interpreter.minimise(self.min_algor, func_tol=self.opt_func_tol, max_iter=self.opt_max_iterations)

                    # Write the results.
                    dir = self.base_dir + 'opt'
                    self.interpreter.results.write(file='results', dir=dir, force=True)

                    # Test for convergence.
                    converged = self.convergence()

                    # Break out of the infinite while loop if automatic looping is not activated or if convergence has occurred.
                    if converged or not self.conv_loop:
                        break

            # Unset the status.
            status.auto_analysis[self.pipe_bundle].round = None


        # Final run.
        ############

        elif self.diff_model == 'final':
            # Diffusion model selection.
            ############################

            # The contents of the results directory.
            dir_list = listdir(self.results_dir)

            # Check that the minimal set of global diffusion models required for the protocol has been optimised.
            min_models = ['local_tm', 'sphere']
            for model in min_models:
                if model not in dir_list:
                    raise RelaxError("The minimum set of global diffusion models required for the protocol have not been optimised, the '%s' model results cannot be found." % model)

            # Build a list of all global diffusion models optimised.
            all_models = ['local_tm', 'sphere', 'prolate', 'oblate', 'ellipsoid']
            self.opt_models = []
            self.pipes = []
            for model in all_models:
                if model in dir_list:
                    self.opt_models.append(model)
                    self.pipes.append(self.name_pipe(model))

            # Remove all temporary pipes used in this auto-analysis.
            for name in pipe_names(bundle=self.pipe_bundle):
                if name in self.pipes + self.mf_model_pipes + self.local_tm_model_pipes + [self.name_pipe('aic'), self.name_pipe('previous')]:
                    self.interpreter.pipe.delete(name)

            # Create the local_tm data pipe.
            self.interpreter.pipe.create(self.name_pipe('local_tm'), 'mf', bundle=self.pipe_bundle)

            # Load the local tm diffusion model MI results.
            self.interpreter.results.read(file='results', dir=self.results_dir+'local_tm'+sep+'aic')

            # Loop over models MII to MV.
            for model in ['sphere', 'prolate', 'oblate', 'ellipsoid']:
                # Skip missing models.
                if model not in self.opt_models:
                    continue

                # Determine which was the last round of optimisation for each of the models.
                self.round = self.determine_rnd(model=model) - 1

                # If no directories beginning with 'round_' exist, the script has not been properly utilised!
                if self.round < 1:
                    # Construct the name of the diffusion tensor.
                    name = model
                    if model == 'prolate' or model == 'oblate':
                        name = name + ' spheroid'

                    # Throw an error to prevent misuse of the script.
                    raise RelaxError("Multiple rounds of optimisation of the " + name + " (between 8 to 15) are required for the proper execution of this script.")

                # Create the data pipe.
                self.interpreter.pipe.create(self.name_pipe(model), 'mf', bundle=self.pipe_bundle)

                # Load the diffusion model results.
                self.interpreter.results.read(file='results', dir=self.results_dir+model + sep+'round_'+repr(self.round)+sep+'opt')

            # Model selection between MI to MV.
            self.model_selection(modsel_pipe=self.name_pipe('final'), write_flag=False)


            # Monte Carlo simulations.
            ##########################

            # Fix the diffusion tensor, if it exists.
            if hasattr(get_pipe(self.name_pipe('final')), 'diff_tensor'):
                self.interpreter.fix('diff')

            # Simulations.
            self.interpreter.monte_carlo.setup(number=self.mc_sim_num)
            self.interpreter.monte_carlo.create_data()
            self.interpreter.monte_carlo.initial_values()
            self.interpreter.minimise(self.min_algor, func_tol=self.opt_func_tol, max_iter=self.opt_max_iterations)
            self.interpreter.eliminate()
            self.interpreter.monte_carlo.error_analysis()


            # Write the final results.
            ##########################

            # Create results files and plots of the data.
            self.write_results()


        # Unknown script behaviour.
        ###########################

        else:
            raise RelaxError("Unknown diffusion model, change the value of 'self.diff_model'")


    def load_tensor(self):
        """Load the optimised diffusion tensor of the previous optimisation round."""

        # Delete any stale 'previous' data pipe before creating a fresh one.
        prev_pipe = self.name_pipe('previous')
        if has_pipe(prev_pipe):
            self.interpreter.pipe.delete(prev_pipe)
        self.interpreter.pipe.create(prev_pipe, 'mf', bundle=self.pipe_bundle)

        # The source of the optimised tensor:  the initial results for round 1,
        # otherwise the 'opt' results of the preceding round.
        if self.round == 1:
            tensor_dir = self.results_dir+self.diff_model + sep+'init'
        else:
            tensor_dir = self.results_dir+self.diff_model + sep+'round_'+repr(self.round-1)+sep+'opt'
        self.interpreter.results.read('results', tensor_dir)


    def model_selection(self, modsel_pipe=None, dir=None, write_flag=True):
        """Perform AIC model selection over the self.pipes data pipes.

        @keyword modsel_pipe:   The name of the data pipe to hold the model selection results.
        @type modsel_pipe:      str
        @keyword dir:           The directory to write the results file into.
        @type dir:              str or None
        @keyword write_flag:    Whether a results file should be written out.
        @type write_flag:       bool
        """

        # A stale model selection pipe must be removed first.
        interp = self.interpreter
        if has_pipe(modsel_pipe):
            interp.pipe.delete(modsel_pipe)

        # AIC selection over all candidate pipes.
        interp.model_selection(method='AIC', modsel_pipe=modsel_pipe, bundle=self.pipe_bundle, pipes=self.pipes)

        # Optionally save the selected results.
        if write_flag:
            interp.results.write(file='results', dir=dir, force=True)


    def multi_model(self, local_tm=False):
        """Optimise all preset model-free models.

        @keyword local_tm:  A flag which if True causes the local tm models to be optimised instead of the standard model-free models.
        @type local_tm:     bool
        """

        # The models to optimise (these also act as data pipe name prefixes).
        if local_tm:
            models = self.local_tm_models
        else:
            models = self.mf_models

        # Generate the unique pipe name for each model.
        # (A dead assignment of self.pipes to the raw model list, which was
        # immediately overwritten here, has been removed.)
        self.pipes = []
        for i in range(len(models)):
            self.pipes.append(self.name_pipe(models[i]))

        # Loop over the data pipes.
        for i in range(len(models)):
            # Place the model name into the status container.
            status.auto_analysis[self.pipe_bundle].current_model = models[i]

            # Create the data pipe (by copying), deleting any old copy first.
            if has_pipe(self.pipes[i]):
                self.interpreter.pipe.delete(self.pipes[i])
            self.interpreter.pipe.copy(self.pipe_name, self.pipes[i], bundle_to=self.pipe_bundle)
            self.interpreter.pipe.switch(self.pipes[i])

            # Copy the diffusion tensor from the 'opt' data pipe and prevent it from being minimised.
            if not local_tm:
                self.interpreter.diffusion_tensor.copy(self.name_pipe('previous'))
                self.interpreter.fix('diff')

            # Select the model-free model.
            self.interpreter.model_free.select_model(model=models[i])

            # Minimise.
            self.interpreter.grid_search(inc=self.grid_inc)
            self.interpreter.minimise(self.min_algor, func_tol=self.opt_func_tol, max_iter=self.opt_max_iterations)

            # Model elimination.
            self.interpreter.eliminate()

            # Write the results.
            dir = self.base_dir + models[i]
            self.interpreter.results.write(file='results', dir=dir, force=True)

        # Unset the status.
        status.auto_analysis[self.pipe_bundle].current_model = None


    def name_pipe(self, prefix):
        """Generate a unique name for the data pipe.

        @param prefix:  The prefix of the data pipe name.
        @type prefix:   str
        @return:        The pipe name formed from the prefix and the pipe bundle.
        @rtype:         str
        """

        # Combine the prefix with the bundle name to guarantee uniqueness.
        return "%s - %s" % (prefix, self.pipe_bundle)


    def status_setup(self):
        """Initialise the status object for this auto-analysis."""

        # Register this analysis with the global status object and make it current.
        status.init_auto_analysis(self.pipe_bundle, type='dauvergne_protocol')
        status.current_analysis = self.pipe_bundle

        # Shorthand for this analysis' status container (the same object is mutated).
        analysis = status.auto_analysis[self.pipe_bundle]

        # The global diffusion model.
        analysis.diff_model = None

        # The round of optimisation, i.e. the global iteration.
        analysis.round = None

        # The lists of model-free models for optimisation.
        analysis.local_tm_models = self.local_tm_models
        analysis.mf_models = self.mf_models

        # The current model-free model.
        analysis.current_model = None

        # The maximum number of iterations and convergence flag of the global model.
        analysis.max_iter = self.max_iter
        analysis.convergence = False


    def write_results(self):
        """Create results files, Grace plots, value files, and macros of the final model-free results."""

        # Save the results file.
        final_dir = self.write_results_dir + 'final'
        self.interpreter.results.write(file='results', dir=final_dir, force=True)

        # The Grace plots, as (x data type, y data type, file name) triplets.
        grace_dir = self.write_results_dir + 'final' + sep + 'grace'
        grace_plots = [
            ('res_num', 's2',  's2.agr'),
            ('res_num', 's2f', 's2f.agr'),
            ('res_num', 's2s', 's2s.agr'),
            ('res_num', 'te',  'te.agr'),
            ('res_num', 'tf',  'tf.agr'),
            ('res_num', 'ts',  'ts.agr'),
            ('res_num', 'rex', 'rex.agr'),
            ('s2',      'te',  's2_vs_te.agr'),
            ('s2',      'rex', 's2_vs_rex.agr'),
            ('te',      'rex', 'te_vs_rex.agr'),
        ]
        for x_type, y_type, file_name in grace_plots:
            self.interpreter.grace.write(x_data_type=x_type, y_data_type=y_type, file=file_name, dir=grace_dir, force=True)

        # Write the parameter values to text files.
        for param in ['s2', 's2f', 's2s', 'te', 'tf', 'ts', 'rex', 'local_tm']:
            self.interpreter.value.write(param=param, file='%s.txt' % param, dir=final_dir, force=True)

        # Rex values scaled to each spectrometer field strength.
        frqs = spectrometer.get_frequencies()
        for i in range(len(frqs)):
            comment = "This is the Rex value with units rad.s^-1 scaled to a magnetic field strength of %s MHz." % (frqs[i]/1e6)
            self.interpreter.value.write(param='rex', file='rex_%s.txt'%int(frqs[i]/1e6), dir=final_dir, scaling=(2.0*pi*frqs[i])**2, comment=comment, force=True)

        # The PyMOL and Molmol macros share the same set of data types.
        macro_types = ['s2', 's2f', 's2s', 'amp_fast', 'amp_slow', 'te', 'tf', 'ts', 'time_fast', 'time_slow', 'rex']
        pymol_dir = self.write_results_dir + 'final' + sep + 'pymol'
        for data_type in macro_types:
            self.interpreter.pymol.macro_write(data_type=data_type, dir=pymol_dir, force=True)
        molmol_dir = self.write_results_dir + 'final' + sep + 'molmol'
        for data_type in macro_types:
            self.interpreter.molmol.macro_write(data_type=data_type, dir=molmol_dir, force=True)

        # Create a diffusion tensor representation of the tensor, if structural data and a tensor are present.
        if hasattr(cdp, 'structure') and hasattr(cdp, 'diff_tensor'):
            self.interpreter.structure.create_diff_tensor_pdb(file="tensor.pdb", dir=final_dir, force=True)
# Exemple #49
# 0
class Relax_disp:
    """The relaxation dispersion auto-analysis."""

    # Some class variables.
    opt_func_tol = 1e-25
    opt_max_iterations = int(1e7)

    def __init__(self,
                 pipe_name=None,
                 pipe_bundle=None,
                 results_dir=None,
                 models=None,
                 grid_inc=11,
                 mc_sim_num=500,
                 exp_mc_sim_num=None,
                 modsel='AIC',
                 pre_run_dir=None,
                 optimise_r2eff=False,
                 insignificance=0.0,
                 numeric_only=False,
                 mc_sim_all_models=False,
                 eliminate=True,
                 set_grid_r20=False,
                 r1_fit=False):
        """Perform a full relaxation dispersion analysis for the given list of models.

        @keyword pipe_name:                 The name of the data pipe containing all of the data for the analysis.
        @type pipe_name:                    str
        @keyword pipe_bundle:               The data pipe bundle to associate all spawned data pipes with.
        @type pipe_bundle:                  str
        @keyword results_dir:               The directory where results files are saved.
        @type results_dir:                  str
        @keyword models:                    The list of relaxation dispersion models to optimise.  This defaults to [MODEL_R2EFF] when not supplied.
        @type models:                       list of str or None
        @keyword grid_inc:                  Number of grid search increments.  If set to None, then the grid search will be turned off and the default parameter values will be used instead.
        @type grid_inc:                     int or None
        @keyword mc_sim_num:                The number of Monte Carlo simulations to be used for error analysis at the end of the analysis.
        @type mc_sim_num:                   int
        @keyword exp_mc_sim_num:            The number of Monte Carlo simulations for the error analysis in the 'R2eff' model when exponential curves are fitted.  This defaults to the value of the mc_sim_num argument when not given.  When set to '-1', the R2eff errors are estimated from the Covariance matrix.  For the 2-point fixed-time calculation for the 'R2eff' model, this argument is ignored.
        @type exp_mc_sim_num:               int or None
        @keyword modsel:                    The model selection technique to use in the analysis to determine which model is the best for each spin cluster.  This can currently be one of 'AIC', 'AICc', and 'BIC'.
        @type modsel:                       str
        @keyword pre_run_dir:               The optional directory containing the dispersion auto-analysis results from a previous run.  The optimised parameters from these previous results will be used as the starting point for optimisation rather than performing a grid search.  This is essential for when large spin clusters are specified, as a grid search becomes prohibitively expensive with clusters of three or more spins.  At some point a RelaxError will occur because the grid search is impossibly large.  For the cluster specific parameters, i.e. the populations of the states and the exchange parameters, an average value will be used as the starting point.  For all other parameters, the R20 values for each spin and magnetic field, as well as the parameters related to the chemical shift difference dw, the optimised values of the previous run will be directly copied.
        @type pre_run_dir:                  None or str
        @keyword optimise_r2eff:            Flag to specify if the read previous R2eff results should be optimised.  For R1rho models where the error of R2eff values are determined by Monte-Carlo simulations, it can be valuable to make an initial R2eff run with a high number of Monte-Carlo simulations.  Any subsequent model analysis can then be based on these R2eff values, without optimising the R2eff values.
        @type optimise_r2eff:               bool
        @keyword insignificance:            The R2eff/R1rho value in rad/s by which to judge insignificance.  If the maximum difference between two points on all dispersion curves for a spin is less than this value, that spin will be deselected.  This does not affect the 'No Rex' model.  Set this value to 0.0 to use all data.  The value will be passed on to the relax_disp.insignificance user function.
        @type insignificance:               float
        @keyword numeric_only:              The class of models to use in the model selection.  The default of False allows all dispersion models to be used in the analysis (no exchange, the analytic models and the numeric models).  The value of True will activate a pure numeric solution - the analytic models will be optimised, as they are very useful for replacing the grid search for the numeric models, but the final model selection will not include them.
        @type numeric_only:                 bool
        @keyword mc_sim_all_models:         A flag which if True will cause Monte Carlo simulations to be performed for each individual model.  Otherwise Monte Carlo simulations will be reserved for the final model.
        @type mc_sim_all_models:            bool
        @keyword eliminate:                 A flag which if True will enable the elimination of failed models and failed Monte Carlo simulations through the eliminate user function.
        @type eliminate:                    bool
        @keyword set_grid_r20:              A flag which if True will set the grid R20 values from the minimum R2eff values through the r20_from_min_r2eff user function. This will speed up the grid search with a factor GRID_INC^(Nr_spec_freq). For a CPMG experiment with two fields and standard GRID_INC=21, the speed-up is a factor 441.
        @type set_grid_r20:                 bool
        @keyword r1_fit:                    A flag which if True will activate R1 parameter fitting via relax_disp.r1_fit for the models that support it.  If False, then the relax_disp.r1_fit user function will not be called.
        """

        # The default model list is built per call to avoid the shared mutable
        # default argument trap (the previous default was the list [MODEL_R2EFF]).
        if models is None:
            models = [MODEL_R2EFF]

        # Initial printout.
        title(file=sys.stdout,
              text="Relaxation dispersion auto-analysis",
              prespace=4)

        # Safely execute the full protocol.
        try:
            # Execution lock.
            status.exec_lock.acquire(pipe_bundle, mode='auto-analysis')

            # Set up the analysis status object.
            status.init_auto_analysis(pipe_bundle, type='relax_disp')
            status.current_analysis = pipe_bundle

            # Store the args.
            self.pipe_name = pipe_name
            self.pipe_bundle = pipe_bundle
            self.results_dir = results_dir
            self.grid_inc = grid_inc
            self.mc_sim_num = mc_sim_num
            self.exp_mc_sim_num = exp_mc_sim_num
            self.models = models
            self.modsel = modsel
            self.pre_run_dir = pre_run_dir
            self.optimise_r2eff = optimise_r2eff
            self.insignificance = insignificance
            self.set_grid_r20 = set_grid_r20
            self.numeric_only = numeric_only
            self.mc_sim_all_models = mc_sim_all_models
            self.eliminate = eliminate
            self.r1_fit = r1_fit

            # No results directory, so default to the current directory.
            if not self.results_dir:
                self.results_dir = getcwd()

            # Data checks.
            self.check_vars()

            # Check for numerical model using numpy version under 1.8.
            # This will result in slow "for loop" calculation through data, making the analysis 5-6 times slower.
            self.check_numpy_less_1_8_and_numerical_model()

            # Load the interpreter.
            self.interpreter = Interpreter(show_script=False,
                                           raise_relax_error=True)
            self.interpreter.populate_self()
            self.interpreter.on(verbose=False)

            # Execute.
            self.run()

        # Clean up.
        finally:
            # Final printout.
            title(file=sys.stdout,
                  text="Completion of the relaxation dispersion auto-analysis",
                  prespace=4)
            print_elapsed_time(time() - status.start_time)

            # Finish and unlock execution (always runs, even on error, so the
            # execution lock is never left held).
            status.auto_analysis[self.pipe_bundle].fin = True
            status.current_analysis = None
            status.exec_lock.release()

    def is_model_for_selection(self, model=None):
        """Determine if the model should be used for model selection.

        @keyword model: The model to check.
        @type model:    str
        @return:        True if the model should be included in the model selection list, False if not.
        @rtype:         bool
        """

        # The 'R2eff' base model never takes part in model selection, and in
        # numeric-only mode the analytic models are excluded as well.
        excluded = (model == MODEL_R2EFF
                    or (self.numeric_only and model in MODEL_LIST_ANALYTIC))

        # All other models are allowed.
        return not excluded

    def check_vars(self):
        """Check that the user has set the variables correctly."""

        # Printout.
        section(file=sys.stdout, text="Variable checking", prespace=2)

        # The pipe name must correspond to an existing data pipe.
        if not has_pipe(self.pipe_name):
            raise RelaxNoPipeError(self.pipe_name)

        # Check the model selection technique.
        allowed = ['AIC', 'AICc', 'BIC']
        if self.modsel not in allowed:
            raise RelaxError("The model selection technique '%s' is not in the allowed list of %s." % (self.modsel, allowed))

        # Warn about analytic models, which in a pure numeric analysis are
        # optimised but excluded from the final model selection.
        if self.numeric_only:
            for model in self.models:
                # Models used for nesting are exempt from the warning.
                if model in MODEL_LIST_NEST:
                    continue

                # Warnings for all other analytic models.
                if model in MODEL_LIST_ANALYTIC:
                    warn(RelaxWarning("The analytic model '%s' will be optimised but will not be used in any way in this numeric model only auto-analysis." % model))

        # Printout.
        print("The dispersion auto-analysis variables are OK.")

    def check_numpy_less_1_8_and_numerical_model(self):
        """Check for numerical model using numpy version under 1.8.  This will result in slow "for loop" calculation through data, making the analysis 5-6 times slower."""

        # Parse the numpy version into a (major, minor) tuple.  The previous
        # float(version.version[:3]) comparison broke for double digit version
        # components, e.g. '1.10.1' -> 1.1 and '1.26.4' -> 1.2, falsely
        # triggering the "below 1.8" warning on modern numpy releases.
        try:
            major, minor = (int(part) for part in version.version.split('.')[:2])
            old_numpy = (major, minor) < (1, 8)
        except ValueError:
            # Non-numeric version components (e.g. development builds) - assume a recent numpy.
            old_numpy = False

        # Some warning for the user if an old numpy is detected.
        if old_numpy:
            # Store which of the chosen models are numeric.
            models = []
            for model in self.models:
                if model in MODEL_LIST_NUMERIC:
                    models.append(model)

            # Write system message if numerical models are present and the numpy version is below 1.8.
            if len(models) > 0:
                # Printout.
                section(file=sys.stdout,
                        text="Numpy version checking for numerical models.",
                        prespace=2)
                warn(RelaxWarning("Your version of numpy is %s, and below the recommended version of 1.8 for numerical models." % (version.version)))
                warn(RelaxWarning("Please consider upgrading your numpy version to 1.8."))

                # Loop over models.
                for model in models:
                    warn(RelaxWarning("This could make the numerical analysis with model '%s', 5 to 6 times slower." % (model)))

    def error_analysis(self):
        """Perform an error analysis of the peak intensities for each field strength separately."""

        # Printout.
        section(file=sys.stdout, text="Error analysis", prespace=2)

        # Determine whether the user has already supplied intensity errors for
        # every selected spin and every spectrum ID.
        have_all_errors = True
        for spin in spin_loop(skip_desel=True):
            # The error structure is missing entirely for this spin.
            if not hasattr(spin, 'peak_intensity_err'):
                have_all_errors = False
                break

            # A spectrum ID is missing from the error structure.
            for spectrum_id in cdp.spectrum_ids:
                if spectrum_id not in spin.peak_intensity_err:
                    have_all_errors = False
                    break

        # Nothing to do - all errors are pre-supplied.
        if have_all_errors:
            print("Skipping the error analysis as it has already been performed.")
            return

        # Perform the error analysis.
        self.interpreter.spectrum.error_analysis_per_field()

    def name_pipe(self, prefix):
        """Generate a unique name for the data pipe.

        @param prefix:  The prefix of the data pipe name.
        @type prefix:   str
        @return:        The unique pipe name built from the prefix and the pipe bundle.
        @rtype:         str
        """

        # Append the bundle name so that pipes from parallel analyses never clash.
        return "%s - %s" % (prefix, self.pipe_bundle)

    def nesting(self, model=None):
        """Support for model nesting.

        If model nesting is detected, the optimised parameters from the simpler model will be used for the more complex model.  The method will then signal if the nesting condition is met for the model, allowing the grid search to be skipped.


        @keyword model: The model to be optimised.
        @type model:    str
        @return:        True if the model parameters is equivalent to the nested model, and all parameters are copied.  False if none or some of the parameters have been translated from the nested model.  Here the Grid search should still be performed.
        @rtype:         bool
        """

        # Printout.
        subsection(file=sys.stdout,
                   text="Nesting and model equivalence checks",
                   prespace=1)

        # The simpler model.  nesting_model() yields info containers for the
        # current model and for the comparable (nested/equivalent) model, the
        # latter being None when no simpler model is available in self.models.
        model_info, comparable_model_info = nesting_model(
            self_models=self.models, model=model)
        if comparable_model_info != None:
            nested_pipe = self.name_pipe(comparable_model_info.model)
        else:
            nested_pipe = None

        # No nesting - the caller must fall back to a full grid search.
        if not nested_pipe:
            print("No model nesting or model equivalence detected.")
            return False

        # Copying the parameters to a numerical model from an analytic solution.
        if model_info.eq in [EQ_NUMERIC, EQ_SILICO
                             ] and comparable_model_info.eq == EQ_ANALYTIC:
            analytic = True
        else:
            analytic = False

        # Determine if model is equivalent or nested (equivalent means the two
        # parameter lists are identical).
        if model_info.params == comparable_model_info.params:
            equivalent = True
        else:
            equivalent = False

        # Printout.
        if equivalent:
            print(
                "Model equivalence detected, copying the optimised parameters from the '%s' model rather than performing a grid search."
                % comparable_model_info.model)
        else:
            print(
                "Model nesting detected, translating the optimised parameters %s from the '%s' model to the parameters %s of model '%s'.  A grid search is issued for the remaining parameters."
                % (comparable_model_info.params, comparable_model_info.model,
                   model_info.params, model))
        if analytic:
            print("The parameters are copied from a %s model to a %s model." %
                  (comparable_model_info.eq, model_info.eq))

        # Get the dictionary of how the model parameters of the current model can be copied.
        par_dic = nesting_param(
            model_params=model_info.params,
            nested_model_params=comparable_model_info.params)

        # Loop over the parameters in current model.
        for param in model_info.params:
            # Extract how parameter is translated.
            param_conv = par_dic[param]

            # If the param_conv is None, then continue (no translation exists;
            # these parameters are what the remaining grid search covers).
            if param_conv == None:
                continue

            print("Copying from parameter '%s' to '%s'." % (param_conv, param))

            # Loop over the spins to copy the parameters.
            for spin, spin_id in spin_loop(return_id=True, skip_desel=True):
                # Get the nested spin.
                nested_spin = return_spin(spin_id=spin_id, pipe=nested_pipe)

                # Set value.
                # Some special conversions.
                if param_conv == '1 - pA':
                    # The value is computed as one minus the nested spin's pA.
                    val = 1.0 - getattr(nested_spin, 'pA')

                elif param_conv == '0.0':
                    # The parameter is re-initialised to zero.
                    val = 0.0

                else:
                    # Direct copy - deepcopy so later optimisation cannot alias
                    # the nested pipe's value.
                    val = deepcopy(getattr(nested_spin, param_conv))

                # Set the attribute.
                setattr(spin, param, val)

        # Determine if model is equivalent, and should not be Grid searched, or if nested, and some parameters are pre-set. Here Grid search should still be issued.
        return equivalent

    def optimise(self, model=None, model_path=None):
        """Optimise the model, taking model nesting into account.

        @keyword model:         The model to be optimised.
        @type model:            str
        @keyword model_path:    The folder name for the model, where possible spaces has been replaced with underscore.
        @type model_path:       str
        """

        # Printout.
        section(file=sys.stdout, text="Optimisation", prespace=2)

        # Deselect insignificant spins.
        if model not in [MODEL_R2EFF, MODEL_NOREX]:
            self.interpreter.relax_disp.insignificance(
                level=self.insignificance)

        # Speed-up grid-search by using minium R2eff value.
        if self.set_grid_r20 and model != MODEL_R2EFF:
            self.interpreter.relax_disp.r20_from_min_r2eff(force=True)

        # Use pre-run results as the optimisation starting point.
        # Test if file exists.
        # NOTE: res_file_exists is only assigned inside this branch, but it is
        # only read below behind the same 'self.pre_run_dir and' guard, so it
        # cannot be unbound there.
        if self.pre_run_dir:
            path = self.pre_run_dir + sep + model_path
            # File path.
            file_path = get_file_path('results', path)

            # Test if the file exists and determine the compression type.
            try:
                compress_type, file_path = determine_compression(file_path)
                res_file_exists = True

            except RelaxFileError:
                res_file_exists = False

        if self.pre_run_dir and res_file_exists:
            self.pre_run_parameters(model=model, model_path=model_path)

        # Otherwise use the normal nesting check and grid search if not nested.
        else:
            # Nested model simplification.
            nested = self.nesting(model=model)

            # Otherwise use a grid search of default values to start optimisation with.
            if not nested:
                # Grid search.
                if self.grid_inc:
                    self.interpreter.minimise.grid_search(inc=self.grid_inc)

                # Default values.
                else:
                    # The standard parameters.
                    for param in MODEL_PARAMS[model]:
                        self.interpreter.value.set(param=param, index=None)

                    # The optional R1 parameter.
                    if is_r1_optimised(model=model):
                        self.interpreter.value.set(param='r1', index=None)

        # 'R2eff' model minimisation flags.
        do_minimise = False
        if model == MODEL_R2EFF:
            # The constraints flag.
            constraints = False

            # The minimisation algorithm to use.
            # Both the Jacobian and Hessian matrix has been specified for exponential curve-fitting, allowing for the much faster algorithms to be used.
            min_algor = 'Newton'

            # Check if all spins contains 'r2eff and it associated error.
            has_r2eff = False

            # Loop over all spins.
            for cur_spin, spin_id in spin_loop(return_id=True,
                                               skip_desel=True):
                # Check 'r2eff'
                if hasattr(cur_spin, 'r2eff') and hasattr(
                        cur_spin, 'r2eff_err'):
                    has_r2eff = True
                else:
                    # One spin lacking the value or error is enough to require optimisation.
                    has_r2eff = False
                    break

            # Skip optimisation, if 'r2eff' + 'r2eff_err' is present and flag for forcing optimisation is not raised.
            if has_r2eff and not self.optimise_r2eff:
                pass

            # Do optimisation, if 'r2eff' + 'r2eff_err' is present and flag for forcing optimisation is raised.
            elif has_r2eff and self.optimise_r2eff:
                do_minimise = True

            # Optimise, if no R2eff and error is present.
            elif not has_r2eff:
                do_minimise = True

        # Dispersion model minimisation flags.
        else:
            do_minimise = True
            constraints = True
            # The minimisation algorithm to use. If the Jacobian and Hessian matrix have not been specified for fitting, 'simplex' should be used.
            min_algor = 'simplex'

        # Do the minimisation.  min_algor and constraints are set on both
        # branches above, so they are always bound here.
        if do_minimise:
            self.interpreter.minimise.execute(min_algor=min_algor,
                                              func_tol=self.opt_func_tol,
                                              max_iter=self.opt_max_iterations,
                                              constraints=constraints)

        # Model elimination.
        if self.eliminate:
            self.interpreter.eliminate()

        # Monte Carlo simulations.
        do_monte_carlo = False
        if model == MODEL_R2EFF:
            # The constraints flag.
            constraints = False

            # Both the Jacobian and Hessian matrix has been specified for exponential curve-fitting, allowing for the much faster algorithms to be used.
            min_algor = 'Newton'

            # Skip optimisation, if 'r2eff' + 'r2eff_err' is present and flag for forcing optimisation is not raised.
            # NOTE: has_r2eff was assigned in the earlier MODEL_R2EFF branch;
            # this branch is only reached under the same condition, so it is bound.
            if has_r2eff and not self.optimise_r2eff:
                pass

            # Do optimisation, if 'r2eff' + 'r2eff_err' is present and flag for forcing optimisation is raised.
            elif has_r2eff and self.optimise_r2eff:
                do_monte_carlo = True

            # Optimise, if no R2eff and error is present.
            elif not has_r2eff:
                do_monte_carlo = True

        elif self.mc_sim_all_models or len(self.models) < 2:
            do_monte_carlo = True
            # The constraints flag.
            constraints = True
            # The minimisation algorithm to use. If the Jacobian and Hessian matrix have not been specified for fitting, 'simplex' should be used.
            min_algor = 'simplex'

        # Error estimation by Monte Carlo simulations.
        if do_monte_carlo:
            # Set the number of Monte-Carlo simulations.
            monte_carlo_sim = self.mc_sim_num

            # If the number for exponential curve fitting has been set.
            if model == MODEL_R2EFF and self.exp_mc_sim_num != None:
                monte_carlo_sim = self.exp_mc_sim_num

            # When set to minus 1, estimation of the errors will be extracted from the covariance matrix.
            # This is HIGHLY likely to be wrong, but can be used in an initial test fase.
            if model == MODEL_R2EFF and self.exp_mc_sim_num == -1:
                # Print
                subsection(file=sys.stdout,
                           text="Estimating errors from Covariance matrix",
                           prespace=1)

                # Raise warning.
                text = 'Estimating errors from the Covariance matrix is highly likely to be "quite" wrong.  Use only with extreme care, and for initial rapid testing of your data.'
                warn(RelaxWarning(text))

                # Estimate errors
                self.interpreter.relax_disp.r2eff_err_estimate()
            else:
                self.interpreter.monte_carlo.setup(number=monte_carlo_sim)
                self.interpreter.monte_carlo.create_data()
                self.interpreter.monte_carlo.initial_values()
                self.interpreter.minimise.execute(
                    min_algor=min_algor,
                    func_tol=self.opt_func_tol,
                    max_iter=self.opt_max_iterations,
                    constraints=constraints)
                if self.eliminate:
                    self.interpreter.eliminate()
                self.interpreter.monte_carlo.error_analysis()

    def pre_run_parameters(self, model=None, model_path=None):
        """Import the optimised parameters from an earlier analysis run.

        @keyword model:         The model to be optimised.
        @type model:            str
        @keyword model_path:    The folder name for the model, where possible spaces has been replaced with underscore.
        @type model_path:       str
        """

        # Announce the parameter import.
        subsection(file=sys.stdout, text="Pre-run parameters", prespace=1)

        # The temporary pipe for the previous run, and the destination pipe for the model.
        prev_pipe = self.name_pipe('pre')
        model_pipe = self.name_pipe(model)

        # Set up the temporary data pipe and populate it with the previous results.
        self.interpreter.pipe.create(pipe_name=prev_pipe, pipe_type='relax_disp')
        self.interpreter.results.read(file='results', dir=self.pre_run_dir + sep + model_path)

        # For the R2eff model, the R2eff values themselves must be forcibly copied across.
        if model == MODEL_R2EFF:
            self.interpreter.value.copy(pipe_from=prev_pipe, pipe_to=model_pipe, param='r2eff', force=True)

        # Transfer the remaining dispersion parameters.
        self.interpreter.relax_disp.parameter_copy(pipe_from=prev_pipe, pipe_to=model_pipe)

        # Finally, switch back to the model's data pipe and remove the temporary one.
        self.interpreter.pipe.switch(pipe_name=model_pipe)
        self.interpreter.pipe.delete(pipe_name=prev_pipe)

    def run(self):
        """Execute the dispersion auto-analysis.

        This performs the peak intensity error analysis, optimises each
        dispersion model in its own data pipe (or reloads pre-existing results
        from an interrupted run), performs model selection when two or more
        models have been optimised, and finally saves the program state.
        """

        # Peak intensity error analysis (only required for the R2eff model).
        if MODEL_R2EFF in self.models:
            self.error_analysis()

        # R1 parameter fitting - the flag is always propagated, but activation is only announced when set.
        if self.r1_fit:
            subtitle(file=sys.stdout,
                     text="R1 parameter optimisation activation",
                     prespace=3)
            self.interpreter.relax_disp.r1_fit(fit=self.r1_fit)
        else:
            # No print out.
            self.interpreter.relax_disp.r1_fit(fit=self.r1_fit)

        # Loop over the models.
        self.model_pipes = []
        for model in self.models:
            # Printout.
            subtitle(file=sys.stdout,
                     text="The '%s' model" % model,
                     prespace=3)

            # The results directory path.
            model_path = model.replace(" ", "_")
            path = self.results_dir + sep + model_path

            # The name of the data pipe for the model.
            model_pipe = self.name_pipe(model)
            if self.is_model_for_selection(model):
                self.model_pipes.append(model_pipe)

            # Check that results do not already exist - i.e. a previous run was interrupted.
            path1 = path + sep + 'results'
            path2 = path1 + '.bz2'
            path3 = path1 + '.gz'
            # Bug fix: the bzipped path was previously tested twice while the gzipped file (path3) was never checked.
            if access(path1, F_OK) or access(path2, F_OK) or access(
                    path3, F_OK):
                # Printout.
                print(
                    "Detected the presence of results files for the '%s' model - loading these instead of performing optimisation for a second time."
                    % model)

                # Create a data pipe and switch to it.
                self.interpreter.pipe.create(pipe_name=model_pipe,
                                             pipe_type='relax_disp',
                                             bundle=self.pipe_bundle)
                self.interpreter.pipe.switch(model_pipe)

                # Load the results.
                self.interpreter.results.read(file='results', dir=path)

                # Jump to the next model.
                continue

            # Create the data pipe by copying the base pipe, then switching to it.
            self.interpreter.pipe.copy(pipe_from=self.pipe_name,
                                       pipe_to=model_pipe,
                                       bundle_to=self.pipe_bundle)
            self.interpreter.pipe.switch(model_pipe)

            # Select the model.
            self.interpreter.relax_disp.select_model(model)

            # Copy the R2eff values from the R2eff model data pipe.
            if model != MODEL_R2EFF and MODEL_R2EFF in self.models:
                self.interpreter.value.copy(
                    pipe_from=self.name_pipe(MODEL_R2EFF),
                    pipe_to=model_pipe,
                    param='r2eff')

            # Calculate the R2eff values for the fixed relaxation time period data types.
            if model == MODEL_R2EFF and not has_exponential_exp_type():
                self.interpreter.minimise.calculate()

            # Optimise the model.
            else:
                self.optimise(model=model, model_path=model_path)

            # Write out the results.
            self.write_results(path=path, model=model)

        # The final model selection data pipe.
        if len(self.models) >= 2:
            # Printout.
            section(file=sys.stdout, text="Final results", prespace=2)

            # Perform model selection.
            self.interpreter.model_selection(
                method=self.modsel,
                modsel_pipe=self.name_pipe('final'),
                bundle=self.pipe_bundle,
                pipes=self.model_pipes)

            # Final Monte Carlo simulations only.
            if not self.mc_sim_all_models:
                self.interpreter.monte_carlo.setup(number=self.mc_sim_num)
                self.interpreter.monte_carlo.create_data()
                self.interpreter.monte_carlo.initial_values()
                self.interpreter.minimise.execute(
                    'simplex',
                    func_tol=self.opt_func_tol,
                    max_iter=self.opt_max_iterations,
                    constraints=True)
                if self.eliminate:
                    self.interpreter.eliminate()
                self.interpreter.monte_carlo.error_analysis()

            # Writing out the final results.
            self.write_results(path=self.results_dir + sep + 'final')

        # No model selection.
        else:
            warn(
                RelaxWarning(
                    "Model selection in the dispersion auto-analysis has been skipped as only %s models have been optimised."
                    % len(self.model_pipes)))

        # Finally save the program state.
        self.interpreter.state.save(state='final_state',
                                    dir=self.results_dir,
                                    force=True)

    def write_results(self, path=None, model=None):
        """Create a set of results, text and Grace files for the current data pipe.

        @keyword path:  The directory to place the files into.
        @type path:     str
        @keyword model: The model analysed, or None if this is the final model selection pipe.
        @type model:    None or str
        """

        # Printout.
        section(file=sys.stdout, text="Results writing", prespace=2)

        # For the final model selection pipe, collect the unique set of models assigned to the spins.
        models_tested = None
        if model == None:
            models_tested = []
            for spin, spin_id in spin_loop(return_id=True, skip_desel=True):
                if spin.model not in models_tested:
                    models_tested.append(spin.model)

        # Shorthand for writing out a single parameter's text and Grace files, when supported.
        def write_param(param, file_name_ini=None):
            self.write_results_test(path=path, model=model, models_tested=models_tested, param=param, file_name_ini=file_name_ini)

        # Special handling for the R2eff model.
        if model == MODEL_R2EFF:
            # The R2eff parameter itself.
            self.interpreter.value.write(param='r2eff', file='r2eff.out', dir=path, force=True)
            self.interpreter.grace.write(x_data_type='res_num', y_data_type='r2eff', file='r2eff.agr', dir=path, force=True)

            # Exponential curves.
            if has_exponential_exp_type():
                # Average peak intensities, raw and normalised.
                self.interpreter.relax_disp.plot_exp_curves(file='intensities.agr', dir=path, force=True)
                self.interpreter.relax_disp.plot_exp_curves(file='intensities_norm.agr', dir=path, force=True, norm=True)

                # The I0 parameter.
                self.interpreter.value.write(param='i0', file='i0.out', dir=path, force=True)
                self.interpreter.grace.write(x_data_type='res_num', y_data_type='i0', file='i0.agr', dir=path, force=True)

        # Dispersion curves.
        self.interpreter.relax_disp.plot_disp_curves(dir=path, force=True)
        self.interpreter.relax_disp.write_disp_curves(dir=path, force=True)

        # The selected models for the final run.
        if model == None:
            self.interpreter.value.write(param='model', file='model.out', dir=path, force=True)

        # The CPMG model parameters: R20, R20A and R20B.
        if has_cpmg_exp_type():
            write_param('r2', file_name_ini='r20')
            write_param('r2a', file_name_ini='r20a')
            write_param('r2b', file_name_ini='r20b')

        # The R1rho model parameters and graphs.
        if has_r1rho_exp_type():
            # The R1 and R1rho prime parameters.
            write_param('r1')
            write_param('r2', file_name_ini='r1rho_prime')

            # Plot specific R1rho graphs.
            if model in [None] + MODEL_LIST_R1RHO:
                self.interpreter.relax_disp.plot_disp_curves(dir=path, x_axis=X_AXIS_THETA, force=True)
                self.interpreter.relax_disp.plot_disp_curves(dir=path, y_axis=Y_AXIS_R2_R1RHO, x_axis=X_AXIS_W_EFF, force=True)
                self.interpreter.relax_disp.plot_disp_curves(dir=path, y_axis=Y_AXIS_R2_EFF, x_axis=X_AXIS_THETA, interpolate=INTERPOLATE_OFFSET, force=True)

            # The calculation of theta and w_eff parameter in R1rho experiments.
            if model in MODEL_LIST_R1RHO_FULL:
                self.interpreter.value.write(param='theta', file='theta.out', dir=path, force=True)
                self.interpreter.value.write(param='w_eff', file='w_eff.out', dir=path, force=True)

        # All remaining dispersion parameters, written out in a fixed order:
        # populations, phi_ex terms, chemical shift differences, and exchange rates.
        for param in ['pA', 'pB', 'pC',
                      'phi_ex', 'phi_ex_B', 'phi_ex_C',
                      'dw', 'dw_AB', 'dw_BC', 'dw_AC',
                      'dwH', 'dwH_AB', 'dwH_BC', 'dwH_AC',
                      'k_AB', 'kex', 'tex',
                      'kex_AB', 'kex_BC', 'kex_AC',
                      'kB', 'kC']:
            write_param(param)

        # Minimisation statistics, skipped when no optimisation was performed.
        if not (model == MODEL_R2EFF and has_fixed_time_exp_type()):
            self.interpreter.value.write(param='chi2', file='chi2.out', dir=path, force=True)
            self.interpreter.grace.write(y_data_type='chi2', file='chi2.agr', dir=path, force=True)

        # Finally save the results.  This is last to allow the continuation of an interrupted analysis while ensuring that all results files have been created.
        self.interpreter.results.write(file='results', dir=path, force=True)

    def write_results_test(self,
                           path=None,
                           model=None,
                           models_tested=None,
                           param=None,
                           file_name_ini=None):
        """Create the text and Grace files for one parameter, if any relevant model supports it.

        Nothing is written out if neither the given model (nor, for the final
        pipe, any of the tested models) includes the parameter.

        @keyword path:              The directory to place the files into.
        @type path:                 str
        @keyword model:             The model tested, or None if the pipe is final.
        @type model:                None or str
        @keyword models_tested:     List of models tested, if the pipe is final.
        @type models_tested:        None or list of str
        @keyword param:             The parameter to write out.
        @type param:                str
        @keyword file_name_ini:     The initial part of the file name for the grace and text files.
        @type file_name_ini:        None or str
        """

        # If not set, use the name of the parameter.
        if file_name_ini is None:
            file_name_ini = param

        # Determine if the parameter should be written out.
        write_result = False
        if model is not None:
            # A single model - check its parameter list.
            write_result = param in MODEL_PARAMS[model]

        # This is the final pipe - check if any model tested at any time supports the parameter.
        else:
            for model_tested in models_tested:
                if param in MODEL_PARAMS[model_tested]:
                    write_result = True
                    break

        # Write results if some of the models supports the parameter.
        if write_result:
            self.interpreter.value.write(param=param,
                                         file='%s.out' % file_name_ini,
                                         dir=path,
                                         force=True)
            self.interpreter.grace.write(x_data_type='res_num',
                                         y_data_type=param,
                                         file='%s.agr' % file_name_ini,
                                         dir=path,
                                         force=True)
Exemple #50
0
class Test_spin(Spin_base_class, TestCase):
    """Unit tests for the functions of the 'prompt.spin' module."""

    def __init__(self, methodName=None):
        """Set up the test case class for the system tests."""

        # Initialise via the base classes first.
        super(Test_spin, self).__init__(methodName)

        # Build a quiet prompt interpreter which raises relax errors directly.
        self.interpreter = Interpreter(show_script=False, quit=False, raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)

        # Shortcut to the spin user function class.
        self.spin_fns = self.interpreter.spin


    def test_copy_argfail_pipe_from(self):
        """Test the proper failure of the spin.copy() user function for the pipe_from argument."""

        # Check every argument type bar the accepted None and str.
        for arg in DATA_TYPES:
            if arg[0] in ['None', 'str']:
                continue

            # The invalid type must trigger the error.
            self.assertRaises(RelaxNoneStrError, self.spin_fns.copy, pipe_from=arg[1], spin_from='#Old mol:1@111', spin_to='#Old mol:2')


    def test_copy_argfail_spin_from(self):
        """Test the proper failure of the spin.copy() user function for the spin_from argument."""

        # Check every argument type bar the accepted str.
        for arg in DATA_TYPES:
            if arg[0] in ['str']:
                continue

            # The invalid type must trigger the error.
            self.assertRaises(RelaxStrError, self.spin_fns.copy, spin_from=arg[1], spin_to='#Old mol:2')


    def test_copy_argfail_pipe_to(self):
        """Test the proper failure of the spin.copy() user function for the pipe_to argument."""

        # Check every argument type bar the accepted None and str.
        for arg in DATA_TYPES:
            if arg[0] in ['None', 'str']:
                continue

            # The invalid type must trigger the error.
            self.assertRaises(RelaxNoneStrError, self.spin_fns.copy, pipe_to=arg[1], spin_from='#Old mol:1@111', spin_to='#Old mol:2')


    def test_copy_argfail_spin_to(self):
        """Test the proper failure of the spin.copy() user function for the spin_to argument."""

        # Check every argument type bar the accepted None and str.
        for arg in DATA_TYPES:
            if arg[0] in ['None', 'str']:
                continue

            # The invalid type must trigger the error.
            self.assertRaises(RelaxNoneStrError, self.spin_fns.copy, spin_from='#Old mol:1@111', spin_to=arg[1])


    def test_create_argfail_spin_num(self):
        """Test the proper failure of the spin.create() user function for the spin_num argument."""

        # Check every argument type bar the accepted None, int and bin.
        for arg in DATA_TYPES:
            if arg[0] in ['None', 'int', 'bin']:
                continue

            # The invalid type must trigger the error.
            self.assertRaises(RelaxNoneIntError, self.spin_fns.create, spin_num=arg[1], spin_name='NH')


    def test_create_argfail_spin_name(self):
        """Test the proper failure of the spin.create() user function for the spin_name argument."""

        # Check every argument type bar the accepted None and str.
        for arg in DATA_TYPES:
            if arg[0] in ['None', 'str']:
                continue

            # The invalid type must trigger the error.
            self.assertRaises(RelaxNoneStrError, self.spin_fns.create, spin_name=arg[1], spin_num=1)


    def test_create_argfail_res_num(self):
        """Test the proper failure of the spin.create() user function for the res_num argument."""

        # Check every argument type bar the accepted None, int and bin.
        for arg in DATA_TYPES:
            if arg[0] in ['None', 'int', 'bin']:
                continue

            # The invalid type must trigger the error.
            self.assertRaises(RelaxNoneIntError, self.spin_fns.create, res_num=arg[1], spin_name='NH')


    def test_create_argfail_res_name(self):
        """Test the proper failure of the spin.create() user function for the res_name argument."""

        # Check every argument type bar the accepted None and str.
        for arg in DATA_TYPES:
            if arg[0] in ['None', 'str']:
                continue

            # The invalid type must trigger the error.
            self.assertRaises(RelaxNoneStrError, self.spin_fns.create, res_name=arg[1], spin_num=1, spin_name='NH')


    def test_create_argfail_mol_name(self):
        """Test the proper failure of the spin.create() user function for the mol_name argument."""

        # Check every argument type bar the accepted None and str.
        for arg in DATA_TYPES:
            if arg[0] in ['None', 'str']:
                continue

            # The invalid type must trigger the error.
            self.assertRaises(RelaxNoneStrError, self.spin_fns.create, mol_name=arg[1], spin_num=1, spin_name='NH')


    def test_create_pseudo_argfail_spin_name(self):
        """The spin_name arg test of the spin.create_pseudo() user function."""

        # Check every argument type bar the accepted str.
        for arg in DATA_TYPES:
            if arg[0] in ['str']:
                continue

            # The invalid type must trigger the error.
            self.assertRaises(RelaxStrError, self.spin_fns.create_pseudo, spin_name=arg[1])


    def test_create_pseudo_argfail_spin_num(self):
        """The spin_num arg test of the spin.create_pseudo() user function."""

        # Check every argument type bar the accepted None, int and bin.
        for arg in DATA_TYPES:
            if arg[0] in ['None', 'int', 'bin']:
                continue

            # The invalid type must trigger the error.
            self.assertRaises(RelaxNoneIntError, self.spin_fns.create_pseudo, spin_num=arg[1], spin_name='Q')


    def test_create_pseudo_argfail_res_id(self):
        """The res_id arg test of the spin.create_pseudo() user function."""

        # Check every argument type bar the accepted None and str.
        for arg in DATA_TYPES:
            if arg[0] in ['None', 'str']:
                continue

            # The invalid type must trigger the error.
            self.assertRaises(RelaxNoneStrError, self.spin_fns.create_pseudo, res_id=arg[1], spin_name='Q')


    def test_create_pseudo_argfail_members(self):
        """The members arg test of the spin.create_pseudo() user function."""

        # Check every argument type bar the accepted str list.
        for arg in DATA_TYPES:
            if arg[0] in ['str list']:
                continue

            # The invalid type must trigger the error.
            self.assertRaises(RelaxListStrError, self.spin_fns.create_pseudo, members=arg[1], spin_name='Q')


    def test_create_pseudo_argfail_averaging(self):
        """The averaging arg test of the spin.create_pseudo() user function."""

        # Check every argument type bar the accepted str.
        for arg in DATA_TYPES:
            if arg[0] in ['str']:
                continue

            # The invalid type must trigger the error.
            self.assertRaises(RelaxStrError, self.spin_fns.create_pseudo, averaging=arg[1], spin_name='Q', members=['x'])


    def test_delete_argfail_spin_id(self):
        """Test the proper failure of the spin.delete() user function for the spin_id argument."""

        # Check every argument type bar the accepted str.
        for arg in DATA_TYPES:
            if arg[0] in ['str']:
                continue

            # The invalid type must trigger the error.
            self.assertRaises(RelaxStrError, self.spin_fns.delete, spin_id=arg[1])


    def test_display_argfail_spin_id(self):
        """Test the proper failure of the spin.display() user function for the spin_id argument."""

        # Check every argument type bar the accepted None and str.
        for arg in DATA_TYPES:
            if arg[0] in ['None', 'str']:
                continue

            # The invalid type must trigger the error.
            self.assertRaises(RelaxNoneStrError, self.spin_fns.display, spin_id=arg[1])


    def test_name_argfail_spin_id(self):
        """Test the proper failure of the spin.name() user function for the spin_id argument."""

        # Check every argument type bar the accepted None and str.
        for arg in DATA_TYPES:
            if arg[0] in ['None', 'str']:
                continue

            # The invalid type must trigger the error.
            self.assertRaises(RelaxNoneStrError, self.spin_fns.name, name='N', spin_id=arg[1])


    def test_name_argfail_name(self):
        """Test the proper failure of the spin.name() user function for the name argument."""

        # Check every argument type bar the accepted str.
        for arg in DATA_TYPES:
            if arg[0] in ['str']:
                continue

            # The invalid type must trigger the error.
            self.assertRaises(RelaxStrError, self.spin_fns.name, name=arg[1])


    def test_number_argfail_spin_id(self):
        """Test the proper failure of the spin.number() user function for the spin_id argument."""

        # Check every argument type bar the accepted None and str.
        for arg in DATA_TYPES:
            if arg[0] in ['None', 'str']:
                continue

            # The invalid type must trigger the error.
            self.assertRaises(RelaxNoneStrError, self.spin_fns.number, spin_id=arg[1])


    def test_number_argfail_number(self):
        """Test the proper failure of the spin.number() user function for the number argument."""

        # Check every argument type bar the accepted None, int and bin.
        for arg in DATA_TYPES:
            if arg[0] in ['None', 'int', 'bin']:
                continue

            # The invalid type must trigger the error.
            self.assertRaises(RelaxNoneIntError, self.spin_fns.number, spin_id='@111', number=arg[1])
class Test_relax_disp(TestCase):
    """Unit tests for the functions of the 'prompt.relax_disp' module."""

    def __init__(self, methodName=None):
        """Set up the test case class for the system tests.

        @keyword methodName:    The name of the test method to run.
        @type methodName:       str or None
        """

        # Execute the base __init__ methods.
        super(Test_relax_disp, self).__init__(methodName)

        # Load the interpreter.
        self.interpreter = Interpreter(show_script=False, quit=False, raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)

        # Alias the user function class.
        self.relax_disp_fns = self.interpreter.relax_disp


    def test_relax_cpmg_frq_argfail_cpmg_frq(self):
        """The cpmg_frq arg test of the relax_disp.cpmg_frq() user function."""

        # Loop over the data types.
        for data in DATA_TYPES:
            # Catch the float, int and None arguments, and skip them.
            if data[0] == 'float' or data[0] == 'int' or data[0] == 'None':
                continue

            # The argument test.  This must sit inside the loop - previously it was dedented
            # out of the loop, so only the final DATA_TYPES element was ever checked.
            self.assertRaises(RelaxNoneNumError, self.relax_disp_fns.cpmg_frq, spectrum_id='test', cpmg_frq=data[1])


    def test_relax_cpmg_frq_argfail_spectrum_id(self):
        """The spectrum_id arg test of the relax_disp.cpmg_frq() user function."""

        # Loop over the data types.
        for data in DATA_TYPES:
            # Catch the str arguments, and skip them.
            if data[0] == 'str':
                continue

            # The argument test (inside the loop so every data type is exercised).
            self.assertRaises(RelaxStrError, self.relax_disp_fns.cpmg_frq, spectrum_id=data[1])


    def test_relax_exp_type_argfail_exp_type(self):
        """The exp_type arg test of the relax_disp.exp_type() user function."""

        # Loop over the data types.
        for data in DATA_TYPES:
            # Catch the str arguments, and skip them.
            if data[0] == 'str':
                continue

            # The argument test (inside the loop so every data type is exercised).
            self.assertRaises(RelaxStrError, self.relax_disp_fns.exp_type, exp_type=data[1])


    def test_relax_select_model_argfail_model(self):
        """The model arg test of the relax_disp.select_model() user function."""

        # Loop over the data types.
        for data in DATA_TYPES:
            # Catch the str arguments, and skip them.
            if data[0] == 'str':
                continue

            # The argument test (inside the loop so every data type is exercised).
            self.assertRaises(RelaxStrError, self.relax_disp_fns.select_model, model=data[1])
Exemple #52
0
class Test_residue(Residue_base_class, TestCase):
    """Unit tests for the functions of the 'prompt.residue' module."""

    def __init__(self, methodName=None):
        """Set up the test case class for the system tests."""

        # Let the base classes perform their standard initialisation.
        super(Test_residue, self).__init__(methodName)

        # Build a prompt interpreter which converts relax errors into exceptions.
        self.interpreter = Interpreter(show_script=False,
                                       raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)

        # Shortcut to the residue user function class.
        self.residue_fns = self.interpreter.residue

    def test_copy_argfail_pipe_from(self):
        """Test the proper failure of the residue.copy() user function for the pipe_from argument."""

        # Check every incompatible data type in turn.
        for case in DATA_TYPES:
            # None and str are the permitted types, so skip them.
            if case[0] in ('None', 'str'):
                continue

            # Any other type must trigger the argument error.
            self.assertRaises(RelaxNoneStrError, self.residue_fns.copy, pipe_from=case[1], res_from='#Old mol:1', res_to='#Old mol:2')

    def test_copy_argfail_res_from(self):
        """Test the proper failure of the residue.copy() user function for the res_from argument."""

        # Check every incompatible data type in turn.
        for case in DATA_TYPES:
            # A str is the only permitted type, so skip it.
            if case[0] == 'str':
                continue

            # Any other type must trigger the argument error.
            self.assertRaises(RelaxStrError, self.residue_fns.copy, res_from=case[1], res_to='#Old mol:2')

    def test_copy_argfail_pipe_to(self):
        """Test the proper failure of the residue.copy() user function for the pipe_to argument."""

        # Check every incompatible data type in turn.
        for case in DATA_TYPES:
            # None and str are the permitted types, so skip them.
            if case[0] in ('None', 'str'):
                continue

            # Any other type must trigger the argument error.
            self.assertRaises(RelaxNoneStrError, self.residue_fns.copy, pipe_to=case[1], res_from='#Old mol:1', res_to='#Old mol:2')

    def test_copy_argfail_res_to(self):
        """Test the proper failure of the residue.copy() user function for the res_to argument."""

        # Check every incompatible data type in turn.
        for case in DATA_TYPES:
            # None and str are the permitted types, so skip them.
            if case[0] in ('None', 'str'):
                continue

            # Any other type must trigger the argument error.
            self.assertRaises(RelaxNoneStrError, self.residue_fns.copy, res_from='#Old mol:1@111', res_to=case[1])

    def test_create_argfail_res_num(self):
        """Test the proper failure of the residue.create() user function for the res_num argument."""

        # Check every incompatible data type in turn.
        for case in DATA_TYPES:
            # None and the integer types are permitted, so skip them.
            if case[0] in ('None', 'int', 'bin'):
                continue

            # Any other type must trigger the argument error.
            self.assertRaises(RelaxNoneIntError, self.residue_fns.create, res_num=case[1], res_name='NH')

    def test_create_argfail_res_name(self):
        """Test the proper failure of the residue.create() user function for the res_name argument."""

        # Check every incompatible data type in turn.
        for case in DATA_TYPES:
            # None and str are the permitted types, so skip them.
            if case[0] in ('None', 'str'):
                continue

            # Any other type must trigger the argument error.
            self.assertRaises(RelaxNoneStrError, self.residue_fns.create, res_name=case[1], res_num=1)

    def test_create_argfail_mol_name(self):
        """Test the proper failure of the residue.create() user function for the mol_name argument."""

        # Check every incompatible data type in turn.
        for case in DATA_TYPES:
            # None and str are the permitted types, so skip them.
            if case[0] in ('None', 'str'):
                continue

            # Any other type must trigger the argument error.
            self.assertRaises(RelaxNoneStrError, self.residue_fns.create, mol_name=case[1], res_num=1, res_name='NH')

    def test_delete_argfail_res_id(self):
        """Test the proper failure of the residue.delete() user function for the res_id argument."""

        # Check every incompatible data type in turn.
        for case in DATA_TYPES:
            # A str is the only permitted type, so skip it.
            if case[0] == 'str':
                continue

            # Any other type must trigger the argument error.
            self.assertRaises(RelaxStrError, self.residue_fns.delete, res_id=case[1])

    def test_display_argfail_res_id(self):
        """Test the proper failure of the residue.display() user function for the res_id argument."""

        # Check every incompatible data type in turn.
        for case in DATA_TYPES:
            # None and str are the permitted types, so skip them.
            if case[0] in ('None', 'str'):
                continue

            # Any other type must trigger the argument error.
            self.assertRaises(RelaxNoneStrError, self.residue_fns.display, res_id=case[1])

    def test_name_argfail_res_id(self):
        """Test the proper failure of the residue.name() user function for the res_id argument."""

        # Check every incompatible data type in turn.
        for case in DATA_TYPES:
            # A str is the only permitted type, so skip it.
            if case[0] == 'str':
                continue

            # Any other type must trigger the argument error.
            self.assertRaises(RelaxStrError, self.residue_fns.name, res_id=case[1])

    def test_name_argfail_name(self):
        """Test the proper failure of the residue.name() user function for the name argument."""

        # Check every incompatible data type in turn.
        for case in DATA_TYPES:
            # A str is the only permitted type, so skip it.
            if case[0] == 'str':
                continue

            # Any other type must trigger the argument error.
            self.assertRaises(RelaxStrError, self.residue_fns.name, name=case[1])

    def test_number_argfail_res_id(self):
        """Test the proper failure of the residue.number() user function for the res_id argument."""

        # Check every incompatible data type in turn.
        for case in DATA_TYPES:
            # A str is the only permitted type, so skip it.
            if case[0] == 'str':
                continue

            # Any other type must trigger the argument error.
            self.assertRaises(RelaxStrError, self.residue_fns.number, res_id=case[1])

    def test_number_argfail_number(self):
        """Test the proper failure of the residue.number() user function for the number argument."""

        # Check every incompatible data type in turn.
        for case in DATA_TYPES:
            # The integer types are permitted, so skip them.
            if case[0] in ('int', 'bin'):
                continue

            # Any other type must trigger the argument error.
            self.assertRaises(RelaxIntError, self.residue_fns.number, res_id=':1', number=case[1])
Exemple #53
0
class Test_n_state_model(N_state_model_base_class, TestCase):
    """Unit tests for the functions of the 'prompt.n_state_model' module."""

    def __init__(self, methodName=None):
        """Set up the test case class for the system tests."""

        # Let the base classes perform their standard initialisation.
        super(Test_n_state_model, self).__init__(methodName)

        # Build a prompt interpreter which converts relax errors into exceptions.
        self.interpreter = Interpreter(show_script=False,
                                       raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)

        # Shortcut to the n_state_model user function class.
        self.n_state_model_fns = self.interpreter.n_state_model

    def test_CoM_argfail_pivot_point(self):
        """The pivot_point arg test of the n_state_model.CoM() user function."""

        # Check every incompatible data type in turn.
        for case in DATA_TYPES:
            # Length-3 numeric lists are the permitted values, so skip them.
            if case[0] in ('int list', 'float list', 'number list') and len(case[1]) == 3:
                continue

            # Any other value must trigger the argument error.
            self.assertRaises(RelaxListNumError, self.n_state_model_fns.CoM, pivot_point=case[1])

    def test_CoM_argfail_centre(self):
        """The centre arg test of the n_state_model.CoM() user function."""

        # Check every incompatible data type in turn.
        for case in DATA_TYPES:
            # None or a length-3 numeric list is permitted, so skip these.
            if case[0] == 'None' or (case[0] in ('int list', 'float list', 'number list') and len(case[1]) == 3):
                continue

            # Any other value must trigger the argument error.
            self.assertRaises(RelaxListNumError, self.n_state_model_fns.CoM, centre=case[1])

    def test_cone_pdb_argfail_cone_type(self):
        """The cone_type arg test of the n_state_model.cone_pdb() user function."""

        # Check every incompatible data type in turn.
        for case in DATA_TYPES:
            # A str is the only permitted type, so skip it.
            if case[0] == 'str':
                continue

            # Any other type must trigger the argument error.
            self.assertRaises(RelaxStrError, self.n_state_model_fns.cone_pdb, cone_type=case[1])

    def test_cone_pdb_argfail_scale(self):
        """The scale arg test of the n_state_model.cone_pdb() user function."""

        # Check every incompatible data type in turn.
        for case in DATA_TYPES:
            # The numeric types are permitted, so skip them.
            if case[0] in ('float', 'bin', 'int'):
                continue

            # Any other type must trigger the argument error.
            self.assertRaises(RelaxNumError, self.n_state_model_fns.cone_pdb, cone_type='', scale=case[1])

    def test_cone_pdb_argfail_file(self):
        """The file arg test of the n_state_model.cone_pdb() user function."""

        # Check every incompatible data type in turn.
        for case in DATA_TYPES:
            # File objects and strings are permitted, so skip them.
            if case[0] in ('file', 'str'):
                continue

            # Any other type must trigger the argument error.
            self.assertRaises(RelaxStrFileError, self.n_state_model_fns.cone_pdb, cone_type='', file=case[1])

    def test_cone_pdb_argfail_dir(self):
        """The dir arg test of the n_state_model.cone_pdb() user function."""

        # Check every incompatible data type in turn.
        for case in DATA_TYPES:
            # None and str are the permitted types, so skip them.
            if case[0] in ('None', 'str'):
                continue

            # Any other type must trigger the argument error.
            self.assertRaises(RelaxStrError, self.n_state_model_fns.cone_pdb, cone_type='', dir=case[1])

    def test_cone_pdb_argfail_force(self):
        """The force arg test of the n_state_model.cone_pdb() user function."""

        # Check every incompatible data type in turn.
        for case in DATA_TYPES:
            # A bool is the only permitted type, so skip it.
            if case[0] == 'bool':
                continue

            # Any other type must trigger the argument error.
            self.assertRaises(RelaxBoolError, self.n_state_model_fns.cone_pdb, cone_type='', force=case[1])

    def test_number_of_states_argfail_N(self):
        """Failure of the N arg of the n_state_model.number_of_states() user function."""

        # Check every incompatible data type in turn.
        for case in DATA_TYPES:
            # The integer types are permitted, so skip them.
            if case[0] in ('bin', 'int'):
                continue

            # Any other type must trigger the argument error.
            self.assertRaises(RelaxIntError, self.n_state_model_fns.number_of_states, N=case[1])

    def test_ref_domain_argfail_ref(self):
        """Failure of the ref arg of the n_state_model.ref_domain() user function."""

        # Check every incompatible data type in turn.
        for case in DATA_TYPES:
            # A str is the only permitted type, so skip it.
            if case[0] == 'str':
                continue

            # Any other type must trigger the argument error.
            self.assertRaises(RelaxStrError, self.n_state_model_fns.ref_domain, ref=case[1])

    def test_select_model_argfail_model(self):
        """Failure of the model arg of the n_state_model.select_model() user function."""

        # Check every incompatible data type in turn.
        for case in DATA_TYPES:
            # A str is the only permitted type, so skip it.
            if case[0] == 'str':
                continue

            # Any other type must trigger the argument error.
            self.assertRaises(RelaxStrError, self.n_state_model_fns.select_model, model=case[1])
Exemple #54
0
class Test_state(State_base_class, TestCase):
    """Unit tests for the functions of the 'prompt.state' module."""

    def __init__(self, methodName=None):
        """Set up the test case class for the system tests."""

        # Let the base classes perform their standard initialisation.
        super(Test_state, self).__init__(methodName)

        # Build a prompt interpreter which converts relax errors into exceptions.
        self.interpreter = Interpreter(show_script=False,
                                       raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)

        # Shortcut to the state user function class.
        self.state = self.interpreter.state

        # Alias the user functions to work with the backend.
        self.state.load_state = self.state.load
        self.state.save_state = self.state.save

    def test_load_argfail_state(self):
        """Test the proper failure of the state.load() user function for the state argument."""

        # Check every incompatible data type in turn.
        for case in DATA_TYPES:
            # Strings and file objects are permitted, so skip them.
            if case[0] in ('str', 'file'):
                continue

            # Any other type must trigger the argument error.
            self.assertRaises(RelaxStrFileError, self.state.load_state, state=case[1])

    def test_load_argfail_dir(self):
        """Test the proper failure of the state.load() user function for the dir argument."""

        # Check every incompatible data type in turn.
        for case in DATA_TYPES:
            # None and str are the permitted types, so skip them.
            if case[0] in ('None', 'str'):
                continue

            # Any other type must trigger the argument error.
            self.assertRaises(RelaxNoneStrError, self.state.load_state, state='a', dir=case[1])

    def test_save_argfail_state(self):
        """Test the proper failure of the state.save() user function for the state argument."""

        # Check every incompatible data type in turn.
        for case in DATA_TYPES:
            # Strings and file objects are permitted, so skip them.
            if case[0] in ('str', 'file'):
                continue

            # Any other type must trigger the argument error.
            self.assertRaises(RelaxStrFileError, self.state.save_state, state=case[1])

    def test_save_argfail_dir(self):
        """Test the proper failure of the state.save() user function for the dir argument."""

        # Check every incompatible data type in turn.
        for case in DATA_TYPES:
            # None and str are the permitted types, so skip them.
            if case[0] in ('None', 'str'):
                continue

            # Any other type must trigger the argument error.
            self.assertRaises(RelaxNoneStrError, self.state.save_state, state='a', dir=case[1])

    def test_save_argfail_force(self):
        """Test the proper failure of the state.save() user function for the force argument."""

        # Check every incompatible data type in turn.
        for case in DATA_TYPES:
            # A bool is the only permitted type, so skip it.
            if case[0] == 'bool':
                continue

            # Any other type must trigger the argument error.
            self.assertRaises(RelaxBoolError, self.state.save_state, state='a', force=case[1])

    def test_save_argfail_compress_type(self):
        """Test the proper failure of the state.save() user function for the compress_type argument."""

        # Check every incompatible data type in turn.
        for case in DATA_TYPES:
            # The integer types are permitted, so skip them.
            if case[0] in ('int', 'bin'):
                continue

            # Any other type must trigger the argument error.
            self.assertRaises(RelaxIntError, self.state.save_state, state='a', compress_type=case[1])
Exemple #55
0
class NOE_calc:
    # Automated protocol for calculating steady-state NOE values from peak
    # intensities (writes the 'noe' parameter, results files, and Grace plots).
    def __init__(self, pipe_name=None, pipe_bundle=None, file_root='noe', results_dir=None, save_state=True):
        """Perform the automated NOE analysis.

        To use this auto-analysis, a data pipe with all the required data needs to be set up.  This data pipe should contain the following:

            - All the spins loaded.
            - Unresolved spins deselected.
            - The NOE peak intensities from the saturated and reference spectra.
            - Either the baseplane noise RMSD values should be set or replicated spectra loaded.

        @keyword pipe_name:     The name of the data pipe containing all of the data for the analysis.
        @type pipe_name:        str
        @keyword pipe_bundle:   The data pipe bundle to associate all spawned data pipes with.
        @type pipe_bundle:      str
        @keyword file_root:     File root of the output files.
        @type file_root:        str
        @keyword results_dir:   The directory where results files are saved.
        @type results_dir:      str
        @keyword save_state:    A flag which if True will cause a relax save state to be created at the end of the analysis.
        @type save_state:       bool
        """

        # Execution lock - held for the whole analysis and released in the final block below.
        status.exec_lock.acquire(pipe_bundle, mode='auto-analysis')

        # Set up the analysis status object.
        status.init_auto_analysis(pipe_bundle, type='noe')
        status.current_analysis = pipe_bundle

        # Store the args.
        self.save_state = save_state
        self.pipe_name = pipe_name
        self.pipe_bundle = pipe_bundle
        self.file_root = file_root
        self.results_dir = results_dir
        if self.results_dir:
            self.grace_dir = results_dir + sep + 'grace'
        else:
            self.grace_dir = 'grace'

        # Data checks.
        self.check_vars()

        # Switch to the supplied data pipe if it is not already the current one.
        if self.pipe_name != cdp_name():
            switch(self.pipe_name)

        # Load the interpreter.
        self.interpreter = Interpreter(show_script=False, quit=False, raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)

        # Execute.
        self.run()

        # Finish and unlock execution (must happen even after run() so other analyses can proceed).
        status.auto_analysis[self.pipe_bundle].fin = True
        status.current_analysis = None
        status.exec_lock.release()


    def run(self):
        """Set up and run the NOE analysis."""

        # Peak intensity error analysis.
        self.interpreter.spectrum.error_analysis()

        # Calculate the NOEs.
        self.interpreter.calc()

        # Save the NOEs.
        self.interpreter.value.write(param='noe', file=self.file_root+'.out', dir=self.results_dir, force=True)

        # Save the results.
        self.interpreter.results.write(file='results', dir=self.results_dir, force=True)

        # Create Grace plots of the data.
        self.interpreter.grace.write(y_data_type='ref', file='ref.agr', dir=self.grace_dir, force=True)
        self.interpreter.grace.write(y_data_type='sat', file='sat.agr', dir=self.grace_dir, force=True)
        self.interpreter.grace.write(y_data_type='noe', file='noe.agr', dir=self.grace_dir, force=True)

        # Save the program state.
        if self.save_state:
            self.interpreter.state.save(state=self.file_root+'.save', dir=self.results_dir, force=True)


    def check_vars(self):
        """Check that the user has set the variables correctly.

        @raises RelaxNoPipeError:   If the data pipe named in self.pipe_name does not exist.
        """

        # The pipe name.
        if not has_pipe(self.pipe_name):
            raise RelaxNoPipeError(self.pipe_name)
Exemple #56
0
class Test_minimisation(Minimisation_base_class, TestCase):
    """Unit tests for the functions of the 'prompt.minimisation' module."""

    def __init__(self, methodName=None):
        """Set up the test case class for the system tests."""

        # Let the base classes perform their standard initialisation.
        super(Test_minimisation, self).__init__(methodName)

        # Build a prompt interpreter which converts relax errors into exceptions.
        self.interpreter = Interpreter(show_script=False,
                                       raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)

        # Shortcut to the minimise user function class.
        self.minimisation_fns = self.interpreter.minimise

    def test_calc_argfail_verbosity(self):
        """The verbosity arg test of the minimise.calculate() user function."""

        # Check every incompatible data type in turn.
        for case in DATA_TYPES:
            # The integer types are permitted, so skip them.
            if case[0] in ('int', 'bin'):
                continue

            # Any other type must trigger the argument error.
            self.assertRaises(RelaxIntError, self.minimisation_fns.calculate, verbosity=case[1])

    def test_grid_search_argfail_lower(self):
        """The lower arg test of the minimise.grid_search() user function."""

        # Check every incompatible data type in turn.
        for case in DATA_TYPES:
            # None and the numeric list types are permitted, so skip them.
            if case[0] in ('None', 'int list', 'float list', 'number list'):
                continue

            # Any other type must trigger the argument error.
            self.assertRaises(RelaxNoneListNumError, self.minimisation_fns.grid_search, lower=case[1])

    def test_grid_search_argfail_upper(self):
        """The upper arg test of the minimise.grid_search() user function."""

        # Check every incompatible data type in turn.
        for case in DATA_TYPES:
            # None and the numeric list types are permitted, so skip them.
            if case[0] in ('None', 'int list', 'float list', 'number list'):
                continue

            # Any other type must trigger the argument error.
            self.assertRaises(RelaxNoneListNumError, self.minimisation_fns.grid_search, upper=case[1])

    def test_grid_search_argfail_inc(self):
        """The inc arg test of the minimise.grid_search() user function."""

        # Check every incompatible data type in turn.
        for case in DATA_TYPES:
            # Integers and integer/None lists are permitted, so skip them.
            if case[0] in ('bin', 'int', 'int list', 'none list'):
                continue

            # Any other type must trigger the argument error.
            self.assertRaises(RelaxIntListIntError, self.minimisation_fns.grid_search, inc=case[1])

    def test_grid_search_argfail_constraints(self):
        """The constraints arg test of the minimise.grid_search() user function."""

        # Check every incompatible data type in turn.
        for case in DATA_TYPES:
            # A bool is the only permitted type, so skip it.
            if case[0] == 'bool':
                continue

            # Any other type must trigger the argument error.
            self.assertRaises(RelaxBoolError, self.minimisation_fns.grid_search, constraints=case[1])

    def test_grid_search_argfail_verbosity(self):
        """The verbosity arg test of the minimise.grid_search() user function."""

        # Check every incompatible data type in turn.
        for case in DATA_TYPES:
            # The integer types are permitted, so skip them.
            if case[0] in ('int', 'bin'):
                continue

            # Any other type must trigger the argument error.
            self.assertRaises(RelaxIntError, self.minimisation_fns.grid_search, verbosity=case[1])

    def test_minimise_argfail_bad_keyword(self):
        """The test of a bad keyword argument in the minimise.execute() user function."""

        # Every data type should fail when passed via an unknown keyword.
        for case in DATA_TYPES:
            self.assertRaises(RelaxError, self.minimisation_fns.execute, 'Newton', step_tol=case[1])

    def test_minimise_argfail_min_algor(self):
        """The min_algor arg test of the minimise.execute() user function."""

        # Check every incompatible data type in turn.
        for case in DATA_TYPES:
            # A str is the only permitted type, so skip it.
            if case[0] == 'str':
                continue

            # Any other type must trigger the argument error.
            self.assertRaises(RelaxStrError, self.minimisation_fns.execute, case[1])

    def test_minimise_argfail_line_search(self):
        """The line_search arg test of the minimise.execute() user function."""

        # Check every incompatible data type in turn.
        for case in DATA_TYPES:
            # None and str are the permitted types, so skip them.
            if case[0] in ('None', 'str'):
                continue

            # Any other type must trigger the argument error.
            self.assertRaises(RelaxNoneStrError, self.minimisation_fns.execute, 'Newton', line_search=case[1])

    def test_minimise_argfail_hessian_mod(self):
        """The hessian_mod arg test of the minimise.execute() user function."""

        # Check every incompatible data type in turn.
        for case in DATA_TYPES:
            # None and str are the permitted types, so skip them.
            if case[0] in ('None', 'str'):
                continue

            # Any other type must trigger the argument error.
            self.assertRaises(RelaxNoneStrError, self.minimisation_fns.execute, 'Newton', hessian_mod=case[1])

    def test_minimise_argfail_hessian_type(self):
        """The hessian_type arg test of the minimise.execute() user function."""

        # Check every incompatible data type in turn.
        for case in DATA_TYPES:
            # None and str are the permitted types, so skip them.
            if case[0] in ('None', 'str'):
                continue

            # Any other type must trigger the argument error.
            self.assertRaises(RelaxNoneStrError, self.minimisation_fns.execute, 'Newton', hessian_type=case[1])

    def test_minimise_argfail_func_tol(self):
        """The func_tol arg test of the minimise.execute() user function."""

        # Check every incompatible data type in turn.
        for case in DATA_TYPES:
            # The numeric types are permitted, so skip them.
            if case[0] in ('float', 'bin', 'int'):
                continue

            # Any other type must trigger the argument error.
            self.assertRaises(RelaxNumError, self.minimisation_fns.execute, 'Newton', func_tol=case[1])

    def test_minimise_argfail_grad_tol(self):
        """The grad_tol arg test of the minimise.execute() user function."""

        # Check every incompatible data type in turn.
        for case in DATA_TYPES:
            # None and the numeric types are permitted, so skip them.
            if case[0] in ('None', 'float', 'bin', 'int'):
                continue

            # Any other type must trigger the argument error.
            self.assertRaises(RelaxNoneNumError, self.minimisation_fns.execute, 'Newton', grad_tol=case[1])

    def test_minimise_argfail_max_iter(self):
        """The max_iter arg test of the minimise.execute() user function."""

        # Check every incompatible data type in turn.
        for case in DATA_TYPES:
            # The integer types are permitted, so skip them.
            if case[0] in ('bin', 'int'):
                continue

            # Any other type must trigger the argument error.
            self.assertRaises(RelaxIntError, self.minimisation_fns.execute, 'Newton', max_iter=case[1])

    def test_minimise_argfail_constraints(self):
        """The constraints arg test of the minimise.execute() user function."""

        # Check every incompatible data type in turn.
        for case in DATA_TYPES:
            # A bool is the only permitted type, so skip it.
            if case[0] == 'bool':
                continue

            # Any other type must trigger the argument error.
            self.assertRaises(RelaxBoolError, self.minimisation_fns.execute, 'Newton', constraints=case[1])

    def test_minimise_argfail_scaling(self):
        """The scaling arg test of the minimise.execute() user function."""

        # Check every incompatible data type in turn.
        for case in DATA_TYPES:
            # A bool is the only permitted type, so skip it.
            if case[0] == 'bool':
                continue

            # Any other type must trigger the argument error.
            self.assertRaises(RelaxBoolError, self.minimisation_fns.execute, 'Newton', scaling=case[1])

    def test_minimise_argfail_verbosity(self):
        """The verbosity arg test of the minimise.execute() user function."""

        # Check every incompatible data type in turn.
        for case in DATA_TYPES:
            # The integer types are permitted, so skip them.
            if case[0] in ('bin', 'int'):
                continue

            # Any other type must trigger the argument error.
            self.assertRaises(RelaxIntError, self.minimisation_fns.execute, 'Newton', verbosity=case[1])
Exemple #57
0
class Test_dasha(TestCase):
    """Unit tests for the functions of the 'prompt.dasha' module."""

    def __init__(self, methodName=None):
        """Set up the test case class for the system tests."""

        # Initialise via the TestCase base class.
        super(Test_dasha, self).__init__(methodName)

        # Build a fully populated, error-raising interpreter instance.
        interp = Interpreter(show_script=False, raise_relax_error=True)
        interp.populate_self()
        interp.on(verbose=False)
        self.interpreter = interp

        # Shortcut to the dasha user function class.
        self.dasha_fns = self.interpreter.dasha


    def test_create_argfail_algor(self):
        """Failure of the algor arg of the dasha.create() user function."""

        # Every non-string value must be rejected with RelaxStrError.
        for dtype in DATA_TYPES:
            if dtype[0] != 'str':
                self.assertRaises(RelaxStrError, self.dasha_fns.create, algor=dtype[1])


    def test_create_argfail_dir(self):
        """Failure of the dir arg of the dasha.create() user function."""

        # Everything other than None or a string must raise RelaxNoneStrError.
        for dtype in DATA_TYPES:
            if dtype[0] not in ('None', 'str'):
                self.assertRaises(RelaxNoneStrError, self.dasha_fns.create, dir=dtype[1])


    def test_create_argfail_force(self):
        """The force arg test of the dasha.create() user function."""

        # Every non-boolean value must be rejected with RelaxBoolError.
        for dtype in DATA_TYPES:
            if dtype[0] != 'bool':
                self.assertRaises(RelaxBoolError, self.dasha_fns.create, force=dtype[1])


    def test_execute_argfail_dir(self):
        """Failure of the dir arg of the dasha.execute() user function."""

        # Everything other than None or a string must raise RelaxNoneStrError.
        for dtype in DATA_TYPES:
            if dtype[0] not in ('None', 'str'):
                self.assertRaises(RelaxNoneStrError, self.dasha_fns.execute, dir=dtype[1])


    def test_execute_argfail_force(self):
        """The force arg test of the dasha.execute() user function."""

        # Every non-boolean value must be rejected with RelaxBoolError.
        for dtype in DATA_TYPES:
            if dtype[0] != 'bool':
                self.assertRaises(RelaxBoolError, self.dasha_fns.execute, force=dtype[1])


    def test_execute_argfail_binary(self):
        """Failure of the binary arg of the dasha.execute() user function."""

        # Every non-string value must be rejected with RelaxStrError.
        for dtype in DATA_TYPES:
            if dtype[0] != 'str':
                self.assertRaises(RelaxStrError, self.dasha_fns.execute, binary=dtype[1])


    def test_extract_argfail_dir(self):
        """Failure of the dir arg of the dasha.extract() user function."""

        # Everything other than None or a string must raise RelaxNoneStrError.
        for dtype in DATA_TYPES:
            if dtype[0] not in ('None', 'str'):
                self.assertRaises(RelaxNoneStrError, self.dasha_fns.extract, dir=dtype[1])
class Test_molmol(TestCase):
    """Unit tests for the functions of the 'prompt.molmol' module."""

    def __init__(self, methodName=None):
        """Set up the test case class for the system tests."""

        # Initialise via the TestCase base class.
        super(Test_molmol, self).__init__(methodName)

        # Build a fully populated, error-raising interpreter instance.
        interp = Interpreter(show_script=False, quit=False, raise_relax_error=True)
        interp.populate_self()
        interp.on(verbose=False)
        self.interpreter = interp

        # Shortcut to the molmol user function class.
        self.molmol_fns = self.interpreter.molmol


    def test_macro_apply_argfail_data_type(self):
        """The data_type arg test of the molmol.macro_apply() user function."""

        # Every non-string value must be rejected with RelaxStrError.
        for dtype in DATA_TYPES:
            if dtype[0] != 'str':
                self.assertRaises(RelaxStrError, self.molmol_fns.macro_apply, data_type=dtype[1])


    def test_macro_apply_argfail_style(self):
        """The style arg test of the molmol.macro_apply() user function."""

        # Every non-string value must be rejected with RelaxStrError.
        for dtype in DATA_TYPES:
            if dtype[0] != 'str':
                self.assertRaises(RelaxStrError, self.molmol_fns.macro_apply, data_type='a', style=dtype[1])


    def test_macro_apply_argfail_colour_start_name(self):
        """The colour_start_name arg test of the molmol.macro_apply() user function."""

        # Everything other than None or a string must raise RelaxNoneStrError.
        for dtype in DATA_TYPES:
            if dtype[0] not in ('None', 'str'):
                self.assertRaises(RelaxNoneStrError, self.molmol_fns.macro_apply, data_type='a', style='x', colour_start_name=dtype[1])


    def test_macro_apply_argfail_colour_start_rgb(self):
        """The colour_start_rgb arg test of the molmol.macro_apply() user function."""

        for dtype in DATA_TYPES:
            # Valid values: None, or a numeric list of exactly 3 elements.
            if dtype[0] == 'None':
                continue
            if dtype[0] in ('int list', 'float list', 'number list') and len(dtype[1]) == 3:
                continue

            # Everything else must raise RelaxNoneListNumError.
            self.assertRaises(RelaxNoneListNumError, self.molmol_fns.macro_apply, data_type='a', style='x', colour_start_rgb=dtype[1])


    def test_macro_apply_argfail_colour_end_name(self):
        """The colour_end_name arg test of the molmol.macro_apply() user function."""

        # Everything other than None or a string must raise RelaxNoneStrError.
        for dtype in DATA_TYPES:
            if dtype[0] not in ('None', 'str'):
                self.assertRaises(RelaxNoneStrError, self.molmol_fns.macro_apply, data_type='a', style='x', colour_end_name=dtype[1])


    def test_macro_apply_argfail_colour_end_rgb(self):
        """The colour_end_rgb arg test of the molmol.macro_apply() user function."""

        for dtype in DATA_TYPES:
            # Valid values: None, or a numeric list of exactly 3 elements.
            if dtype[0] == 'None':
                continue
            if dtype[0] in ('int list', 'float list', 'number list') and len(dtype[1]) == 3:
                continue

            # Everything else must raise RelaxNoneListNumError.
            self.assertRaises(RelaxNoneListNumError, self.molmol_fns.macro_apply, data_type='a', style='x', colour_end_rgb=dtype[1])


    def test_macro_apply_argfail_colour_list(self):
        """The colour_list arg test of the molmol.macro_apply() user function."""

        # Everything other than None or a string must raise RelaxNoneStrError.
        for dtype in DATA_TYPES:
            if dtype[0] not in ('None', 'str'):
                self.assertRaises(RelaxNoneStrError, self.molmol_fns.macro_apply, data_type='a', style='x', colour_list=dtype[1])
    def __init__(self, stage=1, results_dir=None, num_ens=10000, num_models=10, configs=None, snapshot_dir='snapshots', snapshot_min=None, snapshot_max=None, pseudo=None, noe_file=None, noe_norm=None, rdc_name=None, rdc_file=None, rdc_spin_id1_col=None, rdc_spin_id2_col=None, rdc_data_col=None, rdc_error_col=None, bond_length=None, bond_length_file=None, log=None, bucket_num=200, lower_lim_noe=0.0, upper_lim_noe=600.0, lower_lim_rdc=0.0, upper_lim_rdc=1.0):
        """Set up for the stereochemistry analysis.

        @keyword stage:             Stage of analysis (see the module docstring above for the options).
        @type stage:                int
        @keyword results_dir:       The optional directory to place all results files into.
        @type results_dir:          None or str
        @keyword num_ens:           Number of ensembles.
        @type num_ens:              int
        @keyword num_models:        Ensemble size.
        @type num_models:           int
        @keyword configs:           All the configurations.
        @type configs:              list of str
        @keyword snapshot_dir:      Snapshot directories (corresponding to the configurations).
        @type snapshot_dir:         list of str
        @keyword snapshot_min:      The number of the first snapshots (corresponding to the configurations).
        @type snapshot_min:         list of int
        @keyword snapshot_max:      The number of the last snapshots (corresponding to the configurations).
        @type snapshot_max:         list of int
        @keyword pseudo:            The list of pseudo-atoms.  Each element is a list of the pseudo-atom name and a list of all those atoms forming the pseudo-atom.  For example, pseudo = [["Q7", ["@H16", "@H17", "@H18"]], ["Q9", ["@H20", "@H21", "@H22"]]].
        @type pseudo:               list of list of str and list of str
        @keyword noe_file:          The name of the NOE restraint file.
        @type noe_file:             str
        @keyword noe_norm:          The NOE normalisation factor (equal to the sum of all NOEs squared).
        @type noe_norm:             float
        @keyword rdc_name:          The label for this RDC data set.
        @type rdc_name:             str
        @keyword rdc_file:          The name of the RDC file.
        @type rdc_file:             str
        @keyword rdc_spin_id1_col:  The spin ID column of the first spin in the RDC file.
        @type rdc_spin_id1_col:     None or int
        @keyword rdc_spin_id2_col:  The spin ID column of the second spin in the RDC file.
        @type rdc_spin_id2_col:     None or int
        @keyword rdc_data_col:      The data column of the RDC file.
        @type rdc_data_col:         int
        @keyword rdc_error_col:     The error column of the RDC file.
        @type rdc_error_col:        int
        @keyword bond_length:       The bond length value in meters.  This overrides the bond_length_file argument.
        @type bond_length:          float or None
        @keyword bond_length_file:  The file of bond lengths for each atom pair in meters.  The first and second columns must be the spin ID strings and the third column must contain the data.
        @type bond_length_file:     str or None
        @keyword log:               Log file output flag (only for certain stages).
        @type log:                  bool
        @keyword bucket_num:        Number of buckets for the distribution plots.
        @type bucket_num:           int
        @keyword lower_lim_noe:     Distribution plot limits.
        @type lower_lim_noe:        float
        @keyword upper_lim_noe:     Distribution plot limits.
        @type upper_lim_noe:        float
        @keyword lower_lim_rdc:     Distribution plot limits.
        @type lower_lim_rdc:        float
        @keyword upper_lim_rdc:     Distribution plot limits.
        @type upper_lim_rdc:        float
        """

        # Execution lock.
        status.exec_lock.acquire('auto stereochem analysis', mode='auto-analysis')

        # Set up the analysis status object.
        status.init_auto_analysis('stereochem', type='stereochem')
        status.current_analysis = 'auto stereochem analysis'

        # Store all the args.
        self.stage = stage
        self.results_dir = results_dir
        self.num_ens = num_ens
        self.num_models = num_models
        self.configs = configs
        self.snapshot_dir = snapshot_dir
        self.snapshot_min = snapshot_min
        self.snapshot_max = snapshot_max
        self.pseudo = pseudo
        self.noe_file = noe_file
        self.noe_norm = noe_norm
        self.rdc_name = rdc_name
        self.rdc_file = rdc_file
        self.rdc_spin_id1_col = rdc_spin_id1_col
        self.rdc_spin_id2_col = rdc_spin_id2_col
        self.rdc_data_col = rdc_data_col
        self.rdc_error_col = rdc_error_col
        self.bond_length = bond_length
        self.bond_length_file = bond_length_file
        self.log = log
        self.bucket_num = bucket_num
        self.lower_lim_noe = lower_lim_noe
        self.upper_lim_noe = upper_lim_noe
        self.lower_lim_rdc = lower_lim_rdc
        self.upper_lim_rdc = upper_lim_rdc

        # Load the interpreter (fully populated, raising relax errors rather than printing them).
        self.interpreter = Interpreter(show_script=False, quit=False, raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)

        # Create the results directory.
        if self.results_dir:
            mkdir_nofail(self.results_dir)

        # Or use the current working directory.
        else:
            self.results_dir = getcwd()

        # Create a directory for log files.
        if self.log:
            mkdir_nofail(self.results_dir + sep + "logs")

        # Finish and unlock execution.
        # NOTE(review): the lock acquired at the top is released here at the end
        # of __init__, so only this setup phase is serialised — the analysis
        # stages themselves appear to run outside the lock; confirm intended.
        status.auto_analysis['stereochem'].fin = True
        status.current_analysis = None
        status.exec_lock.release()
# Exemple #60
# 0
class Test_molmol(TestCase):
    """Unit tests for the functions of the 'prompt.molmol' module."""

    def __init__(self, methodName=None):
        """Set up the test case class for the system tests."""

        # Initialise via the TestCase base class.
        super(Test_molmol, self).__init__(methodName)

        # Build a fully populated, error-raising interpreter instance.
        interp = Interpreter(show_script=False, raise_relax_error=True)
        interp.populate_self()
        interp.on(verbose=False)
        self.interpreter = interp

        # Shortcut to the molmol user function class.
        self.molmol_fns = self.interpreter.molmol


    def test_macro_apply_argfail_data_type(self):
        """The data_type arg test of the molmol.macro_apply() user function."""

        # Every non-string value must be rejected with RelaxStrError.
        for dtype in DATA_TYPES:
            if dtype[0] != 'str':
                self.assertRaises(RelaxStrError, self.molmol_fns.macro_apply, data_type=dtype[1])


    def test_macro_apply_argfail_style(self):
        """The style arg test of the molmol.macro_apply() user function."""

        # Every non-string value must be rejected with RelaxStrError.
        for dtype in DATA_TYPES:
            if dtype[0] != 'str':
                self.assertRaises(RelaxStrError, self.molmol_fns.macro_apply, data_type='a', style=dtype[1])


    def test_macro_apply_argfail_colour_start_name(self):
        """The colour_start_name arg test of the molmol.macro_apply() user function."""

        # Everything other than None or a string must raise RelaxNoneStrError.
        for dtype in DATA_TYPES:
            if dtype[0] not in ('None', 'str'):
                self.assertRaises(RelaxNoneStrError, self.molmol_fns.macro_apply, data_type='a', style='x', colour_start_name=dtype[1])


    def test_macro_apply_argfail_colour_start_rgb(self):
        """The colour_start_rgb arg test of the molmol.macro_apply() user function."""

        for dtype in DATA_TYPES:
            # Valid values: None, or a numeric list of exactly 3 elements.
            if dtype[0] == 'None':
                continue
            if dtype[0] in ('int list', 'float list', 'number list') and len(dtype[1]) == 3:
                continue

            # Everything else must raise RelaxNoneListNumError.
            self.assertRaises(RelaxNoneListNumError, self.molmol_fns.macro_apply, data_type='a', style='x', colour_start_rgb=dtype[1])


    def test_macro_apply_argfail_colour_end_name(self):
        """The colour_end_name arg test of the molmol.macro_apply() user function."""

        # Everything other than None or a string must raise RelaxNoneStrError.
        for dtype in DATA_TYPES:
            if dtype[0] not in ('None', 'str'):
                self.assertRaises(RelaxNoneStrError, self.molmol_fns.macro_apply, data_type='a', style='x', colour_end_name=dtype[1])


    def test_macro_apply_argfail_colour_end_rgb(self):
        """The colour_end_rgb arg test of the molmol.macro_apply() user function."""

        for dtype in DATA_TYPES:
            # Valid values: None, or a numeric list of exactly 3 elements.
            if dtype[0] == 'None':
                continue
            if dtype[0] in ('int list', 'float list', 'number list') and len(dtype[1]) == 3:
                continue

            # Everything else must raise RelaxNoneListNumError.
            self.assertRaises(RelaxNoneListNumError, self.molmol_fns.macro_apply, data_type='a', style='x', colour_end_rgb=dtype[1])


    def test_macro_apply_argfail_colour_list(self):
        """The colour_list arg test of the molmol.macro_apply() user function."""

        # Everything other than None or a string must raise RelaxNoneStrError.
        for dtype in DATA_TYPES:
            if dtype[0] not in ('None', 'str'):
                self.assertRaises(RelaxNoneStrError, self.molmol_fns.macro_apply, data_type='a', style='x', colour_list=dtype[1])