示例#1
0
    def test_value_set_r1_rit(self):
        """Test of the pipe_control.value.set() function."""

        # Make the dispersion pipe the current data pipe.
        pipes.switch('relax_disp')

        # The experiment type and spectrometer frequency.
        experiment_type = 'R1rho'
        frequency = 800.1 * 1E6

        # Attach the experiment type to the pipe.
        set_exp_type(spectrum_id='test', exp_type=experiment_type)

        # Attach the frequency so that it can be looped over.
        spectrometer.set_frequency(id='test', frq=frequency, units='Hz')

        # Build the dictionary key for the R20 parameters.
        key = generate_r20_key(exp_type=experiment_type, frq=frequency)

        # First set the R2 value, mirroring the r2 behaviour.
        value.set(val=None, param='r2')
        self.assertEqual(cdp.mol[0].res[0].spin[0].r2[key], 10.0)

        # Then set the R1 value.
        value.set(val=None, param='r1')
        print(cdp.mol[0].res[0].spin[0])
        self.assertEqual(cdp.mol[0].res[0].spin[0].r1[key], 2.0)
    def test_copy_pull_ellipsoid(self):
        """Test the copying of an ellipsoid diffusion tensor (pulling the data from another pipe).

        The functions tested are both pipe_control.diffusion_tensor.copy() and
        prompt.diffusion_tensor.copy().
        """

        # Create the ellipsoid tensor in the source pipe.
        self.diffusion_tensor_fns.init(params=(13.9, 1.8, 0.7, 10.6, -23.3, 0.34), time_scale=1e-9, d_scale=1e7, angle_units='rad', param_types=0, fixed=True)

        # Move over to the destination pipe.
        pipes.switch('test')

        # Fetch the destination pipe object.
        target = pipes.get_pipe('test')

        # Pull the tensor across from the original pipe.
        self.diffusion_tensor_fns.copy(pipe_from='orig')

        # Verify the copied tensor, parameter by parameter.
        self.assertEqual(target.diff_tensor.type, 'ellipsoid')
        self.assertAlmostEqual(target.diff_tensor.tm * 1e9, 13.9, 14)
        self.assertEqual(target.diff_tensor.Da, 1.8e7)
        self.assertEqual(target.diff_tensor.Dr, 0.7)
        self.assertEqual(target.diff_tensor.alpha, 1.1752220392306203)
        self.assertEqual(target.diff_tensor.beta, 1.8327412287183442)
        self.assertEqual(target.diff_tensor.gamma, 0.34)
        self.assertEqual(target.diff_tensor.fixed, 1)
    def test_copy_pull_ellipsoid(self):
        """Test the copying of an ellipsoid diffusion tensor (pulling the data from another pipe).

        The functions tested are both pipe_control.diffusion_tensor.copy() and
        prompt.diffusion_tensor.copy().
        """

        # Build the ellipsoid tensor inside the source pipe.
        self.diffusion_tensor_fns.init(params=(13.9, 1.8, 0.7, 10.6, -23.3, 0.34), time_scale=1e-9, d_scale=1e7, angle_units='rad', param_types=0, fixed=True)

        # Switch to the 'test' pipe and grab a reference to it.
        pipes.switch('test')
        dp = pipes.get_pipe('test')

        # Pull the tensor in from the original pipe.
        self.diffusion_tensor_fns.copy(pipe_from='orig')

        # Check every element of the copied diffusion tensor.
        self.assertEqual(dp.diff_tensor.type, 'ellipsoid')
        self.assertAlmostEqual(dp.diff_tensor.tm * 1e9, 13.9, 14)
        self.assertEqual(dp.diff_tensor.Da, 1.8e7)
        self.assertEqual(dp.diff_tensor.Dr, 0.7)
        self.assertEqual(dp.diff_tensor.alpha, 1.1752220392306203)
        self.assertEqual(dp.diff_tensor.beta, 1.8327412287183442)
        self.assertEqual(dp.diff_tensor.gamma, 0.34)
        self.assertEqual(dp.diff_tensor.fixed, 1)
示例#4
0
    def test_value_set_r1_rit(self):
        """Test of the pipe_control.value.set() function."""

        # Switch over to the dispersion data pipe.
        pipes.switch('relax_disp')

        # The experimental setup.
        exp = 'R1rho'
        freq = 800.1 * 1E6

        # Register the experiment type with the pipe.
        set_exp_type(spectrum_id='test', exp_type=exp)

        # Register the spectrometer frequency for looping.
        spectrometer.set_frequency(id='test', frq=freq, units='Hz')

        # Construct the R20 dictionary key.
        r20_key = generate_r20_key(exp_type=exp, frq=freq)

        # Set and check r2 first.
        value.set(val=None, param='r2')
        self.assertEqual(cdp.mol[0].res[0].spin[0].r2[r20_key], 10.0)

        # Set and check r1 afterwards.
        value.set(val=None, param='r1')
        print(cdp.mol[0].res[0].spin[0])
        self.assertEqual(cdp.mol[0].res[0].spin[0].r1[r20_key], 2.0)
    def test_copy_pull_spheroid(self):
        """Test the copying of a spheroidal diffusion tensor (pulling the data from another pipe).

        The functions tested are both pipe_control.diffusion_tensor.copy() and
        prompt.diffusion_tensor.copy().
        """

        # Create a prolate spheroid tensor in the source pipe.
        self.diffusion_tensor_fns.init(params=(8.6, 1.3, 600, -20), time_scale=1e-9, d_scale=1e7, angle_units='deg', param_types=2, spheroid_type='prolate', fixed=False)

        # Move to the destination pipe.
        pipes.switch('test')

        # Grab the destination pipe object.
        target = pipes.get_pipe('test')

        # Pull the tensor from 'orig' into 'test'.
        self.diffusion_tensor_fns.copy(pipe_from='orig', pipe_to='test')

        # Verify every parameter of the copied tensor.
        self.assertEqual(target.diff_tensor.type, 'spheroid')
        self.assertEqual(target.diff_tensor.spheroid_type, 'prolate')
        self.assertAlmostEqual(target.diff_tensor.tm * 1e9, 8.6, 14)
        self.assertEqual(target.diff_tensor.Da, 5.2854122621564493e6)
        self.assertEqual(target.diff_tensor.theta, 5.2359877559829879)
        self.assertEqual(target.diff_tensor.phi, 2.7925268031909276)
        self.assertEqual(target.diff_tensor.fixed, 0)
    def test_copy_pull(self):
        """Test the copying of an alignment tensor (pulling the data from another pipe).

        The functions tested are both pipe_control.align_tensor.copy() and
        prompt.align_tensor.copy().
        """

        # Build the 'Pf1' alignment tensor in the source pipe.
        self.align_tensor_fns.init(tensor='Pf1', align_id='Pf1', params=(-16.6278, 6.13037, 7.65639, -1.89157, 19.2561), scale=1.0, angle_units='rad', param_types=0)

        # Move to the destination pipe.
        pipes.switch('test')

        # Grab the destination pipe object.
        target = pipes.get_pipe('test')

        # Pull the 'Pf1' tensor across from 'orig'.
        self.align_tensor_fns.copy(tensor_from='Pf1', pipe_from='orig', tensor_to='Pf1')

        # Verify each element of the copied alignment tensor.
        self.assertEqual(target.align_tensors[0].Sxx, -16.6278)
        self.assertEqual(target.align_tensors[0].Syy, 6.13037)
        self.assertEqual(target.align_tensors[0].Sxy, 7.65639)
        self.assertEqual(target.align_tensors[0].Sxz, -1.89157)
        self.assertAlmostEqual(target.align_tensors[0].Syz, 19.2561)
示例#7
0
    def model_statistics(self, model_info=None, spin_id=None, global_stats=None):
        """Return the k, n, and chi2 model statistics of the hybrid.

        k - number of parameters.
        n - number of data points.
        chi2 - the chi-squared value.

        Exactly one of the model_info or spin_id arguments must be supplied.


        @keyword model_info:    The model index.  This is zero for the global models or equal to the
                                global spin index (which covers the molecule, residue, and spin
                                indices).  This originates from the model_loop().
        @type model_info:       int
        @keyword spin_id:       The spin identification string.  Either this or the model_info
                                keyword argument must be supplied.
        @type spin_id:          None or str
        @keyword global_stats:  A parameter which determines if global or local statistics are
                                returned.  If None, then the appropriateness of global or local
                                statistics is automatically determined.
        @type global_stats:     None or bool
        @return:                The optimisation statistics, in tuple format, of the number of
                                parameters (k), the number of data points (n), and the chi-squared
                                value (chi2).
        @rtype:                 tuple of int, int, float
        @raise RelaxError:      If neither or both of model_info and spin_id are supplied.
        """

        # Bad argument combination - exactly one of the two arguments must be given.
        if model_info is None and spin_id is None:
            raise RelaxError("Either the model_info or spin_id argument must be supplied.")
        elif model_info is not None and spin_id is not None:
            raise RelaxError("The model_info arg " + repr(model_info) + " and spin_id arg " + repr(spin_id) + " clash.  Only one should be supplied.")

        # Initialise the totals.
        k_total = 0
        n_total = 0
        chi2_total = 0.0

        # Sum the statistics over all data pipes forming the hybrid.
        for pipe in cdp.hybrid_pipes:
            # Switch to the data pipe.
            pipes.switch(pipe)

            # The pipe type specific model statistics function.
            model_statistics = setup.get_specific_fn('model_stats', pipes.get_type(pipe))

            # Get the statistics for this pipe.
            k, n, chi2 = model_statistics(model_info=model_info, spin_id=spin_id, global_stats=global_stats)

            # Skip pipes with missing statistics.
            if k is None or n is None or chi2 is None:
                continue

            # Sum the stats.
            k_total += k
            n_total += n
            chi2_total += chi2

        # Return the totals.
        return k_total, n_total, chi2_total
    def test_copy_pull_spheroid(self):
        """Test the copying of a spheroidal diffusion tensor (pulling the data from another pipe).

        The functions tested are both pipe_control.diffusion_tensor.copy() and
        prompt.diffusion_tensor.copy().
        """

        # Build the prolate spheroid tensor inside the source pipe.
        self.diffusion_tensor_fns.init(params=(8.6, 1.3, 600, -20), time_scale=1e-9, d_scale=1e7, angle_units='deg', param_types=2, spheroid_type='prolate', fixed=False)

        # Switch to the 'test' pipe and fetch a reference to it.
        pipes.switch('test')
        dp = pipes.get_pipe('test')

        # Pull the tensor over, explicitly naming the target pipe.
        self.diffusion_tensor_fns.copy(pipe_from='orig', pipe_to='test')

        # Check the copied diffusion tensor element by element.
        self.assertEqual(dp.diff_tensor.type, 'spheroid')
        self.assertEqual(dp.diff_tensor.spheroid_type, 'prolate')
        self.assertAlmostEqual(dp.diff_tensor.tm * 1e9, 8.6, 14)
        self.assertEqual(dp.diff_tensor.Da, 5.2854122621564493e6)
        self.assertEqual(dp.diff_tensor.theta, 5.2359877559829879)
        self.assertEqual(dp.diff_tensor.phi, 2.7925268031909276)
        self.assertEqual(dp.diff_tensor.fixed, 0)
示例#9
0
    def test_copy_pull(self):
        """Test the copying of an alignment tensor (pulling the data from another pipe).

        The functions tested are both pipe_control.align_tensor.copy() and
        prompt.align_tensor.copy().
        """

        # Set up the 'Pf1' alignment tensor inside the source pipe.
        self.align_tensor_fns.init(tensor='Pf1', align_id='Pf1', params=(-16.6278, 6.13037, 7.65639, -1.89157, 19.2561), scale=1.0, angle_units='rad', param_types=0)

        # Switch to the 'test' pipe and fetch a reference to it.
        pipes.switch('test')
        dp = pipes.get_pipe('test')

        # Pull the 'Pf1' tensor in from the original pipe.
        self.align_tensor_fns.copy(tensor_from='Pf1', pipe_from='orig', tensor_to='Pf1')

        # Check each copied tensor element against the initialisation values.
        tensor = dp.align_tensors[0]
        self.assertEqual(tensor.Sxx, -16.6278)
        self.assertEqual(tensor.Syy, 6.13037)
        self.assertEqual(tensor.Sxy, 7.65639)
        self.assertEqual(tensor.Sxz, -1.89157)
        self.assertAlmostEqual(tensor.Syz, 19.2561)
示例#10
0
    def __init__(self, pipe_name=None, pipe_bundle=None, file_root='noe', results_dir=None, save_state=True):
        """Perform the steady-state NOE auto-analysis.

        To use this auto-analysis, a data pipe with all the required data needs to be set up.  This data pipe should contain the following:

            - All the spins loaded.
            - Unresolved spins deselected.
            - The NOE peak intensities from the saturated and reference spectra.
            - Either the baseplane noise RMSD values should be set or replicated spectra loaded.

        @keyword pipe_name:     The name of the data pipe containing all of the data for the analysis.
        @type pipe_name:        str
        @keyword pipe_bundle:   The data pipe bundle to associate all spawned data pipes with.
        @type pipe_bundle:      str
        @keyword file_root:     File root of the output files.
        @type file_root:        str
        @keyword results_dir:   The directory where results files are saved.
        @type results_dir:      str
        @keyword save_state:    A flag which if True will cause a relax save state to be created at the end of the analysis.
        @type save_state:       bool
        """

        # Execution lock - prevents concurrent analyses from running.
        status.exec_lock.acquire(pipe_bundle, mode='auto-analysis')

        # Set up the analysis status object.
        status.init_auto_analysis(pipe_bundle, type='noe')
        status.current_analysis = pipe_bundle

        # Store the args.
        self.save_state = save_state
        self.pipe_name = pipe_name
        self.pipe_bundle = pipe_bundle
        self.file_root = file_root
        self.results_dir = results_dir
        # The Grace plotting output directory, nested inside results_dir when given.
        if self.results_dir:
            self.grace_dir = results_dir + sep + 'grace'
        else:
            self.grace_dir = 'grace'

        # Data checks.
        self.check_vars()

        # Set the data pipe to the current data pipe (only switch if not already current).
        if self.pipe_name != cdp_name():
            switch(self.pipe_name)

        # Load the interpreter for executing user functions programmatically.
        self.interpreter = Interpreter(show_script=False, quit=False, raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)

        # Execute the analysis.
        self.run()

        # Finish and unlock execution - the lock is released even though run() may
        # have raised; NOTE(review): no try/finally guards the lock here - confirm
        # upstream handling if run() can fail.
        status.auto_analysis[self.pipe_bundle].fin = True
        status.current_analysis = None
        status.exec_lock.release()
示例#11
0
    def setUp(self):
        """Create the data pipes required by all the residue unit tests."""

        # Two model-free pipes: the original plus a target for the copy tests.
        for name in ['orig', 'test']:
            ds.add(pipe_name=name, pipe_type='mf')

        # Make 'orig' the current data pipe.
        pipes.switch('orig')
    def setUp(self):
        """Create the data pipes required by all the molecule unit tests."""

        # A model-free pipe for the tests, plus a second one for the copy tests.
        ds.add(pipe_name='orig', pipe_type='mf')
        ds.add(pipe_name='test', pipe_type='mf')

        # Start out with 'orig' as the current data pipe.
        pipes.switch('orig')
示例#13
0
    def pipe_switch(self, event):
        """Switch to the selected data pipe.

        @param event:   The wx event.
        @type event:    wx event
        """

        # Perform the switch to the pipe selected in the GUI.
        switch(self.selected_pipe)

        # Raise the window again - a workaround for MS Windows.
        wx.CallAfter(self.Raise)
示例#14
0
    def pipe_switch(self, event):
        """Switch the current data pipe to the one selected in the GUI.

        @param event:   The wx event.
        @type event:    wx event
        """

        # Make the selected pipe current.
        switch(self.selected_pipe)

        # Re-raise the window afterwards - a workaround for MS Windows.
        wx.CallAfter(self.Raise)
    def setUp(self):
        """Set up for all the relaxation data unit tests.

        Two 'mf' data pipes are created ('orig' and 'test') with 'orig' made
        current, and the Ap4Aase 600 MHz NOE reference values and errors are
        stored on the test instance for use by the individual tests.
        """

        # Add a data pipe to the data store.
        ds.add(pipe_name='orig', pipe_type='mf')

        # Add a second data pipe for copying tests.
        ds.add(pipe_name='test', pipe_type='mf')

        # Set the current data pipe to 'orig'.
        pipes.switch('orig')

        # The Ap4Aase 600 MHz NOE data.  None indicates a missing value for that spin.
        self.Ap4Aase_600_NOE_val = [None, None, None, 0.12479588727508535, 0.42240815792914105, 0.45281703194372114, 0.60727570079478255, 0.63871921623680161, None, None, None, 0.92927160307645906, 0.88832516377296256, 0.84945042565860407, 0.73027277534135793, 1.0529350986375761, 0.80025161548578949, 0.9225805138227271, None, 0.83690702968916975, 0.82750462671474634, 0.94498415442235661, None, 0.8935097799257431, 0.86456261089305875, 0.74923159572687958, 0.82028906681170666, 0.95078138769755005, 0.88196946543481614, 0.88560694800603623, None, 0.93583460370655014, 0.83709220285834895, 0.77065893466772672, 0.74898049254126575, 0.75473259762308997, None, 0.72339922138816593, 0.7409139945787665, 0.81036305956996824, 0.93259428996098348, None, None, None, 0.97484276729559749, 0.79870627747000578, 0.77846459298477833, 0.85891945210952814, 0.82545651205700832, 0.77308724653857397, 0.83873490797355599, 0.78962147119962445, None, None, 0.83658554344066838, 0.94444774229292805, 0.88892100988408906, 0.89074818049490534, 0.93798213161209065, 0.89579384792870853, 0.90689840050040216, 0.86826627855975114, 0.88998453873904826, 0.93193995326551327, 0.91380634390651083, 0.86088897739301773, 0.91200692603214106, 0.89667919287639897, 0.95894205272847033, 0.83602820831090652, 0.91434697423385458, 0.78332056488518564, 0.82655263496972042, 0.82607341906155618, 0.88040402181165589, 0.8440486006693505, None, 0.82043041764520075, 0.78703432521087158, 0.82699368240002646, 0.85174803791662423, None, 0.84885669819226628, 0.89183703777040746, None, None, 0.92544635676371245, 0.68656513923277818, 0.72560011690157689, 0.69795502821734268, 0.57079416490593249, None, 0.71563067539320835, None, 0.50533076429030188, None, 0.75242786769880365, 0.70895981542011155, None, 0.33272491279305588, None, 0.90094329970739295, 0.88707256046705585, 0.87992586160833552, 0.79195727809693339, 0.91358573817741873, 0.90242759014288332, 0.97529004068103053, 0.88453257922238127, 0.93282837259539797, 
0.80050317711189245, 0.87281500262478917, 0.82161925495299371, 0.75331847553368936, 0.86583135026629754, 0.8423490949685033, 0.75544980660586103, 0.89663290907940885, 0.86241114220463833, 0.865189333746754, 0.76072465838213588, 0.8627935013491016, 0.58872842981275242, None, 0.66265488495054237, None, 0.6175482423717148, 0.57128306878306878, None, 0.65783414097673298, 0.78352915459861194, 0.90660549423688019, 0.69562244671213447, 0.8986224709427465, 0.93456283575144872, None, 0.96438783132840478, 0.88494476363170493, None, 0.81450919045756742, None, 0.89273806940361811, 0.89784704409243976, 0.89409823895739682, None, 0.75626758626525903, None, 0.81861161936806948, 0.77704881157681638, 0.93492416343713725, 0.80529201617441148, None, 0.75214448729046979, 0.77939624899611037, 0.88957406230133507, 0.83119933716570005, 0.91593660447979419, 1.0269367764915405, 0.95254605768690148, 0.80783819302725635, 0.91264712309949736, 0.87414218862982118, None, 0.8457055541736257, 0.7976400443272097]
        # The corresponding NOE errors, index-aligned with the values above.
        self.Ap4Aase_600_NOE_err = [None, None, None, 0.020551827436105764, 0.02016346825976852, 0.026272719841642134, 0.032369427242382849, 0.024695665815261791, None, None, None, 0.059569089743604184, 0.044119641308479306, 0.060533543601110441, 0.054366133132835504, 0.10226383618816391, 0.05217226473549319, 0.040042471153624366, None, 0.043355836219158402, 0.070804231151989958, 0.045958118280731972, None, 0.040080159235713876, 0.025516073550159439, 0.031147400155540676, 0.029551538089533019, 0.058781807250738359, 0.053798141218956298, 0.04058564845028198, None, 0.049964861028149038, 0.056277751722041303, 0.055862610530979066, 0.062426501508834664, 0.037655461974785032, None, 0.037437746270300623, 0.031899198795702917, 0.05883492648236429, 0.049025044758579737, None, None, None, 0.061101732084737577, 0.039371899134381119, 0.047660696280181554, 0.047240554321579087, 0.023582035074293468, 0.047687722096308678, 0.023196186303095979, 0.030389229790461863, None, None, 0.033964837875935606, 0.039442739174763576, 0.02652568603842493, 0.044926643986551523, 0.039072857662117114, 0.045558051667044089, 0.034817385027641354, 0.037932460586176255, 0.04416504039747577, 0.046692690948839703, 0.065323967027699076, 0.039701400872345881, 0.042298623934643773, 0.079080932418819722, 0.067928676079858738, 0.020489325306093879, 0.036174251087283844, 0.024485137678863723, 0.059736004586569386, 0.028861627878037942, 0.029401903503258862, 0.045460315556323593, None, 0.02537282712438679, 0.053007727294934082, 0.061355070294094288, 0.047992183164804886, None, 0.056269021809127781, 0.063703951001952613, None, None, 0.10564633090541133, 0.057727260927784539, 0.046307285409240992, 0.043081197184003071, 0.040201168842464927, None, 0.029680853171297025, None, 0.13110007530113085, None, 0.055416928281966663, 0.066345573350012677, None, 0.11395960586085771, None, 0.079438544471658351, 0.064210071948445219, 0.063995466359721184, 0.058755076446370007, 0.06960783712536886, 0.079202821774062071, 
0.055639087817357311, 0.044727670551887241, 0.054122883235643848, 0.039242551779018732, 0.042620700259960162, 0.044970961360827032, 0.16191971307506142, 0.03075410906877173, 0.029862890782436795, 0.020403875440027055, 0.027212286424039007, 0.030489671728569135, 0.036801376842568193, 0.04239547098051423, 0.031102853835183651, 0.019647495159139456, None, 0.018633796971680151, None, 0.016649721653701773, 0.021020898014190763, None, 0.038324025574230064, 0.039477530263583097, 0.073251142306895001, 0.029252551207069242, 0.029800820512347403, 0.04190589192257109, None, 0.051973164180429951, 0.037885698528167658, None, 0.028795497444627904, None, 0.038102768163308702, 0.047105957099339957, 0.031888501753176597, None, 0.037573735892664904, None, 0.045323274177265373, 0.067437069694904428, 0.077974178652576179, 0.047904023844685867, None, 0.046470703715260887, 0.056734780619811562, 0.040848702413964474, 0.031770841283563625, 0.045827271513353023, 0.062189928910417011, 0.037543295138971443, 0.039174104595193118, 0.0406911452975608, 0.026051431683394789, None, 0.033339638549660086, 0.028434080259488268]
示例#16
0
    def test_switch(self):
        """Test the switching of the current data pipe.

        The function tested is pipe_control.pipes.switch().
        """

        # Switch to each pipe in turn, checking the current pipe name afterwards.
        for name in ['orig', 'empty']:
            pipes.switch(name)
            self.assertEqual(pipes.cdp_name(), name)
示例#17
0
    def test_switch(self):
        """Test the switching of the current data pipe.

        The function tested is pipe_control.pipes.switch().
        """

        # Make 'orig' the current pipe and verify.
        pipes.switch('orig')
        self.assertEqual(pipes.cdp_name(), 'orig')

        # Now make 'empty' the current pipe and verify again.
        pipes.switch('empty')
        self.assertEqual(pipes.cdp_name(), 'empty')
示例#18
0
    def setUp(self):
        """Set up for all the data pipe unit tests."""

        # The 'orig' model-free pipe with a simple test object attached.
        ds.add(pipe_name='orig', pipe_type='mf')
        ds['orig'].x = 1

        # A test object on the single spin system of the 'orig' pipe.
        ds['orig'].mol[0].res[0].spin[0].num = 1

        # An empty pipe, used by the 'eliminate_unused_pipes' test.
        ds.add(pipe_name='empty', pipe_type='mf')

        # Make 'orig' the current data pipe.
        pipes.switch('orig')
示例#19
0
    def setUp(self):
        """Prepare the data store for the data pipe unit tests."""

        # Create the primary 'orig' pipe.
        ds.add(pipe_name='orig', pipe_type='mf')

        # Attach a plain object and a spin-level object to 'orig'.
        ds['orig'].x = 1
        ds['orig'].mol[0].res[0].spin[0].num = 1

        # Create the 'empty' pipe needed by the 'eliminate_unused_pipes' test.
        ds.add(pipe_name='empty', pipe_type='mf')

        # Switch so that 'orig' is current.
        pipes.switch('orig')
示例#20
0
    def test_deletion(self):
        """Test the deletion of a data pipe.

        The function tested is pipe_control.pipes.delete()
        """

        # Set the current pipe to the 'orig' data pipe.
        name = 'orig'
        pipes.switch(name)

        # Delete the 'orig' data pipe.
        pipes.delete(name)

        # Test that the data pipe no longer exists.  The deprecated assert_()
        # alias (removed in Python 3.12) is replaced by assertNotIn().
        self.assertNotIn(name, ds)

        # Test that the current pipe is None (as the current pipe was deleted).
        self.assertEqual(pipes.cdp_name(), None)
示例#21
0
    def test_deletion(self):
        """Test the deletion of a data pipe.

        The function tested is pipe_control.pipes.delete()
        """

        # Set the current pipe to the 'orig' data pipe.
        name = 'orig'
        pipes.switch(name)

        # Delete the 'orig' data pipe.
        pipes.delete(name)

        # Test that the data pipe no longer exists.  The deprecated assert_()
        # alias (removed in Python 3.12) is replaced by assertNotIn().
        self.assertNotIn(name, ds)

        # Test that the current pipe is None (as the current pipe was deleted).
        self.assertEqual(pipes.cdp_name(), None)
示例#22
0
    def test_partition_params4(self):
        """Fourth test of the pipe_control.value.partition_params() function."""

        # Switch to the model-free data pipe.
        pipes.switch('mf')

        # One diffusion parameter followed by two spin parameters.
        params = ['Dx', 's2', 'csa']
        values = [1e7, 0.8, -160e-6]

        # Split the parameters into the spin and non-spin sets.
        spin_params, spin_values, other_params, other_values = value.partition_params(values, params)

        # Check each of the four partitions.
        self.assertEqual(spin_params, ['s2', 'csa'])
        self.assertEqual(spin_values, [0.8, -160e-6])
        self.assertEqual(other_params, ['Dx'])
        self.assertEqual(other_values, [1e7])
示例#23
0
    def test_partition_params2(self):
        """Second test of the pipe_control.value.partition_params() function."""

        # Switch to the model-free data pipe.
        pipes.switch('mf')

        # A single diffusion parameter and its value.
        params = ['Dx']
        values = [1e7]

        # Split the parameters into the spin and non-spin sets.
        spin_params, spin_values, other_params, other_values = value.partition_params(values, params)

        # The spin partitions must be empty, the others hold the diffusion parameter.
        self.assertEqual(spin_params, [])
        self.assertEqual(spin_values, [])
        self.assertEqual(other_params, ['Dx'])
        self.assertEqual(other_values, [1e7])
示例#24
0
    def update_pipes(self, event=None):
        """Update the spin view data pipe selector.

        @keyword event: The wx event.
        @type event:    wx event
        """

        # Show the busy cursor while the selector is rebuilt.
        wx.BeginBusyCursor()

        # Determine the pipe to display - from the event if one was supplied,
        # otherwise from the current data pipe.
        if event:
            selected = gui_to_str(self.pipe_name.GetString(event.GetSelection()))
        else:
            selected = cdp_name()

        # A switch is only required when an event supplies a different pipe.
        switch_required = bool(event) and selected != cdp_name()

        # Fall back to an empty name.
        if not selected:
            selected = ''

        # Repopulate the selector with all current pipe names.
        self.pipe_name.Clear()
        for name in pipe_names():
            self.pipe_name.Append(str_to_gui(name))

        # Perform the data pipe switch if required.
        if switch_required:
            switch(selected)

        # Display the chosen pipe name.
        self.pipe_name.SetValue(str_to_gui(selected))

        # Restore the cursor.
        if wx.IsBusy():
            wx.EndBusyCursor()
示例#25
0
    def test_partition_params2(self):
        """Second test of the pipe_control.value.partition_params() function."""

        # Make 'mf' the current data pipe.
        pipes.switch('mf')

        # Only the Dx diffusion parameter is supplied.
        param = ['Dx']
        val = [1e7]

        # Perform the partitioning.
        spin_params, spin_values, other_params, other_values = value.partition_params(val, param)

        # No spin parameters, and Dx lands in the 'other' partitions.
        self.assertEqual(spin_params, [])
        self.assertEqual(spin_values, [])
        self.assertEqual(other_params, ['Dx'])
        self.assertEqual(other_values, [1e7])
示例#26
0
    def test_partition_params4(self):
        """Fourth test of the pipe_control.value.partition_params() function."""

        # Make 'mf' the current data pipe.
        pipes.switch('mf')

        # A mix of a diffusion parameter and spin-specific parameters.
        param = ['Dx', 's2', 'csa']
        val = [1e7, 0.8, -160e-6]

        # Perform the partitioning.
        spin_params, spin_values, other_params, other_values = value.partition_params(val, param)

        # The spin partitions hold s2/csa, the 'other' partitions hold Dx.
        self.assertEqual(spin_params, ['s2', 'csa'])
        self.assertEqual(spin_values, [0.8, -160e-6])
        self.assertEqual(other_params, ['Dx'])
        self.assertEqual(other_values, [1e7])
示例#27
0
    def update_pipes(self, event=None):
        """Update the spin view data pipe selector.

        @keyword event: The wx event.
        @type event:    wx event
        """

        # Busy cursor on while the widget is rebuilt.
        wx.BeginBusyCursor()

        # Flag tracking whether a pipe switch is needed.
        needs_switch = False

        # An event carries the user's pipe selection.
        if event:
            pipe = gui_to_str(self.pipe_name.GetString(event.GetSelection()))
            # Only switch when the selection differs from the current pipe.
            if pipe != cdp_name():
                needs_switch = True
        # No event - just refresh using the current pipe.
        else:
            pipe = cdp_name()

        # Guard against a missing name.
        if not pipe:
            pipe = ''

        # Rebuild the selector contents from scratch.
        self.pipe_name.Clear()
        for name in pipe_names():
            self.pipe_name.Append(str_to_gui(name))

        # Carry out the pipe switch, if flagged.
        if needs_switch:
            switch(pipe)

        # Show the pipe name in the selector.
        self.pipe_name.SetValue(str_to_gui(pipe))

        # Busy cursor off again.
        if wx.IsBusy():
            wx.EndBusyCursor()
示例#28
0
    def _hybridise(self, hybrid=None, pipe_list=None):
        """Create the hybrid data pipe.

        @keyword hybrid:    The name of the new hybrid data pipe.
        @type hybrid:       str
        @keyword pipe_list: The list of data pipes that the hybrid is composed of.
        @type pipe_list:    list of str
        """

        # Test if the hybrid data pipe already exists.
        if hybrid in pipes.pipe_names():
            raise RelaxPipeError(hybrid)

        # Loop over the pipes to be hybridised and check them.
        pipe_type = pipes.get_type(pipe_list[0])
        for pipe in pipe_list:
            # Switch to the data pipe.
            pipes.switch(pipe)

            # Test if the pipe exists.
            check_pipe()

            # Check that the pipe types match.
            if pipes.get_type() != pipe_type:
                raise RelaxError("The data pipe types do not match.")

            # Test if sequence data is loaded.
            if not exists_mol_res_spin_data():
                raise RelaxNoSequenceError

        # Check that the sequence data of each pipe matches that of the first.
        # Bug fix: the original used the constant index pipe_list[1] here, so
        # pipes beyond the second were never sequence-checked.
        for i in range(1, len(pipe_list)):
            compare_sequence(pipe_list[0], pipe_list[i])

        # Create the data pipe.
        pipes.create(pipe_name=hybrid, pipe_type='hybrid')

        # Store the pipe list forming the hybrid.
        cdp.hybrid_pipes = pipe_list
示例#29
0
File: hybrid.py  Project: tlinnet/relax
    def _hybridise(self, hybrid=None, pipe_list=None):
        """Create the hybrid data pipe.

        @keyword hybrid:    The name of the new hybrid data pipe.
        @type hybrid:       str
        @keyword pipe_list: The list of data pipes that the hybrid is composed of.
        @type pipe_list:    list of str
        """

        # Test if the hybrid data pipe already exists.
        if hybrid in pipes.pipe_names():
            raise RelaxPipeError(hybrid)

        # Loop over the pipes to be hybridised and check them.
        pipe_type = pipes.get_type(pipe_list[0])
        for pipe in pipe_list:
            # Switch to the data pipe.
            pipes.switch(pipe)

            # Test if the pipe exists.
            check_pipe()

            # Check that the pipe types match.
            if pipes.get_type() != pipe_type:
                raise RelaxError("The data pipe types do not match.")

            # Test if sequence data is loaded.
            if not exists_mol_res_spin_data():
                raise RelaxNoSequenceError

        # Check that the sequence data of each pipe matches that of the first.
        # Bug fix: the original used the constant index pipe_list[1] here, so
        # pipes beyond the second were never sequence-checked.
        for i in range(1, len(pipe_list)):
            compare_sequence(pipe_list[0], pipe_list[i])

        # Create the data pipe.
        pipes.create(pipe_name=hybrid, pipe_type='hybrid')

        # Store the pipe list forming the hybrid.
        cdp.hybrid_pipes = pipe_list
    def test_copy_pull_sphere(self):
        """Test the copying of a spherical diffusion tensor (pulling the data from another pipe).

        The functions tested are both pipe_control.diffusion_tensor.copy() and
        prompt.diffusion_tensor.copy().
        """

        # Set up the spherical tensor in the current pipe.
        self.diffusion_tensor_fns.init(params=1e-9)

        # Make 'test' the current data pipe and grab a direct reference to it.
        pipes.switch('test')
        dp = pipes.get_pipe('test')

        # Pull the tensor across from the 'orig' pipe.
        self.diffusion_tensor_fns.copy(pipe_from='orig')

        # Verify the copied diffusion tensor data.
        self.assertEqual(dp.diff_tensor.type, 'sphere')
        self.assertEqual(dp.diff_tensor.tm, 1e-9)
        self.assertEqual(dp.diff_tensor.fixed, 1)
    def test_copy_pull_sphere(self):
        """Test the copying of a spherical diffusion tensor (pulling the data from another pipe).

        The functions tested are both pipe_control.diffusion_tensor.copy() and
        prompt.diffusion_tensor.copy().
        """

        # Set up the spherical tensor in the current pipe.
        self.diffusion_tensor_fns.init(params=1e-9)

        # Make 'test' the current data pipe and grab a direct reference to it.
        pipes.switch('test')
        dp = pipes.get_pipe('test')

        # Pull the tensor across from the 'orig' pipe.
        self.diffusion_tensor_fns.copy(pipe_from='orig')

        # Verify the copied diffusion tensor data.
        self.assertEqual(dp.diff_tensor.type, 'sphere')
        self.assertEqual(dp.diff_tensor.tm, 1e-9)
        self.assertEqual(dp.diff_tensor.fixed, 1)
示例#32
0
文件: noe.py 项目: tlinnet/relax
    def __init__(self,
                 pipe_name=None,
                 pipe_bundle=None,
                 file_root='noe',
                 results_dir=None,
                 save_state=True):
        """Perform the full steady-state NOE auto-analysis.

        To use this auto-analysis, a data pipe with all the required data needs to be set up.  This data pipe should contain the following:

            - All the spins loaded.
            - Unresolved spins deselected.
            - The NOE peak intensities from the saturated and reference spectra.
            - Either the baseplane noise RMSD values should be set or replicated spectra loaded.

        @keyword pipe_name:     The name of the data pipe containing all of the data for the analysis.
        @type pipe_name:        str
        @keyword pipe_bundle:   The data pipe bundle to associate all spawned data pipes with.
        @type pipe_bundle:      str
        @keyword file_root:     File root of the output files.
        @type file_root:        str
        @keyword results_dir:   The directory where results files are saved.
        @type results_dir:      str
        @keyword save_state:    A flag which if True will cause a relax save state to be created at the end of the analysis.
        @type save_state:       bool
        """

        # Initial printout.
        title(file=sys.stdout, text="Steady-state NOE auto-analysis")

        # Safely execute the full protocol.
        try:
            # Acquire the execution lock before any state is modified.
            status.exec_lock.acquire(pipe_bundle, mode='auto-analysis')

            # Set up the analysis status object.
            status.init_auto_analysis(pipe_bundle, type='noe')
            status.current_analysis = pipe_bundle

            # Store the args.
            self.save_state = save_state
            self.pipe_name = pipe_name
            self.pipe_bundle = pipe_bundle
            self.file_root = file_root
            self.results_dir = results_dir
            if self.results_dir:
                self.grace_dir = results_dir + sep + 'grace'
            else:
                # Fall back to a relative 'grace' directory when no results dir is given.
                self.grace_dir = 'grace'

            # Data checks.
            self.check_vars()

            # Switch to the analysis pipe only if it is not already current.
            if self.pipe_name != cdp_name():
                switch(self.pipe_name)

            # Load the interpreter.
            self.interpreter = Interpreter(show_script=False,
                                           raise_relax_error=True)
            self.interpreter.populate_self()
            self.interpreter.on(verbose=False)

            # Execute.
            self.run()

        # Clean up - this runs even when the protocol above raises.
        finally:
            # Final printout.
            title(file=sys.stdout,
                  text="Completion of the steady-state NOE auto-analysis")
            print_elapsed_time(time() - status.start_time)

            # Finish and unlock execution.
            # NOTE(review): self.pipe_bundle is only set inside the try block; if
            # exec_lock.acquire() raises, this would fail with AttributeError - confirm.
            status.auto_analysis[self.pipe_bundle].fin = True
            status.current_analysis = None
            status.exec_lock.release()
示例#33
0
    def setUp(self):
        """Set up for all the spin unit tests.

        The data contained within the 'orig' data pipe is:

        ID      Molecule        Res number      Res name        Spin number     Spin name
        0,0,0   Old mol         1               Ala             111             C8
        0,0,1   Old mol         1               Ala             6               C19
        0,0,2   Old mol         1               Ala             7               C21
        0,0,3   Old mol         1               Ala             8               C24
        0,0,4   Old mol         1               Ala             9               C26
        0,1,0   Old mol         2               Arg             78              NH
        1,0,0   New mol         5               Lys             239             NH
        1,1,0   New mol         6               Thr             None            None
        1,1,1   New mol         6               Thr             3239            NH

        The IDs correspond to the molecule, residue and spin indices.
        """

        # Create the 'orig' pipe, plus a 'test' pipe for the copying tests.
        for pipe_name in ('orig', 'test'):
            ds.add(pipe_name=pipe_name, pipe_type='mf')

        # Make 'orig' the current data pipe.
        pipes.switch('orig')

        # The first molecule.
        mol1 = cdp.mol[0]
        mol1.name = 'Old mol'

        # Its first residue, with data in the initial spin container.
        ala = mol1.res[0]
        ala.num = 1
        ala.name = 'Ala'
        ala.spin[0].num = 111
        ala.spin[0].name = 'C8'
        ala.spin[0].x = 1

        # The remaining Ala spins.
        for spin_name, spin_num in (('C19', 6), ('C21', 7), ('C24', 8), ('C26', 9)):
            ala.spin.add_item(spin_name, spin_num)

        # A second residue.
        mol1.res.add_item('Arg', 2)
        mol1.res[1].spin[0].num = 78
        mol1.res[1].spin[0].name = 'NH'

        # A second molecule with two residues, the second holding two spins.
        cdp.mol.add_item('New mol')
        mol2 = cdp.mol[1]
        mol2.res[0].num = 5
        mol2.res[0].name = 'Lys'
        mol2.res[0].spin[0].num = 239
        mol2.res[0].spin[0].name = 'NH'
        mol2.res.add_item('Thr', 6)
        mol2.res[1].spin.add_item(None, 1433)
        mol2.res[1].spin.add_item('NH', 3239)

        # A third molecule with a single residue.
        cdp.mol.add_item('3rd')
        mol3 = cdp.mol[2]
        mol3.res[0].num = 13
        mol3.res[0].name = 'Gly'
        mol3.res[0].spin[0].x = 'hello'

        # Synchronise the metadata with the new structure.
        metadata_update()
示例#34
0
    def test_tp02_data_to_tp02(self):
        """Test the GUI analysis with the relaxation dispersion 'TP02' model fitting to the 'TP02' synthetic data."""

        # The paths to the data files.
        data_path = status.install_path + sep + 'test_suite' + sep + 'shared_data' + sep + 'dispersion' + sep + 'r1rho_off_res_tp02' + sep

        # Simulate the new analysis wizard, selecting the fixed time CPMG experiment.
        analysis = self.new_analysis_wizard(analysis_type='disp')

        # Change the results directory.
        analysis.field_results_dir.SetValue(str_to_gui(ds.tmpdir))

        # Create the sequence data (two Trp backbone nitrogens).
        self._execute_uf(uf_name='spin.create',
                         res_name='Trp',
                         res_num=1,
                         spin_name='N')
        interpreter.flush()
        self._execute_uf(uf_name='spin.create',
                         res_name='Trp',
                         res_num=2,
                         spin_name='N')
        interpreter.flush()
        self._execute_uf(uf_name='sequence.display')
        interpreter.flush()

        # Set up the nuclear isotopes.
        analysis.spin_isotope()
        uf_store['spin.isotope'].page.SetValue('spin_id', '')
        uf_store['spin.isotope'].wizard._go_next()
        interpreter.flush()  # Required because of the asynchronous uf call.

        # Load the chemical shift data.
        self._execute_uf(uf_name='chemical_shift.read',
                         file='ref_500MHz.list',
                         dir=data_path)
        interpreter.flush()

        # The spectral data: one reference (None) plus eleven spin-lock values
        # per field.  NOTE(review): spin-lock values appear to be in Hz - confirm.
        frq = [500, 800]
        frq_label = ['500MHz', '800MHz']
        error = 200000.0
        data = []
        spin_lock = [
            None, 1000.0, 1500.0, 2000.0, 2500.0, 3000.0, 3500.0, 4000.0,
            4500.0, 5000.0, 5500.0, 6000.0
        ]
        for frq_index in range(len(frq)):
            for spin_lock_index in range(len(spin_lock)):
                # The reference.
                if spin_lock[spin_lock_index] == None:
                    id = 'ref_%s' % frq_label[frq_index]
                    file = "ref_%s.list" % frq_label[frq_index]

                # Normal data.
                else:
                    id = "nu_%s_%s" % (spin_lock[spin_lock_index],
                                       frq_label[frq_index])
                    file = "nu_%s_%s.list" % (spin_lock[spin_lock_index],
                                              frq_label[frq_index])

                # Append the data as [spectrum ID, file name, spin-lock value, field].
                data.append(
                    [id, file, spin_lock[spin_lock_index], frq[frq_index]])

        # Load the R1 data for each field.
        for frq_index in range(len(frq)):
            label = 'R1_%s' % frq_label[frq_index]
            self._execute_uf(uf_name='relax_data.read',
                             ri_id=label,
                             ri_type='R1',
                             frq=frq[frq_index] * 1e6,
                             file='%s.out' % label,
                             dir=data_path,
                             mol_name_col=1,
                             res_num_col=2,
                             res_name_col=3,
                             spin_num_col=4,
                             spin_name_col=5,
                             data_col=6,
                             error_col=7)
            interpreter.flush()

        # Set up the peak intensity wizard.
        analysis.peak_wizard_launch(None)
        wizard = analysis.peak_wizard

        # The spectra.
        for id, file, field, H_frq in data:
            wizard.setup_page(page='read',
                              file=data_path + file,
                              spectrum_id=id,
                              int_method='height',
                              dim=1)
            wizard._apply(None)
        wizard._skip(None)

        # The error type.
        page = wizard.get_page(wizard.page_indices['err_type'])
        page.selection = 'rmsd'
        wizard._go_next(None)

        # Baseplane RMSD (the same error value for every spectrum).
        for id, file, field, H_frq in data:
            wizard.setup_page(page='rmsd', spectrum_id=id, error=error)
            wizard._apply(None)
        wizard._skip(None)

        # The experiment type.
        for id, file, field, H_frq in data:
            wizard.setup_page(page='exp_type',
                              spectrum_id=id,
                              exp_type='R1rho')
            wizard._apply(None)
        wizard._skip(None)

        # Set the spectrometer frequency.
        for id, file, field, H_frq in data:
            wizard.setup_page(page='spectrometer_frequency',
                              id=id,
                              frq=H_frq,
                              units='MHz')
            wizard._apply(None)
        wizard._skip(None)

        # Set the relaxation times.
        for id, file, field, H_frq in data:
            wizard.setup_page(page='relax_time', spectrum_id=id, time=0.1)
            wizard._apply(None)
        wizard._skip(None)

        # Set the relaxation dispersion spin-lock field strength (nu1).
        for id, file, field, H_frq in data:
            wizard.setup_page(page='spin_lock_field',
                              spectrum_id=id,
                              field=field)
            wizard._apply(None)
        wizard._skip(None)

        # Set the spin-lock offset.
        for id, file, field, H_frq in data:
            wizard.setup_page(page='spin_lock_offset',
                              spectrum_id=id,
                              offset=110.0)
            wizard._apply(None)
        wizard._skip(None)

        # Flush all wx events (to allow the spectrum list GUI element to populate all its rows).
        wx.Yield()

        # Simulate right clicking in the spectrum list element to test the popup menu.
        analysis.peak_intensity.on_right_click(Fake_right_click())

        # Simulate the popup menu entries to catch bugs there (just apply the user functions with the currently set values).
        # FIXME: skipping the checks for certain wxPython bugs.
        if status.relax_mode != 'gui' and wx.version(
        ) != '2.9.4.1 gtk2 (classic)':
            analysis.peak_intensity.action_relax_disp_spin_lock_field(item=4)
            uf_store['relax_disp.spin_lock_field'].wizard._go_next()
            interpreter.flush()
            analysis.peak_intensity.action_relax_disp_exp_type(item=5)
            uf_store['relax_disp.exp_type'].wizard._go_next()
            interpreter.flush()
            analysis.peak_intensity.action_relax_disp_relax_time(item=0)
            uf_store['relax_disp.relax_time'].wizard._go_next()
            interpreter.flush()
            analysis.peak_intensity.action_spectrometer_frq(item=10)
            uf_store['spectrometer.frequency'].wizard._go_next()
            interpreter.flush()

        # Deselect all but the 'TP02' model.
        models = [MODEL_R2EFF, MODEL_NOREX, MODEL_TP02]
        for i in range(len(analysis.model_field.models_stripped)):
            if analysis.model_field.models_stripped[i] in models:
                analysis.model_field.select[i] = True
            else:
                analysis.model_field.select[i] = False
        analysis.model_field.modify()

        # Set the grid search size and number of MC sims (kept small so the test runs quickly).
        analysis.grid_inc.SetValue(4)
        analysis.mc_sim_num.SetValue(3)

        # Optimisation speedups.
        analysis.opt_func_tol = 1e-10
        analysis.opt_max_iterations = 10000

        # Execute relax by simulating a click on the execute button.
        analysis.execute(
            wx.CommandEvent(wx.wxEVT_COMMAND_BUTTON_CLICKED,
                            analysis.button_exec_relax.GetId()))

        # Wait for execution to complete.
        analysis.thread.join()

        # Flush all wx events.
        wx.Yield()

        # Exceptions in the thread.
        self.check_exceptions()

        # Check the relax controller.
        # FIXME: skipping the checks for certain wxPython bugs.
        if status.relax_mode != 'gui' and wx.version(
        ) != '2.9.4.1 gtk2 (classic)':
            self.assertEqual(self.app.gui.controller.mc_gauge_rx.GetValue(),
                             100)
            self.assertEqual(self.app.gui.controller.main_gauge.GetValue(),
                             100)

        # The original parameters used to synthesise the data.
        r1rho_prime = [[10.0, 15.0], [12.0, 18.0]]
        pA = 0.7654321
        kex = 1234.56789
        delta_omega = [7.0, 9.0]

        # The R20 keys.
        r20_key1 = generate_r20_key(exp_type=EXP_TYPE_R1RHO, frq=500e6)
        r20_key2 = generate_r20_key(exp_type=EXP_TYPE_R1RHO, frq=800e6)

        # Switch to the 'TP02' model data pipe, then check for each spin.
        switch("%s - %s" % ('TP02', get_bundle()))
        spin_index = 0
        for spin, spin_id in spin_loop(return_id=True):
            # Printout.
            print("\nSpin %s." % spin_id)

            # Check the fitted parameters (scaled down to loosen the tolerance).
            self.assertAlmostEqual(spin.r2[r20_key1] / 10,
                                   r1rho_prime[spin_index][0] / 10, 4)
            self.assertAlmostEqual(spin.r2[r20_key2] / 10,
                                   r1rho_prime[spin_index][1] / 10, 4)
            self.assertAlmostEqual(spin.dw, delta_omega[spin_index], 3)
            self.assertAlmostEqual(spin.kex / 1000.0, kex / 1000.0, 3)

            # Increment the spin index.
            spin_index += 1
示例#35
0
    def setUp(self):
        """Set up for all the spin unit tests.

        The data contained within the 'orig' data pipe is:

        ID      Molecule        Res number      Res name        Spin number     Spin name
        0,0,0   Old mol         1               Ala             111             C8
        0,0,1   Old mol         1               Ala             6               C19
        0,0,2   Old mol         1               Ala             7               C21
        0,0,3   Old mol         1               Ala             8               C24
        0,0,4   Old mol         1               Ala             9               C26
        0,1,0   Old mol         2               Arg             78              NH
        1,0,0   New mol         5               Lys             239             NH
        1,1,0   New mol         6               Thr             None            None
        1,1,1   New mol         6               Thr             3239            NH

        The IDs correspond to the molecule, residue and spin indices.
        """

        # Create the 'orig' pipe, plus a 'test' pipe for the copying tests.
        for pipe_name in ('orig', 'test'):
            ds.add(pipe_name=pipe_name, pipe_type='mf')

        # Make 'orig' the current data pipe.
        pipes.switch('orig')

        # The first molecule.
        mol1 = cdp.mol[0]
        mol1.name = 'Old mol'

        # Its first residue, with data in the initial spin container.
        ala = mol1.res[0]
        ala.num = 1
        ala.name = 'Ala'
        ala.spin[0].num = 111
        ala.spin[0].name = 'C8'
        ala.spin[0].x = 1

        # The remaining Ala spins.
        for spin_name, spin_num in (('C19', 6), ('C21', 7), ('C24', 8), ('C26', 9)):
            ala.spin.add_item(spin_name, spin_num)

        # A second residue.
        mol1.res.add_item('Arg', 2)
        mol1.res[1].spin[0].num = 78
        mol1.res[1].spin[0].name = 'NH'

        # A second molecule with two residues, the second holding two spins.
        cdp.mol.add_item('New mol')
        mol2 = cdp.mol[1]
        mol2.res[0].num = 5
        mol2.res[0].name = 'Lys'
        mol2.res[0].spin[0].num = 239
        mol2.res[0].spin[0].name = 'NH'
        mol2.res.add_item('Thr', 6)
        mol2.res[1].spin.add_item(None, 1433)
        mol2.res[1].spin.add_item('NH', 3239)

        # A third molecule with a single residue.
        cdp.mol.add_item('3rd')
        mol3 = cdp.mol[2]
        mol3.res[0].num = 13
        mol3.res[0].name = 'Gly'
        mol3.res[0].spin[0].x = 'hello'

        # Synchronise the metadata with the new structure.
        metadata_update()
示例#36
0
文件: hybrid.py 项目: tlinnet/relax
    def model_statistics(self,
                         model_info=None,
                         spin_id=None,
                         global_stats=None):
        """Return the k, n, and chi2 model statistics of the hybrid.

        k - number of parameters.
        n - number of data points.
        chi2 - the chi-squared value.


        @keyword model_info:    The model index from model_loop().  This is zero for the global
                                models or equal to the global spin index (which covers the
                                molecule, residue, and spin indices).
        @type model_info:       int
        @keyword spin_id:       The spin identification string.  Either this or the model_info
                                keyword argument must be supplied.
        @type spin_id:          None or str
        @keyword global_stats:  A parameter which determines if global or local statistics are
                                returned.  If None, then the appropriateness of global or local
                                statistics is automatically determined.
        @type global_stats:     None or bool
        @return:                The optimisation statistics, in tuple format, of the number of
                                parameters (k), the number of data points (n), and the chi-squared
                                value (chi2).
        @rtype:                 tuple of int, int, float
        """

        # Bad argument combination - exactly one of the two must be given.
        if model_info is None and spin_id is None:
            raise RelaxError(
                "Either the model_info or spin_id argument must be supplied.")
        elif model_info is not None and spin_id is not None:
            raise RelaxError("The model_info arg " + repr(model_info) +
                             " and spin_id arg " + repr(spin_id) +
                             " clash.  Only one should be supplied.")

        # Initialise the totals.
        k_total = 0
        n_total = 0
        chi2_total = 0.0

        # Sum the statistics over all the data pipes forming the hybrid.
        for pipe in cdp.hybrid_pipes:
            # Switch to the data pipe.
            pipes.switch(pipe)

            # The pipe-type-specific model statistics function (renamed from
            # 'model_statistics' so the local no longer shadows this method).
            stats_fn = setup.get_specific_fn('model_stats',
                                             pipes.get_type(pipe))

            # Get the statistics.
            k, n, chi2 = stats_fn(model_info=model_info,
                                  spin_id=spin_id,
                                  global_stats=global_stats)

            # Skip pipes with missing statistics.
            if k is None or n is None or chi2 is None:
                continue

            # Sum the stats.
            k_total += k
            n_total += n
            chi2_total += chi2

        # Return the totals.
        return k_total, n_total, chi2_total
示例#37
0
    def __init__(self,
                 pipe_name=None,
                 pipe_bundle=None,
                 file_root='rx',
                 results_dir=None,
                 grid_inc=11,
                 mc_sim_num=500,
                 view_plots=True):
        """Perform relaxation curve fitting.

        To use this auto-analysis, a data pipe with all the required data needs to be set up.  This data pipe should contain the following:

            - All the spins loaded.
            - Unresolved spins deselected.
            - All the peak intensities loaded and relaxation delay times set.
            - Either the baseplane noise RMSD values should be set or replicated spectra loaded.

        @keyword pipe_name:     The name of the data pipe containing all of the data for the analysis.
        @type pipe_name:        str
        @keyword pipe_bundle:   The data pipe bundle to associate all spawned data pipes with.
        @type pipe_bundle:      str
        @keyword file_root:     File root of the output files.
        @type file_root:        str
        @keyword results_dir:   The directory where results files are saved.
        @type results_dir:      str
        @keyword grid_inc:      Number of grid search increments.
        @type grid_inc:         int
        @keyword mc_sim_num:    The number of Monte Carlo simulations to be used for error analysis at the end of the analysis.
        @type mc_sim_num:       int
        @keyword view_plots:    Flag to automatically view grace plots after calculation.
        @type view_plots:       bool
        """

        # Initial printout.
        title(file=sys.stdout, text="Relaxation curve-fitting auto-analysis")

        # Safely execute the full protocol.
        try:
            # Acquire the execution lock before any state is modified.
            status.exec_lock.acquire(pipe_bundle, mode='auto-analysis')

            # Set up the analysis status object.
            status.init_auto_analysis(pipe_bundle, type='relax_fit')
            status.current_analysis = pipe_bundle

            # Store the args.
            self.pipe_name = pipe_name
            self.pipe_bundle = pipe_bundle
            self.file_root = file_root
            self.results_dir = results_dir
            if self.results_dir:
                self.grace_dir = results_dir + sep + 'grace'
            else:
                # Fall back to a relative 'grace' directory when no results dir is given.
                self.grace_dir = 'grace'
            self.mc_sim_num = mc_sim_num
            self.grid_inc = grid_inc
            self.view_plots = view_plots

            # Data checks.
            self.check_vars()

            # Switch to the analysis pipe only if it is not already current.
            if self.pipe_name != cdp_name():
                switch(self.pipe_name)

            # Load the interpreter.
            self.interpreter = Interpreter(show_script=False,
                                           raise_relax_error=True)
            self.interpreter.populate_self()
            self.interpreter.on(verbose=False)

            # Execute.
            self.run()

        # Clean up - this runs even when the protocol above raises.
        finally:
            # Final printout.
            title(
                file=sys.stdout,
                text="Completion of the relaxation curve-fitting auto-analysis"
            )
            print_elapsed_time(time() - status.start_time)

            # Finish and unlock execution.
            # NOTE(review): self.pipe_bundle is only set inside the try block; if
            # exec_lock.acquire() raises, this would fail with AttributeError - confirm.
            status.auto_analysis[self.pipe_bundle].fin = True
            status.current_analysis = None
            status.exec_lock.release()
示例#38
0
def select(method=None, modsel_pipe=None, bundle=None, pipes=None):
    """Model selection function.

    @keyword method:        The model selection method.  This can currently be one of:
                                - 'AIC', Akaike's Information Criteria.
                                - 'AICc', Small sample size corrected AIC.
                                - 'BIC', Bayesian or Schwarz Information Criteria.
                                - 'CV', Single-item-out cross-validation.
                            None of the other model selection techniques are currently supported.
    @type method:           str
    @keyword modsel_pipe:   The name of the new data pipe to be created by copying of the selected data pipe.
    @type modsel_pipe:      str
    @keyword bundle:        The optional data pipe bundle to associate the newly created pipe with.
    @type bundle:           str or None
    @keyword pipes:         A list of the data pipes to use in the model selection.
    @type pipes:            list of str
    """

    # Test if the pipe already exists.
    if has_pipe(modsel_pipe):
        raise RelaxPipeError(modsel_pipe)

    # Use all pipes.
    if pipes is None:
        # Get all data pipe names from the relax data store.
        pipes = pipe_names()

    # Select the model selection technique.
    formula = None
    if method == 'AIC':
        print("AIC model selection.")
        formula = aic
    elif method == 'AICc':
        print("AICc model selection.")
        formula = aicc
    elif method == 'BIC':
        print("BIC model selection.")
        formula = bic
    elif method == 'CV':
        # Cross-validation uses the averaged chi-squared value directly (see the
        # 'CV' branch in the main loop below), so no criterion formula is set.
        print("CV model selection.")
    else:
        raise RelaxError("The model selection technique " + repr(method) + " is not currently supported.")

    # No pipes.
    if len(pipes) == 0:
        raise RelaxError("No data pipes are available for use in model selection.")

    # Initialise the per-pipe specific function maps.
    model_loop = {}
    model_type = {}
    duplicate_data = {}
    model_statistics = {}
    skip_function = {}
    modsel_pipe_exists = False

    # Cross validation setup (pipes is a list of lists of pipe names).
    if isinstance(pipes[0], list):
        # No pipes.
        if len(pipes[0]) == 0:
            raise RelaxError("No pipes are available for use in model selection in the array " + repr(pipes[0]) + ".")

        # Loop over the data pipes.
        for i in range(len(pipes)):
            for j in range(len(pipes[i])):
                # Specific functions.
                model_loop[pipes[i][j]] = get_specific_fn('model_loop', get_type(pipes[i][j]))
                model_type[pipes[i][j]] = get_specific_fn('model_type', get_type(pipes[i][j]))
                duplicate_data[pipes[i][j]] = get_specific_fn('duplicate_data', get_type(pipes[i][j]))
                model_statistics[pipes[i][j]] = get_specific_fn('model_stats', get_type(pipes[i][j]))
                skip_function[pipes[i][j]] = get_specific_fn('skip_function', get_type(pipes[i][j]))

        # The model loop should be the same for all data pipes!
        for i in range(len(pipes)):
            for j in range(len(pipes[i])):
                if model_loop[pipes[0][j]] != model_loop[pipes[i][j]]:
                    raise RelaxError("The models for each data pipes should be the same.")
        model_loop = model_loop[pipes[0][0]]

        # The model description (from the first pipe of the first CV set - pipes[0]
        # is itself a list here, so the single pipe name pipes[0][0] must be used).
        model_desc = get_specific_fn('model_desc', get_type(pipes[0][0]))

        # Global vs. local models.
        global_flag = False
        for i in range(len(pipes)):
            for j in range(len(pipes[i])):
                if model_type[pipes[i][j]]() == 'global':
                    global_flag = True

    # All other model selection setup (pipes is a flat list of pipe names).
    else:
        # Loop over the data pipes.
        for i in range(len(pipes)):
            # Specific functions.
            model_loop[pipes[i]] = get_specific_fn('model_loop', get_type(pipes[i]))
            model_type[pipes[i]] = get_specific_fn('model_type', get_type(pipes[i]))
            duplicate_data[pipes[i]] = get_specific_fn('duplicate_data', get_type(pipes[i]))
            model_statistics[pipes[i]] = get_specific_fn('model_stats', get_type(pipes[i]))
            skip_function[pipes[i]] = get_specific_fn('skip_function', get_type(pipes[i]))

        model_loop = model_loop[pipes[0]]

        # The model description.
        model_desc = get_specific_fn('model_desc', get_type(pipes[0]))

        # Global vs. local models.
        global_flag = False
        for j in range(len(pipes)):
            if model_type[pipes[j]]() == 'global':
                global_flag = True


    # Loop over the base models.
    for model_info in model_loop():
        # Print out.
        print("\n")
        desc = model_desc(model_info)
        if desc:
            print(desc)

        # Initial model.
        best_model = None
        best_crit = 1e300
        data = []

        # Loop over the pipes.
        for j in range(len(pipes)):
            # Single-item-out cross validation.
            if method == 'CV':
                # Sum of chi-squared values.
                sum_crit = 0.0

                # Loop over the validation samples and sum the chi-squared values.
                # A distinct loop variable is used so the parameter count 'k'
                # returned below cannot shadow it.
                for m in range(len(pipes[j])):
                    # Alias the data pipe name.
                    pipe = pipes[j][m]

                    # Switch to this pipe.
                    switch(pipe)

                    # Skip function.
                    if skip_function[pipe](model_info):
                        continue

                    # Get the model statistics (parameter count, data count, chi-squared).
                    k, n, chi2 = model_statistics[pipe](model_info)

                    # Missing data sets.
                    if k is None or n is None or chi2 is None:
                        continue

                    # Chi2 sum.
                    sum_crit = sum_crit + chi2

                # Cross-validation criterion (average chi-squared value).
                crit = sum_crit / float(len(pipes[j]))

            # Other model selection methods.
            else:
                # Reassign the pipe.
                pipe = pipes[j]

                # Switch to this pipe.
                switch(pipe)

                # Skip function.
                if skip_function[pipe](model_info):
                    continue

                # Get the model statistics (parameter count, data count, chi-squared).
                k, n, chi2 = model_statistics[pipe](model_info, global_stats=global_flag)

                # Missing data sets.
                if k is None or n is None or chi2 is None:
                    continue

                # Calculate the criterion value.
                crit = formula(chi2, float(k), float(n))

                # Store the values for a later printout.
                data.append([pipe, repr(k), repr(n), "%.5f" % chi2, "%.5f" % crit])

            # Select model.
            if crit < best_crit:
                best_model = pipe
                best_crit = crit

        # Write out the table.
        write_data(out=sys.stdout, headings=["Data pipe", "Num_params_(k)", "Num_data_sets_(n)", "Chi2", "Criterion"], data=data)

        # Duplicate the data from the 'best_model' to the model selection data pipe.
        if best_model is not None:
            # Print out of selected model.
            print("The model from the data pipe " + repr(best_model) + " has been selected.")

            # Switch to the selected data pipe.
            switch(best_model)

            # Duplicate.
            duplicate_data[best_model](best_model, modsel_pipe, model_info, global_stats=global_flag, verbose=False)

            # Model selection pipe now exists.
            modsel_pipe_exists = True

        # No model selected.
        else:
            # Print out of selected model.
            print("No model has been selected.")

    # Switch to the model selection pipe.
    if modsel_pipe_exists:
        switch(modsel_pipe)

    # Bundle the data pipe.
    if bundle:
        pipe_control.pipes.bundle(bundle=bundle, pipe=modsel_pipe)
示例#39
0
    def setUp(self):
        """Set up for all the relaxation data unit tests.

        Two model-free ('mf') data pipes named 'orig' and 'test' are created,
        with 'orig' made the current pipe, and the Ap4Aase 600 MHz NOE values
        and errors are stored on the test instance for use by the tests.
        """

        # Add a data pipe to the data store.
        ds.add(pipe_name='orig', pipe_type='mf')

        # Add a second data pipe for copying tests.
        ds.add(pipe_name='test', pipe_type='mf')

        # Set the current data pipe to 'orig'.
        pipes.switch('orig')

        # The Ap4Aase 600 MHz NOE data.
        # NOTE(review): None entries presumably mark residues with no NOE
        # measurement - confirm against the tests consuming this list.
        self.Ap4Aase_600_NOE_val = [
            None, None, None, 0.12479588727508535, 0.42240815792914105,
            0.45281703194372114, 0.60727570079478255, 0.63871921623680161,
            None, None, None, 0.92927160307645906, 0.88832516377296256,
            0.84945042565860407, 0.73027277534135793, 1.0529350986375761,
            0.80025161548578949, 0.9225805138227271, None, 0.83690702968916975,
            0.82750462671474634, 0.94498415442235661, None, 0.8935097799257431,
            0.86456261089305875, 0.74923159572687958, 0.82028906681170666,
            0.95078138769755005, 0.88196946543481614, 0.88560694800603623,
            None, 0.93583460370655014, 0.83709220285834895,
            0.77065893466772672, 0.74898049254126575, 0.75473259762308997,
            None, 0.72339922138816593, 0.7409139945787665, 0.81036305956996824,
            0.93259428996098348, None, None, None, 0.97484276729559749,
            0.79870627747000578, 0.77846459298477833, 0.85891945210952814,
            0.82545651205700832, 0.77308724653857397, 0.83873490797355599,
            0.78962147119962445, None, None, 0.83658554344066838,
            0.94444774229292805, 0.88892100988408906, 0.89074818049490534,
            0.93798213161209065, 0.89579384792870853, 0.90689840050040216,
            0.86826627855975114, 0.88998453873904826, 0.93193995326551327,
            0.91380634390651083, 0.86088897739301773, 0.91200692603214106,
            0.89667919287639897, 0.95894205272847033, 0.83602820831090652,
            0.91434697423385458, 0.78332056488518564, 0.82655263496972042,
            0.82607341906155618, 0.88040402181165589, 0.8440486006693505, None,
            0.82043041764520075, 0.78703432521087158, 0.82699368240002646,
            0.85174803791662423, None, 0.84885669819226628,
            0.89183703777040746, None, None, 0.92544635676371245,
            0.68656513923277818, 0.72560011690157689, 0.69795502821734268,
            0.57079416490593249, None, 0.71563067539320835, None,
            0.50533076429030188, None, 0.75242786769880365,
            0.70895981542011155, None, 0.33272491279305588, None,
            0.90094329970739295, 0.88707256046705585, 0.87992586160833552,
            0.79195727809693339, 0.91358573817741873, 0.90242759014288332,
            0.97529004068103053, 0.88453257922238127, 0.93282837259539797,
            0.80050317711189245, 0.87281500262478917, 0.82161925495299371,
            0.75331847553368936, 0.86583135026629754, 0.8423490949685033,
            0.75544980660586103, 0.89663290907940885, 0.86241114220463833,
            0.865189333746754, 0.76072465838213588, 0.8627935013491016,
            0.58872842981275242, None, 0.66265488495054237, None,
            0.6175482423717148, 0.57128306878306878, None, 0.65783414097673298,
            0.78352915459861194, 0.90660549423688019, 0.69562244671213447,
            0.8986224709427465, 0.93456283575144872, None, 0.96438783132840478,
            0.88494476363170493, None, 0.81450919045756742, None,
            0.89273806940361811, 0.89784704409243976, 0.89409823895739682,
            None, 0.75626758626525903, None, 0.81861161936806948,
            0.77704881157681638, 0.93492416343713725, 0.80529201617441148,
            None, 0.75214448729046979, 0.77939624899611037,
            0.88957406230133507, 0.83119933716570005, 0.91593660447979419,
            1.0269367764915405, 0.95254605768690148, 0.80783819302725635,
            0.91264712309949736, 0.87414218862982118, None, 0.8457055541736257,
            0.7976400443272097
        ]
        # The errors corresponding one-to-one with the NOE values above (None
        # entries match the missing values in the value list).
        self.Ap4Aase_600_NOE_err = [
            None, None, None, 0.020551827436105764, 0.02016346825976852,
            0.026272719841642134, 0.032369427242382849, 0.024695665815261791,
            None, None, None, 0.059569089743604184, 0.044119641308479306,
            0.060533543601110441, 0.054366133132835504, 0.10226383618816391,
            0.05217226473549319, 0.040042471153624366, None,
            0.043355836219158402, 0.070804231151989958, 0.045958118280731972,
            None, 0.040080159235713876, 0.025516073550159439,
            0.031147400155540676, 0.029551538089533019, 0.058781807250738359,
            0.053798141218956298, 0.04058564845028198, None,
            0.049964861028149038, 0.056277751722041303, 0.055862610530979066,
            0.062426501508834664, 0.037655461974785032, None,
            0.037437746270300623, 0.031899198795702917, 0.05883492648236429,
            0.049025044758579737, None, None, None, 0.061101732084737577,
            0.039371899134381119, 0.047660696280181554, 0.047240554321579087,
            0.023582035074293468, 0.047687722096308678, 0.023196186303095979,
            0.030389229790461863, None, None, 0.033964837875935606,
            0.039442739174763576, 0.02652568603842493, 0.044926643986551523,
            0.039072857662117114, 0.045558051667044089, 0.034817385027641354,
            0.037932460586176255, 0.04416504039747577, 0.046692690948839703,
            0.065323967027699076, 0.039701400872345881, 0.042298623934643773,
            0.079080932418819722, 0.067928676079858738, 0.020489325306093879,
            0.036174251087283844, 0.024485137678863723, 0.059736004586569386,
            0.028861627878037942, 0.029401903503258862, 0.045460315556323593,
            None, 0.02537282712438679, 0.053007727294934082,
            0.061355070294094288, 0.047992183164804886, None,
            0.056269021809127781, 0.063703951001952613, None, None,
            0.10564633090541133, 0.057727260927784539, 0.046307285409240992,
            0.043081197184003071, 0.040201168842464927, None,
            0.029680853171297025, None, 0.13110007530113085, None,
            0.055416928281966663, 0.066345573350012677, None,
            0.11395960586085771, None, 0.079438544471658351,
            0.064210071948445219, 0.063995466359721184, 0.058755076446370007,
            0.06960783712536886, 0.079202821774062071, 0.055639087817357311,
            0.044727670551887241, 0.054122883235643848, 0.039242551779018732,
            0.042620700259960162, 0.044970961360827032, 0.16191971307506142,
            0.03075410906877173, 0.029862890782436795, 0.020403875440027055,
            0.027212286424039007, 0.030489671728569135, 0.036801376842568193,
            0.04239547098051423, 0.031102853835183651, 0.019647495159139456,
            None, 0.018633796971680151, None, 0.016649721653701773,
            0.021020898014190763, None, 0.038324025574230064,
            0.039477530263583097, 0.073251142306895001, 0.029252551207069242,
            0.029800820512347403, 0.04190589192257109, None,
            0.051973164180429951, 0.037885698528167658, None,
            0.028795497444627904, None, 0.038102768163308702,
            0.047105957099339957, 0.031888501753176597, None,
            0.037573735892664904, None, 0.045323274177265373,
            0.067437069694904428, 0.077974178652576179, 0.047904023844685867,
            None, 0.046470703715260887, 0.056734780619811562,
            0.040848702413964474, 0.031770841283563625, 0.045827271513353023,
            0.062189928910417011, 0.037543295138971443, 0.039174104595193118,
            0.0406911452975608, 0.026051431683394789, None,
            0.033339638549660086, 0.028434080259488268
        ]
示例#40
0
    def __init__(self, pipe_name=None, pipe_bundle=None, file_root='rx', results_dir=None, grid_inc=11, mc_sim_num=500, view_plots=True):
        """Perform relaxation curve fitting.

        To use this auto-analysis, a data pipe with all the required data needs to be set up.  This data pipe should contain the following:

            - All the spins loaded.
            - Unresolved spins deselected.
            - All the peak intensities loaded and relaxation delay times set.
            - Either the baseplane noise RMSD values should be set or replicated spectra loaded.

        @keyword pipe_name:     The name of the data pipe containing all of the data for the analysis.
        @type pipe_name:        str
        @keyword pipe_bundle:   The data pipe bundle to associate all spawned data pipes with.
        @type pipe_bundle:      str
        @keyword file_root:     File root of the output files.
        @type file_root:        str
        @keyword results_dir:   The directory where results files are saved.
        @type results_dir:      str
        @keyword grid_inc:      Number of grid search increments.
        @type grid_inc:         int
        @keyword mc_sim_num:    The number of Monte Carlo simulations to be used for error analysis at the end of the analysis.
        @type mc_sim_num:       int
        @keyword view_plots:    Flag to automatically view grace plots after calculation.
        @type view_plots:       bool
        """

        # Execution lock.
        status.exec_lock.acquire(pipe_bundle, mode='auto-analysis')

        # Store the pipe bundle first, so the clean up code can always reference it.
        self.pipe_bundle = pipe_bundle

        # Safely execute the full protocol so the lock is released on failure.
        try:
            # Set up the analysis status object.
            status.init_auto_analysis(pipe_bundle, type='relax_fit')
            status.current_analysis = pipe_bundle

            # Store the args.
            self.pipe_name = pipe_name
            self.file_root = file_root
            self.results_dir = results_dir
            if self.results_dir:
                self.grace_dir = results_dir + sep + 'grace'
            else:
                self.grace_dir = 'grace'
            self.mc_sim_num = mc_sim_num
            self.grid_inc = grid_inc
            self.view_plots = view_plots

            # Data checks.
            self.check_vars()

            # Set the data pipe to the current data pipe.
            if self.pipe_name != cdp_name():
                switch(self.pipe_name)

            # Load the interpreter.
            self.interpreter = Interpreter(show_script=False, raise_relax_error=True)
            self.interpreter.populate_self()
            self.interpreter.on(verbose=False)

            # Execute.
            self.run()

        # Clean up.
        finally:
            # Finish and unlock execution, even if the analysis failed part way.
            status.auto_analysis[self.pipe_bundle].fin = True
            status.current_analysis = None
            status.exec_lock.release()
示例#41
0
def copy(pipe_from=None, pipe_to=None, param=None, force=False):
    """Copy spin specific data values from pipe_from to pipe_to.

    @param pipe_from:   The data pipe to copy the value from.  This defaults to the current data
                        pipe.
    @type pipe_from:    str
    @param pipe_to:     The data pipe to copy the value to.  This defaults to the current data pipe.
    @type pipe_to:      str
    @param param:       The name of the parameter to copy the values of.
    @type param:        str
    @keyword force:     A flag forcing the overwriting of current values.
    @type force:        bool
    """

    # Remember the current pipe and default any unspecified pipe name to it.
    pipe_orig = pipes.cdp_name()
    if pipe_from is None:
        pipe_from = pipe_orig
    if pipe_to is None:
        pipe_to = pipe_orig

    # The destination pipe must exist.
    check_pipe(pipe_to)

    # Both pipes must contain sequence data.
    for name in (pipe_from, pipe_to):
        if not exists_mol_res_spin_data(name):
            raise RelaxNoSequenceError(name)

    # The specific analysis API object for the source pipe.
    api = return_api(pipe_name=pipe_from)

    # Without the force flag, refuse to overwrite any pre-existing data.
    if force == False:
        for spin in spin_loop(pipe=pipe_to):
            value, error = api.return_value(spin, param)
            if value is not None or error is not None:
                raise RelaxValueError(param, pipe_to)

    # All setting operations act on the destination pipe.
    pipes.switch(pipe_to)

    # Transfer the value and error of every spin.
    for spin, spin_id in spin_loop(pipe=pipe_from, return_id=True):
        value, error = api.return_value(spin, param)
        if value is not None:
            set(spin_id=spin_id, val=value, param=param, pipe=pipe_to, force=force)
        if error is not None:
            set(spin_id=spin_id, val=error, param=param, pipe=pipe_to, error=True, force=force)

    # The copied values invalidate any minimisation statistics.
    minimise.reset_min_stats(pipe_to)

    # Restore the original current data pipe.
    pipes.switch(pipe_orig)
示例#42
0
def set(val=None, param=None, index=None, pipe=None, spin_id=None, verbosity=1, error=False, force=True, reset=True):
    """Set global or spin specific data values.

    @keyword val:       The parameter values.
    @type val:          None or list
    @keyword param:     The parameter names.
    @type param:        None, str, or list of str
    @keyword index:     The index for parameters which are of the list-type.  This is ignored for all other types.
    @type index:        None or int
    @keyword pipe:      The data pipe the values should be placed in.
    @type pipe:         None or str
    @keyword spin_id:   The spin identification string.
    @type spin_id:      str
    @keyword verbosity: The amount of information to print.  The higher the value, the greater the verbosity.
    @type verbosity:    int
    @keyword error:     A flag which if True will allow the parameter errors to be set instead of the values.
    @type error:        bool
    @keyword force:     A flag forcing the overwriting of current values.
    @type force:        bool
    @keyword reset:     A flag which if True will cause all minimisation statistics to be reset.
    @type reset:        bool
    """

    # Temporarily switch pipes if one was requested, remembering the original.
    if pipe:
        orig_pipe = pipes.cdp_name()
        pipes.switch(pipe)

    # A current data pipe must exist.
    check_pipe()

    # The specific analysis API object.
    api = return_api()

    # Work with plain Python lists rather than numpy arrays.
    if isinstance(val, ndarray):
        val = val.tolist()

    # Reject invalid value/parameter combinations up front.
    if isinstance(val, (float, int)) and param is None:
        raise RelaxError("The combination of a single value '%s' without specifying the parameter name is invalid." % val)
    if isinstance(val, list) and isinstance(param, str):
        raise RelaxError("Invalid combination:  When multiple values '%s' are specified, either no parameters or a list of parameters must by supplied rather than the single parameter '%s'." % (val, param))

    # Value array and parameter array of equal length.
    if isinstance(val, list) and isinstance(param, list) and len(val) != len(param):
        raise RelaxError("Both the value array and parameter array must be of equal length.")

    # No parameters given, so use the full set from the analysis.
    if param is None:
        param = api.get_param_names()

    # Normalise the parameter argument to a list.
    if not isinstance(param, list):
        param = [param]

    # Expand a single value to match the number of parameters.
    if val is not None and not isinstance(val, list):
        val = [val] * len(param)

    # No values given, so fall back to the per-parameter defaults.
    if val is None:
        val = []
        for name in param:
            default = api.default_value(name)

            # Parameters without a default cannot be set this way.
            if default is None:
                raise RelaxParamSetError(name)
            val.append(default)

    # Hand the values over to the analysis specific code.
    api.set_param_values(param=param, value=val, index=index, spin_id=spin_id, error=error, force=force)

    # Changed values invalidate any minimisation statistics.
    if reset:
        minimise.reset_min_stats(verbosity=verbosity)

    # Return to the original data pipe.
    if pipe:
        pipes.switch(orig_pipe)
示例#43
0
def copy(pipe_from=None, pipe_to=None, param=None, force=False):
    """Copy spin specific data values from pipe_from to pipe_to.

    @param pipe_from:   The data pipe to copy the value from.  This defaults to the current data
                        pipe.
    @type pipe_from:    str
    @param pipe_to:     The data pipe to copy the value to.  This defaults to the current data pipe.
    @type pipe_to:      str
    @param param:       The name of the parameter to copy the values of.
    @type param:        str
    @keyword force:     A flag forcing the overwriting of current values.
    @type force:        bool
    """

    # Resolve the pipe names, defaulting to the current data pipe.
    pipe_orig = pipes.cdp_name()
    if pipe_from is None:
        pipe_from = pipe_orig
    if pipe_to is None:
        pipe_to = pipe_orig

    # Make sure the target pipe exists.
    check_pipe(pipe_to)

    # Sequence data is required in the source pipe.
    if not exists_mol_res_spin_data(pipe_from):
        raise RelaxNoSequenceError(pipe_from)

    # Sequence data is required in the target pipe.
    if not exists_mol_res_spin_data(pipe_to):
        raise RelaxNoSequenceError(pipe_to)

    # The analysis specific API for the source pipe.
    api = return_api(pipe_name=pipe_from)

    # Overwrite protection - abort if any target spin already holds data.
    if force == False:
        for spin in spin_loop(pipe=pipe_to):
            value, error = api.return_value(spin, param)
            if value is not None or error is not None:
                raise RelaxValueError(param, pipe_to)

    # Perform all setting with pipe_to as the current pipe.
    pipes.switch(pipe_to)

    # Loop over the source spins, copying values and errors across.
    for spin, spin_id in spin_loop(pipe=pipe_from, return_id=True):
        value, error = api.return_value(spin, param)
        if value is not None:
            set(spin_id=spin_id, val=value, param=param, pipe=pipe_to, force=force)
        if error is not None:
            set(spin_id=spin_id, val=error, param=param, pipe=pipe_to, error=True, force=force)

    # Clear the minimisation statistics of the target pipe.
    minimise.reset_min_stats(pipe_to)

    # Return to the original current data pipe.
    pipes.switch(pipe_orig)
示例#44
0
def copy(pipe_from=None, pipe_to=None, param=None):
    """Copy spin specific data values from pipe_from to pipe_to.

    @param pipe_from:   The data pipe to copy the value from.  This defaults to the current data
                        pipe.
    @type pipe_from:    str
    @param pipe_to:     The data pipe to copy the value to.  This defaults to the current data pipe.
    @type pipe_to:      str
    @param param:       The name of the parameter to copy the values of.
    @type param:        str
    """

    # Remember the original pipe and fill in any missing pipe names from it.
    pipe_orig = pipes.cdp_name()
    if pipe_from is None:
        pipe_from = pipe_orig
    if pipe_to is None:
        pipe_to = pipe_orig

    # The target pipe must exist.
    pipes.test(pipe_to)

    # Both pipes must contain loaded sequence data.
    if not exists_mol_res_spin_data(pipe_from):
        raise RelaxNoSequenceError(pipe_from)
    if not exists_mol_res_spin_data(pipe_to):
        raise RelaxNoSequenceError(pipe_to)

    # The analysis specific value and error returning function.
    return_value = specific_analyses.setup.get_specific_fn('return_value', pipes.get_type(pipe_from))

    # Refuse to overwrite - abort if any target spin already holds data.
    for spin in spin_loop(pipe=pipe_to):
        value, error = return_value(spin, param)
        if value is not None or error is not None:
            raise RelaxValueError(param, pipe_to)

    # Perform all setting with pipe_to as the current pipe.
    pipes.switch(pipe_to)

    # Loop over the source spins, copying values and errors across.
    for spin, spin_id in spin_loop(pipe=pipe_from, return_id=True):
        value, error = return_value(spin, param)
        if value is not None:
            set(spin_id=spin_id, val=value, param=param, pipe=pipe_to)
        if error is not None:
            set(spin_id=spin_id, val=error, param=param, pipe=pipe_to, error=True)

    # Clear the minimisation statistics of the target pipe.
    minimise.reset_min_stats(pipe_to)

    # Return to the original current data pipe.
    pipes.switch(pipe_orig)
示例#45
0
def select(method=None, modsel_pipe=None, bundle=None, pipes=None):
    """Model selection function.

    @keyword method:        The model selection method.  This can currently be one of:
                                - 'AIC', Akaike's Information Criteria.
                                - 'AICc', Small sample size corrected AIC.
                                - 'BIC', Bayesian or Schwarz Information Criteria.
                                - 'CV', Single-item-out cross-validation.
                            None of the other model selection techniques are currently supported.
    @type method:           str
    @keyword modsel_pipe:   The name of the new data pipe to be created by copying of the selected data pipe.
    @type modsel_pipe:      str
    @keyword bundle:        The optional data pipe bundle to associate the newly created pipe with.
    @type bundle:           str or None
    @keyword pipes:         A list of the data pipes to use in the model selection.  For the 'CV' method, this must be a list of lists with each sub-list containing the pipes of the validation samples.
    @type pipes:            list of str, or list of list of str
    """

    # Test if the model selection pipe already exists.
    if has_pipe(modsel_pipe):
        raise RelaxPipeError(modsel_pipe)

    # Default to all data pipe names from the relax data store.
    if pipes is None:
        pipes = pipe_names()

    # Select the model selection technique.
    if method == 'AIC':
        print("AIC model selection.")
        formula = aic
    elif method == 'AICc':
        print("AICc model selection.")
        formula = aicc
    elif method == 'BIC':
        print("BIC model selection.")
        formula = bic
    elif method == 'CV':
        # Cross-validation scores via the averaged chi-squared value directly, so no criterion formula is required.
        print("CV model selection.")
        formula = None
    else:
        raise RelaxError("The model selection technique " + repr(method) + " is not currently supported.")

    # No pipes.
    if len(pipes) == 0:
        raise RelaxError("No data pipes are available for use in model selection.")

    # Initialise the per-pipe specific function stores.
    function_type = {}
    model_loop = {}
    model_type = {}
    duplicate_data = {}
    model_statistics = {}
    skip_function = {}
    modsel_pipe_exists = False

    # Cross validation setup (a list of lists of pipes).
    if isinstance(pipes[0], list):
        # No pipes.
        if len(pipes[0]) == 0:
            raise RelaxError("No pipes are available for use in model selection in the array " + repr(pipes[0]) + ".")

        # Loop over the data pipes.
        for i in range(len(pipes)):
            for j in range(len(pipes[i])):
                # The specific analysis API object.
                api = return_api(pipe_name=pipes[i][j])

                # Store the specific functions.
                model_loop[pipes[i][j]] = api.model_loop
                model_type[pipes[i][j]] = api.model_type
                duplicate_data[pipes[i][j]] = api.duplicate_data
                model_statistics[pipes[i][j]] = api.model_statistics
                skip_function[pipes[i][j]] = api.skip_function

        # The model loop should be the same for all data pipes!
        for i in range(len(pipes)):
            for j in range(len(pipes[i])):
                if model_loop[pipes[0][j]] != model_loop[pipes[i][j]]:
                    raise RelaxError("The models for each data pipes should be the same.")

        # Alias some function from the specific API of the first data pipe.
        api = return_api(pipe_name=pipes[0][0])
        model_loop = api.model_loop
        model_desc = api.model_desc

        # Global vs. local models.
        global_flag = False
        for i in range(len(pipes)):
            for j in range(len(pipes[i])):
                if model_type[pipes[i][j]]() == 'global':
                    global_flag = True

    # All other model selection setup (a flat list of pipes).
    else:
        # Loop over the data pipes.
        for i in range(len(pipes)):
            # The specific analysis API object for this pipe (not the current pipe).
            api = return_api(pipe_name=pipes[i])

            # Store the specific functions.
            model_loop[pipes[i]] = api.model_loop
            model_type[pipes[i]] = api.model_type
            duplicate_data[pipes[i]] = api.duplicate_data
            model_statistics[pipes[i]] = api.model_statistics
            skip_function[pipes[i]] = api.skip_function

        # Alias some function from the specific API of the first data pipe.
        api = return_api(pipe_name=pipes[0])
        model_loop = api.model_loop
        model_desc = api.model_desc

        # Global vs. local models.
        global_flag = False
        for j in range(len(pipes)):
            if model_type[pipes[j]]() == 'global':
                global_flag = True


    # Loop over the base models.
    for model_info in model_loop():
        # Print out.
        print("\n")
        desc = model_desc(model_info)
        if desc:
            print(desc)

        # Initial model - nothing selected yet, criterion effectively infinite.
        best_model = None
        best_crit = 1e300
        data = []

        # Loop over the pipes.
        for j in range(len(pipes)):
            # Single-item-out cross validation.
            if method == 'CV':
                # Sum of chi-squared values.
                sum_crit = 0.0

                # Loop over the validation samples and sum the chi-squared values.
                for k in range(len(pipes[j])):
                    # Alias the data pipe name.
                    pipe = pipes[j][k]

                    # Switch to this pipe.
                    switch(pipe)

                    # Skip function.
                    if skip_function[pipe](model_info):
                        continue

                    # Get the model statistics.
                    k, n, chi2 = model_statistics[pipe](model_info)

                    # Missing data sets.
                    if k is None or n is None or chi2 is None:
                        continue

                    # Chi2 sum.
                    sum_crit = sum_crit + chi2

                # Cross-validation criterion (average chi-squared value).
                crit = sum_crit / float(len(pipes[j]))

            # Other model selection methods.
            else:
                # Reassign the pipe.
                pipe = pipes[j]

                # Switch to this pipe.
                switch(pipe)

                # Skip function.
                if skip_function[pipe](model_info):
                    continue

                # Get the model statistics.
                k, n, chi2 = model_statistics[pipe](model_info, global_stats=global_flag)

                # Missing data sets.
                if k is None or n is None or chi2 is None:
                    continue

                # Calculate the criterion value.
                crit = formula(chi2, float(k), float(n))

                # Store the values for a later printout.
                data.append([pipe, repr(k), repr(n), "%.5f" % chi2, "%.5f" % crit])

            # Select the model with the lowest criterion value seen so far.
            if crit < best_crit:
                best_model = pipe
                best_crit = crit

        # Write out the table.
        write_data(out=sys.stdout, headings=["Data pipe", "Num_params_(k)", "Num_data_sets_(n)", "Chi2", "Criterion"], data=data)

        # Duplicate the data from the 'best_model' to the model selection data pipe.
        if best_model is not None:
            # Print out of selected model.
            print("The model from the data pipe " + repr(best_model) + " has been selected.")

            # Switch to the selected data pipe.
            switch(best_model)

            # Duplicate.
            duplicate_data[best_model](best_model, modsel_pipe, model_info, global_stats=global_flag, verbose=False)

            # Model selection pipe now exists.
            modsel_pipe_exists = True

        # No model selected.
        else:
            # Print out of selected model.
            print("No model has been selected.")

    # Switch to the model selection pipe.
    if modsel_pipe_exists:
        switch(modsel_pipe)

    # Bundle the data pipe.
    if bundle:
        pipe_control.pipes.bundle(bundle=bundle, pipe=modsel_pipe)

    # Update all of the required metadata structures.
    mol_res_spin.metadata_update()
    interatomic.metadata_update()
示例#46
0
def set(val=None, param=None, index=None, pipe=None, spin_id=None, verbosity=1, error=False, force=True, reset=True):
    """Set global or spin specific data values.

    @keyword val:       The parameter values.
    @type val:          None or list
    @keyword param:     The parameter names.
    @type param:        None, str, or list of str
    @keyword index:     The index for parameters which are of the list-type.  This is ignored for all other types.
    @type index:        None or int
    @keyword pipe:      The data pipe the values should be placed in.
    @type pipe:         None or str
    @keyword spin_id:   The spin identification string.
    @type spin_id:      str
    @keyword verbosity: The amount of information to print.  The higher the value, the greater the verbosity.
    @type verbosity:    int
    @keyword error:     A flag which if True will allow the parameter errors to be set instead of the values.
    @type error:        bool
    @keyword force:     A flag forcing the overwriting of current values.
    @type force:        bool
    @keyword reset:     A flag which if True will cause all minimisation statistics to be reset.
    @type reset:        bool
    """

    # Remember the original pipe and jump to the requested one.
    if pipe:
        orig_pipe = pipes.cdp_name()
        pipes.switch(pipe)

    # A current data pipe must exist.
    check_pipe()

    # Fetch the analysis specific API object.
    api = return_api()

    # Handle numpy arrays as plain lists.
    if isinstance(val, ndarray):
        val = val.tolist()

    # Sanity checks on the value/parameter combinations.
    if isinstance(val, (float, int)) and param is None:
        raise RelaxError("The combination of a single value '%s' without specifying the parameter name is invalid." % val)
    if isinstance(val, list) and isinstance(param, str):
        raise RelaxError("Invalid combination:  When multiple values '%s' are specified, either no parameters or a list of parameters must by supplied rather than the single parameter '%s'." % (val, param))
    if isinstance(val, list) and isinstance(param, list) and len(val) != len(param):
        raise RelaxError("Both the value array and parameter array must be of equal length.")

    # No parameters given - operate on the full parameter list of the analysis.
    if param is None:
        param = api.get_param_names()

    # Normalise a single parameter name to a list.
    if not isinstance(param, list):
        param = [param]

    # Replicate a single value across all parameters.
    if val is not None and not isinstance(val, list):
        val = [val] * len(param)

    # No values given - fall back to the analysis default for each parameter.
    if val is None:
        val = []
        for name in param:
            default = api.default_value(name)

            # A parameter without a default cannot be set this way.
            if default is None:
                raise RelaxParamSetError(name)
            val.append(default)

    # Hand the values over to the analysis specific code.
    api.set_param_values(param=param, value=val, index=index, spin_id=spin_id, error=error, force=force)

    # Wipe all minimisation statistics, as the parameter values have changed.
    if reset:
        minimise.reset_min_stats(verbosity=verbosity)

    # Return to the original data pipe.
    if pipe:
        pipes.switch(orig_pipe)
示例#47
0
    def test_tp02_data_to_tp02(self):
        """Test the GUI analysis with the relaxation dispersion 'TP02' model fitting to the 'TP02' synthetic data.

        This simulates a full GUI dispersion analysis - the new analysis wizard, sequence and
        spectral data setup, model selection, optimisation - and finally checks the fitted
        parameters against the values used to generate the synthetic data.
        """

        # The paths to the data files.
        data_path = status.install_path + sep+'test_suite'+sep+'shared_data'+sep+'dispersion'+sep+'r1rho_off_res_tp02'+sep

        # Simulate the new analysis wizard, selecting a relaxation dispersion analysis.
        analysis = self.new_analysis_wizard(analysis_type='disp')

        # Change the results directory to a temporary directory.
        analysis.field_results_dir.SetValue(str_to_gui(ds.tmpdir))

        # Create the sequence data (two tryptophan N spins).
        self._execute_uf(uf_name='spin.create', res_name='Trp', res_num=1, spin_name='N')
        interpreter.flush()
        self._execute_uf(uf_name='spin.create', res_name='Trp', res_num=2, spin_name='N')
        interpreter.flush()
        self._execute_uf(uf_name='sequence.display')
        interpreter.flush()

        # Set up the nuclear isotopes.
        analysis.spin_isotope()
        uf_store['spin.isotope'].page.SetValue('spin_id', '')
        uf_store['spin.isotope'].wizard._go_next()
        interpreter.flush()    # Required because of the asynchronous uf call.

        # Load the chemical shift data.
        self._execute_uf(uf_name='chemical_shift.read', file='ref_500MHz.list', dir=data_path)
        interpreter.flush()

        # The spectral data - build the [spectrum ID, file name, spin-lock field, proton frequency] table.
        frq = [500, 800]
        frq_label = ['500MHz', '800MHz']
        error = 200000.0
        data = []
        spin_lock = [None, 1000.0, 1500.0, 2000.0, 2500.0, 3000.0, 3500.0, 4000.0, 4500.0, 5000.0, 5500.0, 6000.0]
        for frq_index in range(len(frq)):
            for spin_lock_index in range(len(spin_lock)):
                # The reference (no spin-lock field).
                if spin_lock[spin_lock_index] == None:
                    id = 'ref_%s' % frq_label[frq_index]
                    file = "ref_%s.list" % frq_label[frq_index]

                # Normal data.
                else:
                    id = "nu_%s_%s" % (spin_lock[spin_lock_index], frq_label[frq_index])
                    file = "nu_%s_%s.list" % (spin_lock[spin_lock_index], frq_label[frq_index])

                # Append the data.
                data.append([id, file, spin_lock[spin_lock_index], frq[frq_index]])

        # Load the R1 data, one file per field strength.
        for frq_index in range(len(frq)):
            label = 'R1_%s' % frq_label[frq_index]
            self._execute_uf(uf_name='relax_data.read', ri_id=label, ri_type='R1', frq=frq[frq_index]*1e6, file='%s.out'%label, dir=data_path, mol_name_col=1, res_num_col=2, res_name_col=3, spin_num_col=4, spin_name_col=5, data_col=6, error_col=7)
            interpreter.flush()

        # Set up the peak intensity wizard.
        analysis.peak_wizard_launch(None)
        wizard = analysis.peak_wizard

        # The spectra.
        for id, file, field, H_frq in data:
            wizard.setup_page(page='read', file=data_path+file, spectrum_id=id, int_method='height', dim=1)
            wizard._apply(None)
        wizard._skip(None)

        # The error type.
        page = wizard.get_page(wizard.page_indices['err_type'])
        page.selection = 'rmsd'
        wizard._go_next(None)

        # Baseplane RMSD.
        for id, file, field, H_frq in data:
            wizard.setup_page(page='rmsd', spectrum_id=id, error=error)
            wizard._apply(None)
        wizard._skip(None)

        # The experiment type.
        for id, file, field, H_frq in data:
            wizard.setup_page(page='exp_type', spectrum_id=id, exp_type='R1rho')
            wizard._apply(None)
        wizard._skip(None)

        # Set the spectrometer frequency.
        for id, file, field, H_frq in data:
            wizard.setup_page(page='spectrometer_frequency', id=id, frq=H_frq, units='MHz')
            wizard._apply(None)
        wizard._skip(None)

        # Set the relaxation times.
        for id, file, field, H_frq in data:
            wizard.setup_page(page='relax_time', spectrum_id=id, time=0.1)
            wizard._apply(None)
        wizard._skip(None)

        # Set the relaxation dispersion spin-lock field strength (nu1).
        for id, file, field, H_frq in data:
            wizard.setup_page(page='spin_lock_field', spectrum_id=id, field=field)
            wizard._apply(None)
        wizard._skip(None)

        # Set the spin-lock offset.
        for id, file, field, H_frq in data:
            wizard.setup_page(page='spin_lock_offset', spectrum_id=id, offset=110.0)
            wizard._apply(None)
        wizard._skip(None)

        # Flush all wx events (to allow the spectrum list GUI element to populate all its rows).
        wx.Yield()

        # Simulate right clicking in the spectrum list element to test the popup menu.
        analysis.peak_intensity.on_right_click(Fake_right_click())

        # Simulate the popup menu entries to catch bugs there (just apply the user functions with the currently set values).
        # FIXME: skipping the checks for certain wxPython bugs.
        if status.relax_mode != 'gui' and wx.version() != '2.9.4.1 gtk2 (classic)':
            analysis.peak_intensity.action_relax_disp_spin_lock_field(item=4)
            uf_store['relax_disp.spin_lock_field'].wizard._go_next()
            interpreter.flush()
            analysis.peak_intensity.action_relax_disp_exp_type(item=5)
            uf_store['relax_disp.exp_type'].wizard._go_next()
            interpreter.flush()
            analysis.peak_intensity.action_relax_disp_relax_time(item=0)
            uf_store['relax_disp.relax_time'].wizard._go_next()
            interpreter.flush()
            analysis.peak_intensity.action_spectrometer_frq(item=10)
            uf_store['spectrometer.frequency'].wizard._go_next()
            interpreter.flush()

        # Deselect all but the 'TP02' model.
        models = [MODEL_R2EFF, MODEL_NOREX, MODEL_TP02]
        for i in range(len(analysis.model_field.models_stripped)):
            if analysis.model_field.models_stripped[i] in models:
                analysis.model_field.select[i] = True
            else:
                analysis.model_field.select[i] = False
        analysis.model_field.modify()

        # Set the grid search size and number of MC sims (kept tiny to keep the test fast).
        analysis.grid_inc.SetValue(4)
        analysis.mc_sim_num.SetValue(3)

        # Optimisation speedups.
        analysis.opt_func_tol = 1e-10
        analysis.opt_max_iterations = 10000

        # Execute relax.
        analysis.execute(wx.CommandEvent(wx.wxEVT_COMMAND_BUTTON_CLICKED, analysis.button_exec_relax.GetId()))

        # Wait for execution to complete.
        analysis.thread.join()

        # Flush all wx events.
        wx.Yield()

        # Exceptions in the thread.
        self.check_exceptions()

        # Check the relax controller.
        # FIXME: skipping the checks for certain wxPython bugs.
        if status.relax_mode != 'gui' and wx.version() != '2.9.4.1 gtk2 (classic)':
            self.assertEqual(self.app.gui.controller.mc_gauge_rx.GetValue(), 100)
            self.assertEqual(self.app.gui.controller.main_gauge.GetValue(), 100)

        # The original parameters used to generate the synthetic data.
        r1rho_prime = [[10.0, 15.0], [12.0, 18.0]]
        pA = 0.7654321
        kex = 1234.56789
        delta_omega = [7.0, 9.0]

        # The R20 keys.
        r20_key1 = generate_r20_key(exp_type=EXP_TYPE_R1RHO, frq=500e6)
        r20_key2 = generate_r20_key(exp_type=EXP_TYPE_R1RHO, frq=800e6)

        # Switch to the 'TP02' model data pipe, then check for each spin.
        switch("%s - %s" % ('TP02', get_bundle()))
        spin_index = 0
        for spin, spin_id in spin_loop(return_id=True):
            # Printout.
            print("\nSpin %s." % spin_id)

            # Check the fitted parameters (scaled to loosen the float comparison tolerance).
            self.assertAlmostEqual(spin.r2[r20_key1]/10, r1rho_prime[spin_index][0]/10, 4)
            self.assertAlmostEqual(spin.r2[r20_key2]/10, r1rho_prime[spin_index][1]/10, 4)
            self.assertAlmostEqual(spin.dw, delta_omega[spin_index], 3)
            self.assertAlmostEqual(spin.kex/1000.0, kex/1000.0, 3)

            # Increment the spin index.
            spin_index += 1
示例#48
0
def copy(pipe_from=None, pipe_to=None):
    """Copy dispersion parameters from one data pipe to another, averaging values for clusters.

    For each spin cluster, the exchange parameters (pA, pB, pC, kex, kex_AB, kex_AC, kex_BC, k_AB, kB, kC and tex) are averaged over all selected spins of the cluster in the source pipe, and the averaged values are set in the destination pipe.  The R20 parameters and all other spin specific parameters are copied across unmodified.

    @param pipe_from:   The data pipe to copy the value from.  This defaults to the current data pipe.
    @type pipe_from:    str
    @param pipe_to:     The data pipe to copy the value to.  This defaults to the current data pipe.
    @type pipe_to:      str
    """

    # The current data pipe, used both as the default and to switch back at the end.
    pipe_orig = pipes.cdp_name()
    if pipe_from == None:
        pipe_from = pipe_orig
    if pipe_to == None:
        pipe_to = pipe_orig

    # Test that the pipes exist.
    pipes.test(pipe_from)
    pipes.test(pipe_to)

    # Test that the pipes are not the same.
    if pipe_from == pipe_to:
        raise RelaxError("The source and destination pipes cannot be the same.")

    # Test if the sequence data for pipe_from is loaded.
    if not exists_mol_res_spin_data(pipe_from):
        raise RelaxNoSequenceError(pipe_from)

    # Test if the sequence data for pipe_to is loaded.
    if not exists_mol_res_spin_data(pipe_to):
        raise RelaxNoSequenceError(pipe_to)

    # Switch to the destination data pipe.
    pipes.switch(pipe_to)

    # Loop over the clusters.
    for spin_ids in loop_cluster():
        # Initialise the parameter sums and bookkeeping variables.
        model = None
        pA = 0.0
        pB = 0.0
        pC = 0.0
        kex = 0.0
        kex_AB = 0.0
        kex_AC = 0.0
        kex_BC = 0.0
        k_AB = 0.0
        kB = 0.0
        kC = 0.0
        tex = 0.0
        count = 0
        spins_from = []
        spins_to = []
        selected_cluster = False

        # Loop over the spins, summing the parameters to be averaged.
        for id in spin_ids:
            # Get the spins, then store them.
            spin_from = return_spin(id, pipe=pipe_from)
            spin_to = return_spin(id, pipe=pipe_to)
            spins_from.append(spin_from)
            spins_to.append(spin_to)

            # Skip deselected spins.
            if not spin_from.select or not spin_to.select:
                continue

            # The first printout.
            if not selected_cluster:
                subsection(file=sys.stdout, text="Copying parameters for the spin block %s"%spin_ids, prespace=2)

            # Change the cluster selection flag.
            selected_cluster = True

            # The model, taken from the first selected spin.
            if not model:
                model = spin_from.model

            # Check that the models match for all spins of the cluster.
            if spin_from.model != model:
                raise RelaxError("The model '%s' of spin '%s' from the source data pipe does not match the '%s' model of previous spins of the cluster." % (spin_from.model, id, model))
            if spin_to.model != model:
                raise RelaxError("The model '%s' of spin '%s' from the destination data pipe does not match the '%s' model of previous spins of the cluster." % (spin_to.model, id, model))

            # Sum the source parameters.
            if 'pA' in spin_from.params:
                pA += spin_from.pA
            if 'pB' in spin_from.params:
                pB += spin_from.pB
            if 'pC' in spin_from.params:
                pC += spin_from.pC
            if 'kex' in spin_from.params:
                kex += spin_from.kex
            if 'kex_AB' in spin_from.params:
                kex_AB += spin_from.kex_AB
            if 'kex_AC' in spin_from.params:
                kex_AC += spin_from.kex_AC
            if 'kex_BC' in spin_from.params:
                kex_BC += spin_from.kex_BC
            if 'k_AB' in spin_from.params:
                k_AB += spin_from.k_AB
            if 'kB' in spin_from.params:
                kB += spin_from.kB
            if 'kC' in spin_from.params:
                kC += spin_from.kC
            if 'tex' in spin_from.params:
                tex += spin_from.tex

            # Increment the spin count.
            count += 1

        # The cluster is not selected, so move to the next.
        if not selected_cluster:
            continue

        # Average the summed parameters over the selected spin count.
        if pA != 0.0:
            pA = pA / count
            print("Averaged pA value:  %.15f" % pA)
        if pB != 0.0:
            pB = pB / count
            print("Averaged pB value:  %.15f" % pB)
        if pC != 0.0:
            pC = pC / count
            print("Averaged pC value:  %.15f" % pC)
        if kex != 0.0:
            kex = kex / count
            print("Averaged kex value: %.15f" % kex)
        if kex_AB != 0.0:
            kex_AB = kex_AB / count
            print("Averaged kex_AB value: %.15f" % kex_AB)
        if kex_AC != 0.0:
            kex_AC = kex_AC / count
            print("Averaged kex_AC value: %.15f" % kex_AC)
        if kex_BC != 0.0:
            kex_BC = kex_BC / count
            print("Averaged kex_BC value: %.15f" % kex_BC)
        if k_AB != 0.0:
            k_AB = k_AB / count
            print("Averaged k_AB value: %.15f" % k_AB)
        if kB != 0.0:
            kB = kB / count
            print("Averaged kB value:  %.15f" % kB)
        if kC != 0.0:
            kC = kC / count
            print("Averaged kC value:  %.15f" % kC)
        if tex != 0.0:
            tex = tex / count
            print("Averaged tex value: %.15f" % tex)

        # Loop over the spins, this time copying the parameters.
        for i in range(len(spin_ids)):
            # Alias the containers.
            spin_from = spins_from[i]
            spin_to = spins_to[i]

            # Skip deselected spins.
            if not spin_from.select or not spin_to.select:
                continue

            # The R20 parameters (copied whole, not averaged).
            if 'r2' in spin_from.params:
                spin_to.r2 = deepcopy(spin_from.r2)
            if 'r2a' in spin_from.params:
                spin_to.r2a = deepcopy(spin_from.r2a)
            if 'r2b' in spin_from.params:
                spin_to.r2b = deepcopy(spin_from.r2b)

            # The averaged parameters.  The populations are renormalised to sum to 1.
            if 'pB' in spin_from.params and 'pC' not in spin_from.params:
                spin_to.pA = pA
                spin_to.pB = pB
                spin_to.pC = 1.0 - pA - pB
            elif 'pA' in spin_from.params:
                spin_to.pA = pA
                spin_to.pB = 1.0 - pA
            if 'kex' in spin_from.params:
                spin_to.kex = kex
            if 'kex_AB' in spin_from.params:
                spin_to.kex_AB = kex_AB
            if 'kex_AC' in spin_from.params:
                spin_to.kex_AC = kex_AC
            if 'kex_BC' in spin_from.params:
                spin_to.kex_BC = kex_BC
            if 'k_AB' in spin_from.params:
                spin_to.k_AB = k_AB
            if 'kB' in spin_from.params:
                spin_to.kB = kB
            if 'kC' in spin_from.params:
                spin_to.kC = kC
            if 'tex' in spin_from.params:
                spin_to.tex = tex

            # All other spin specific parameters (the already handled ones are skipped).
            for param in spin_from.params:
                if param in ['r2', 'r2a', 'r2b', 'pA', 'pB', 'pC', 'kex', 'kex_AB', 'kex_AC', 'kex_BC', 'k_AB', 'kB', 'kC', 'tex']:
                    continue

                # Copy the value.
                setattr(spin_to, param, deepcopy(getattr(spin_from, param)))

    # Switch back to the original data pipe.
    pipes.switch(pipe_orig)
示例#49
0
def copy(pipe_from=None, pipe_to=None):
    """Copy dispersion parameters from one data pipe to another, using cluster medians as starting values.

    For each spin cluster, the median of the source parameter values is taken as the new starting value.  Taking the median prevents extreme outliers from skewing the starting value, as a plain average would.

    @param pipe_from:   The data pipe to copy the value from.  This defaults to the current data pipe.
    @type pipe_from:    str
    @param pipe_to:     The data pipe to copy the value to.  This defaults to the current data pipe.
    @type pipe_to:      str
    """

    # The clustered parameters, for which the cluster median becomes the starting value.
    median_params = ['pA', 'pB', 'pC', 'kex', 'kex_AB', 'kex_AC', 'kex_BC', 'k_AB', 'kB', 'kC', 'tex']

    # The R20 parameters, copied per-spin rather than per-cluster.
    r20_params = ['r2', 'r2a', 'r2b']

    # The current data pipe, stored so it can be restored at the end.
    pipe_orig = pipes.cdp_name()
    if pipe_from is None:
        pipe_from = pipe_orig
    if pipe_to is None:
        pipe_to = pipe_orig

    # Test that the pipes exist.
    check_pipe(pipe_from)
    check_pipe(pipe_to)

    # Test that the pipes are not the same.
    if pipe_from == pipe_to:
        raise RelaxError("The source and destination pipes cannot be the same.")

    # Test if the sequence data for pipe_from is loaded.
    if not exists_mol_res_spin_data(pipe_from):
        raise RelaxNoSequenceError(pipe_from)

    # Test if the sequence data for pipe_to is loaded.
    if not exists_mol_res_spin_data(pipe_to):
        raise RelaxNoSequenceError(pipe_to)

    # Switch to the destination data pipe.
    pipes.switch(pipe_to)

    # Loop over the clusters.
    for spin_ids in loop_cluster():
        # Initialise the per-cluster data structures.
        model = None
        values = {}
        for name in median_params:
            values[name] = []
        spins_from = []
        spins_to = []
        selected_cluster = False

        # First pass: collect the source parameter values of all selected spins of the cluster.
        for id in spin_ids:
            # Get the spins, then store them.
            spin_from = return_spin(id, pipe=pipe_from)
            spin_to = return_spin(id, pipe=pipe_to)
            spins_from.append(spin_from)
            spins_to.append(spin_to)

            # Skip deselected spins.
            if not spin_from.select or not spin_to.select:
                continue

            # The first printout for the cluster.
            if not selected_cluster:
                subsection(file=sys.stdout, text="Copying parameters for the spin block %s"%spin_ids, prespace=2)

            # Change the cluster selection flag.
            selected_cluster = True

            # The model, taken from the first selected spin.
            if model is None:
                model = spin_from.model

            # Check that the models match for all spins of the cluster.
            if spin_from.model != model:
                raise RelaxError("The model '%s' of spin '%s' from the source data pipe does not match the '%s' model of previous spins of the cluster." % (spin_from.model, id, model))
            if spin_to.model != model:
                raise RelaxError("The model '%s' of spin '%s' from the destination data pipe does not match the '%s' model of previous spins of the cluster." % (spin_to.model, id, model))

            # Store the source parameter values.
            for name in median_params:
                if name in spin_from.params:
                    values[name].append(getattr(spin_from, name))

        # The cluster is not selected, so move to the next.
        if not selected_cluster:
            continue

        # Collapse each non-empty value list to its median.  A parameter absent from every spin keeps its empty list, matching the per-parameter guards below.
        for name in median_params:
            if len(values[name]):
                values[name] = median(values[name])
                print("Median %s value: %.15f" % (name, values[name]))

        # Second pass: copy the parameters to the destination spins.
        for i in range(len(spin_ids)):
            # Alias the containers.
            spin_from = spins_from[i]
            spin_to = spins_to[i]

            # Skip deselected spins.
            if not spin_from.select or not spin_to.select:
                continue

            # The R20 parameters, copied directly from the matching source spin.
            for name in r20_params:
                if name in spin_from.params:
                    setattr(spin_to, name, deepcopy(getattr(spin_from, name)))

            # The population parameters - the remaining population is derived so that all populations sum to one.
            if 'pB' in spin_from.params and 'pC' not in spin_from.params:
                spin_to.pA = values['pA']
                spin_to.pB = values['pB']
                spin_to.pC = 1.0 - values['pA'] - values['pB']
            elif 'pA' in spin_from.params:
                spin_to.pA = values['pA']
                spin_to.pB = 1.0 - values['pA']

            # The exchange rate and time parameters.
            for name in ['kex', 'kex_AB', 'kex_AC', 'kex_BC', 'k_AB', 'kB', 'kC', 'tex']:
                if name in spin_from.params:
                    setattr(spin_to, name, values[name])

            # All other spin specific parameters are copied directly.
            for param in spin_from.params:
                if param in r20_params or param in median_params:
                    continue

                # Copy the value.
                setattr(spin_to, param, deepcopy(getattr(spin_from, param)))

    # Switch back to the original data pipe.
    pipes.switch(pipe_orig)
    def __init__(self, pipe_name=None, pipe_bundle=None, results_dir=None, write_results_dir=None, diff_model=None, mf_models=None, local_tm_models=None, grid_inc=11, diff_tensor_grid_inc=None, min_algor='newton', mc_sim_num=500, max_iter=None, user_fns=None, conv_loop=True):
        """Perform the full model-free analysis protocol of d'Auvergne and Gooley, 2008b.

        @keyword pipe_name:             The name of the data pipe containing the sequence info.  This data pipe should have all values set including the CSA value, the bond length, the heteronucleus name and proton name.  It should also have all relaxation data loaded.
        @type pipe_name:                str
        @keyword pipe_bundle:           The data pipe bundle to associate all spawned data pipes with.
        @type pipe_bundle:              str
        @keyword results_dir:           The directory where optimisation results will read from.  Results will also be saved to this directory if the write_results_dir argument is not given.
        @type results_dir:              str
        @keyword write_results_dir:     The directory where optimisation results will be saved in.  If None, it will default to the value of the results_dir argument.  This is mainly used for debugging.
        @type write_results_dir:        str or None
        @keyword diff_model:            The global diffusion model to optimise.  This can be one of 'local_tm', 'sphere', 'oblate', 'prolate', 'ellipsoid', or 'final'.  If all or a subset of these are supplied as a list, then these will be automatically looped over and calculated.
        @type diff_model:               str or list of str
        @keyword mf_models:             The model-free models.  If None, this defaults to ['m0', 'm1', 'm2', 'm3', 'm4', 'm5', 'm6', 'm7', 'm8', 'm9'].
        @type mf_models:                list of str or None
        @keyword local_tm_models:       The model-free models.  If None, this defaults to ['tm0', 'tm1', 'tm2', 'tm3', 'tm4', 'tm5', 'tm6', 'tm7', 'tm8', 'tm9'].
        @type local_tm_models:          list of str or None
        @keyword grid_inc:              The grid search size (the number of increments per dimension).
        @type grid_inc:                 int
        @keyword diff_tensor_grid_inc:  A dictionary of grid search sizes for the optimisation of the sphere, prolate spheroid, oblate spheroid, and ellipsoid, keyed by the model name.  If None, this defaults to {'sphere': 11, 'prolate': 11, 'oblate': 11, 'ellipsoid': 6}.
        @type diff_tensor_grid_inc:     dict of int or None
        @keyword min_algor:             The minimisation algorithm (in most cases this should not be changed).
        @type min_algor:                str
        @keyword mc_sim_num:            The number of Monte Carlo simulations to be used for error analysis at the end of the analysis.
        @type mc_sim_num:               int
        @keyword max_iter:              The maximum number of iterations for the global iteration.  Set to None, then the algorithm iterates until convergence.
        @type max_iter:                 int or None.
        @keyword user_fns:              A dictionary of replacement user functions.  These will overwrite the standard user functions.  The key should be the name of the user function or user function class and the value should be the function or class instance.
        @type user_fns:                 dict
        @keyword conv_loop:             Automatic looping over all rounds until convergence.
        @type conv_loop:                bool
        """

        # Resolve the defaults here rather than in the signature, so that no mutable list or dict object is shared between calls or instances.
        if mf_models is None:
            mf_models = ['m0', 'm1', 'm2', 'm3', 'm4', 'm5', 'm6', 'm7', 'm8', 'm9']
        if local_tm_models is None:
            local_tm_models = ['tm0', 'tm1', 'tm2', 'tm3', 'tm4', 'tm5', 'tm6', 'tm7', 'tm8', 'tm9']
        if diff_tensor_grid_inc is None:
            diff_tensor_grid_inc = {'sphere': 11, 'prolate': 11, 'oblate': 11, 'ellipsoid': 6}

        # Execution lock.
        status.exec_lock.acquire(pipe_bundle, mode='auto-analysis')

        # Store the args.
        self.pipe_name = pipe_name
        self.pipe_bundle = pipe_bundle
        self.mf_models = mf_models
        self.local_tm_models = local_tm_models
        self.grid_inc = grid_inc
        self.diff_tensor_grid_inc = diff_tensor_grid_inc
        self.min_algor = min_algor
        self.mc_sim_num = mc_sim_num
        self.max_iter = max_iter
        self.conv_loop = conv_loop

        # The model-free data pipe names.
        self.mf_model_pipes = [self.name_pipe(model) for model in self.mf_models]
        self.local_tm_model_pipes = [self.name_pipe(model) for model in self.local_tm_models]

        # The diffusion models, normalised to a list for uniform looping below.
        if isinstance(diff_model, list):
            self.diff_model_list = diff_model
        else:
            self.diff_model_list = [diff_model]

        # Project directory (i.e. directory containing the model-free model results and the newly generated files)
        if results_dir:
            self.results_dir = results_dir + sep
        else:
            self.results_dir = getcwd() + sep
        if write_results_dir:
            self.write_results_dir = write_results_dir + sep
        else:
            self.write_results_dir = self.results_dir

        # Data checks.
        self.check_vars()

        # Set the data pipe to the current data pipe.
        if self.pipe_name != cdp_name():
            switch(self.pipe_name)

        # Some info for the status.
        self.status_setup()

        # Load the interpreter.
        self.interpreter = Interpreter(show_script=False, quit=False, raise_relax_error=True)
        self.interpreter.populate_self()
        self.interpreter.on(verbose=False)

        # Replacement user functions.
        if user_fns:
            for name in user_fns:
                setattr(self.interpreter, name, user_fns[name])

        # Execute the protocol.
        try:
            # Loop over the models.
            for self.diff_model in self.diff_model_list:
                # Wait a little while between diffusion models.
                sleep(1)

                # Set the global model name.
                status.auto_analysis[self.pipe_bundle].diff_model = self.diff_model

                # Initialise the convergence data structures.
                self.conv_data = Container()
                self.conv_data.chi2 = []
                self.conv_data.models = []
                self.conv_data.diff_vals = []
                if self.diff_model == 'sphere':
                    self.conv_data.diff_params = ['tm']
                elif self.diff_model == 'oblate' or self.diff_model == 'prolate':
                    self.conv_data.diff_params = ['tm', 'Da', 'theta', 'phi']
                elif self.diff_model == 'ellipsoid':
                    self.conv_data.diff_params = ['tm', 'Da', 'Dr', 'alpha', 'beta', 'gamma']
                self.conv_data.spin_ids = []
                self.conv_data.mf_params = []
                self.conv_data.mf_vals = []

                # Execute the analysis for each diffusion model.
                self.execute()

        # Clean up.
        finally:
            # Finish and unlock execution.
            status.auto_analysis[self.pipe_bundle].fin = True
            status.current_analysis = None
            status.exec_lock.release()
# Example #51
def copy(pipe_from=None, pipe_to=None):
    """Copy dispersion parameters from one data pipe to another, using cluster medians as starting values.

    For each spin cluster, the median of the source parameter values is taken as the new starting value.  Taking the median prevents extreme outliers from skewing the starting value, as a plain average would.

    @param pipe_from:   The data pipe to copy the value from.  This defaults to the current data pipe.
    @type pipe_from:    str
    @param pipe_to:     The data pipe to copy the value to.  This defaults to the current data pipe.
    @type pipe_to:      str
    """

    # The clustered parameters, for which the cluster median becomes the starting value.
    median_params = ['pA', 'pB', 'pC', 'kex', 'kex_AB', 'kex_AC', 'kex_BC', 'k_AB', 'kB', 'kC', 'tex']

    # The R20 parameters, copied per-spin rather than per-cluster.
    r20_params = ['r2', 'r2a', 'r2b']

    # The current data pipe, stored so it can be restored at the end.
    pipe_orig = pipes.cdp_name()
    if pipe_from is None:
        pipe_from = pipe_orig
    if pipe_to is None:
        pipe_to = pipe_orig

    # Test that the pipes exist.
    check_pipe(pipe_from)
    check_pipe(pipe_to)

    # Test that the pipes are not the same.
    if pipe_from == pipe_to:
        raise RelaxError("The source and destination pipes cannot be the same.")

    # Test if the sequence data for pipe_from is loaded.
    if not exists_mol_res_spin_data(pipe_from):
        raise RelaxNoSequenceError(pipe_from)

    # Test if the sequence data for pipe_to is loaded.
    if not exists_mol_res_spin_data(pipe_to):
        raise RelaxNoSequenceError(pipe_to)

    # Switch to the destination data pipe.
    pipes.switch(pipe_to)

    # Loop over the clusters.
    for spin_ids in loop_cluster():
        # Initialise the per-cluster data structures.
        model = None
        values = {}
        for name in median_params:
            values[name] = []
        spins_from = []
        spins_to = []
        selected_cluster = False

        # First pass: collect the source parameter values of all selected spins of the cluster.
        for id in spin_ids:
            # Get the spins, then store them.
            spin_from = return_spin(spin_id=id, pipe=pipe_from)
            spin_to = return_spin(spin_id=id, pipe=pipe_to)
            spins_from.append(spin_from)
            spins_to.append(spin_to)

            # Skip deselected spins.
            if not spin_from.select or not spin_to.select:
                continue

            # The first printout for the cluster.
            if not selected_cluster:
                subsection(file=sys.stdout, text="Copying parameters for the spin block %s"%spin_ids, prespace=2)

            # Change the cluster selection flag.
            selected_cluster = True

            # The model, taken from the first selected spin.
            if model is None:
                model = spin_from.model

            # Check that the models match for all spins of the cluster.
            if spin_from.model != model:
                raise RelaxError("The model '%s' of spin '%s' from the source data pipe does not match the '%s' model of previous spins of the cluster." % (spin_from.model, id, model))
            if spin_to.model != model:
                raise RelaxError("The model '%s' of spin '%s' from the destination data pipe does not match the '%s' model of previous spins of the cluster." % (spin_to.model, id, model))

            # Store the source parameter values.
            for name in median_params:
                if name in spin_from.params:
                    values[name].append(getattr(spin_from, name))

        # The cluster is not selected, so move to the next.
        if not selected_cluster:
            continue

        # Collapse each non-empty value list to its median.  A parameter absent from every spin keeps its empty list, matching the per-parameter guards below.
        for name in median_params:
            if len(values[name]):
                values[name] = median(values[name])
                print("Median %s value: %.15f" % (name, values[name]))

        # Second pass: copy the parameters to the destination spins.
        for i in range(len(spin_ids)):
            # Alias the containers.
            spin_from = spins_from[i]
            spin_to = spins_to[i]

            # Skip deselected spins.
            if not spin_from.select or not spin_to.select:
                continue

            # The R20 parameters, copied directly from the matching source spin.
            for name in r20_params:
                if name in spin_from.params:
                    setattr(spin_to, name, deepcopy(getattr(spin_from, name)))

            # The population parameters - the remaining population is derived so that all populations sum to one.
            if 'pB' in spin_from.params and 'pC' not in spin_from.params:
                spin_to.pA = values['pA']
                spin_to.pB = values['pB']
                spin_to.pC = 1.0 - values['pA'] - values['pB']
            elif 'pA' in spin_from.params:
                spin_to.pA = values['pA']
                spin_to.pB = 1.0 - values['pA']

            # The exchange rate and time parameters.
            for name in ['kex', 'kex_AB', 'kex_AC', 'kex_BC', 'k_AB', 'kB', 'kC', 'tex']:
                if name in spin_from.params:
                    setattr(spin_to, name, values[name])

            # All other spin specific parameters are copied directly.
            for param in spin_from.params:
                if param in r20_params or param in median_params:
                    continue

                # Copy the value.
                setattr(spin_to, param, deepcopy(getattr(spin_from, param)))

    # Switch back to the original data pipe.
    pipes.switch(pipe_orig)