Example #1
    def test_loop_time_cpmg(self):
        """Unit test of the loop_time() function.

        This uses the data of the saved state attached to U{bug #21665<https://gna.org/bugs/?21665>}.
        """

        # Load the state.
        statefile = status.install_path + sep + 'test_suite' + sep + 'shared_data' + sep + 'dispersion' + sep + 'bug_21665.bz2'
        state.load_state(statefile, force=True)

        # Original data (exp_type, frq, offset, point).
        data = [
            ['SQ CPMG', 499862140.0, 0, [50.0, 100.0, 150.0, 200.0, 250.0, 300.0, 350.0, 400.0, 450.0, 500.0, 550.0, 600.0, 650.0, 700.0, 800.0, 900.0, 1000.0], 0.04],
            ['SQ CPMG', 599890858.69999993, 0, [33.3333, 66.666, 100.0, 133.333, 166.666, 200.0, 233.333, 266.666, 300.0, 333.333, 366.666, 400.0, 466.666, 533.333, 666.666, 866.666, 1000.0], 0.06]
        ]

        # Original indices (ei, mi, oi, ti).
        indices = [
            [0, 0, 0, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], 0],
            [0, 1, 0, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], 0]
        ]

        # Check the number of iterations.
        print("Checking the number of iterations of the loop.")
        count_frq = 0
        for dat in data:
            frq = dat[1]
            for time, ti in loop_time(frq=frq, return_indices=True):
                print(time, ti)
                count_frq += 1
        self.assertEqual(count_frq, 2)
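A minimal sketch of the loop_time() behaviour this test exercises, assuming the relaxation times are stored as (frq, time) pairs; the names below are hypothetical stand-ins, not the relax API:

def sketch_loop_time(relax_times, frq=None, return_indices=False):
    """Yield each unique relaxation time matching the frequency filter."""
    times = sorted({t for f, t in relax_times if frq is None or f == frq})
    for ti, time in enumerate(times):
        yield (time, ti) if return_indices else time

# Each CPMG field strength above has a single fixed relaxation time, so the
# double loop in the test visits exactly two (time, ti) pairs in total.
relax_times = [(499862140.0, 0.04), (599890858.69999993, 0.06)]
count = 0
for f in (499862140.0, 599890858.69999993):
    for time, ti in sketch_loop_time(relax_times, frq=f, return_indices=True):
        count += 1
assert count == 2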
Example #2
def back_calc_peak_intensities(spin=None, exp_type=None, frq=None, offset=None, point=None):
    """Back-calculation of peak intensity for the given relaxation time.

    @keyword spin:      The specific spin data container.
    @type spin:         SpinContainer instance
    @keyword exp_type:  The experiment type.
    @type exp_type:     str
    @keyword frq:       The spectrometer frequency.
    @type frq:          float
    @keyword offset:    For R1rho-type data, the spin-lock offset value in ppm.
    @type offset:       None or float
    @keyword point:     The dispersion point data (either the spin-lock field strength in Hz or the nu_CPMG frequency in Hz).
    @type point:        float
    @return:            The back-calculated peak intensities for the given exponential curve.
    @rtype:             numpy rank-1 float array
    """

    # Check.
    if not has_exponential_exp_type():
        raise RelaxError("Back-calculation is not allowed for the fixed time experiment types.")

    # The key.
    param_key = return_param_key_from_data(exp_type=exp_type, frq=frq, offset=offset, point=point)

    # Create the initial parameter vector.
    param_vector = assemble_param_vector(spins=[spin], key=param_key)

    # The peak intensities, errors and times.
    values = []
    errors = []
    times = []
    for time in loop_time(exp_type=exp_type, frq=frq, offset=offset, point=point):
        # The data.
        values.append(average_intensity(spin=spin, exp_type=exp_type, frq=frq, offset=offset, point=point, time=time))
        errors.append(average_intensity(spin=spin, exp_type=exp_type, frq=frq, offset=offset, point=point, time=time, error=True))
        times.append(time)

    # The scaling matrix in a diagonalised list form.
    scaling_list = []
    for i in range(len(param_vector)):
        scaling_list.append(1.0)

    # Initialise the relaxation fit functions.
    model = Relax_fit_opt(model='exp', num_params=len(spin.params), values=values, errors=errors, relax_times=times, scaling_matrix=scaling_list)

    # Make a single function call.  This will cause back calculation and the data will be stored in the C module.
    model.func(param_vector)

    # Get the data back.
    results = model.back_calc_data()

    # Return the back-calculated peak intensities.
    return results
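Relax_fit_opt's 'exp' model back-calculates the two-parameter exponential I(t) = i0 * exp(-r2eff * t), with the [r2eff, i0] parameter ordering used in the error estimation example below. A minimal numpy sketch of that back-calculation step, as an illustration rather than the C module itself:

import numpy as np

def back_calc_exp(param_vector, times):
    """Back-calculate peak intensities from the [r2eff, i0] parameter vector."""
    r2eff, i0 = param_vector
    return i0 * np.exp(-r2eff * np.asarray(times))

# For example, with R2eff = 10 s^-1 and I0 = 1000:
intensities = back_calc_exp([10.0, 1000.0], [0.0, 0.04, 0.1, 0.2])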
Example #3
    def test_loop_time_cpmg(self):
        """Unit test of the loop_time() function.

        This uses the data of the saved state attached to U{bug #21665<https://web.archive.org/web/https://gna.org/bugs/?21665>}.
        """

        # Load the state.
        statefile = status.install_path + sep + 'test_suite' + sep + 'shared_data' + sep + 'dispersion' + sep + 'bug_21665.bz2'
        state.load_state(statefile, force=True)

        # Original data (exp_type, frq, offset, point).
        data = [
            [
                'SQ CPMG', 499862140.0, 0,
                [
                    50.0, 100.0, 150.0, 200.0, 250.0, 300.0, 350.0, 400.0, 450.0,
                    500.0, 550.0, 600.0, 650.0, 700.0, 800.0, 900.0, 1000.0
                ], 0.04
            ],
            [
                'SQ CPMG', 599890858.69999993, 0,
                [
                    33.3333, 66.666, 100.0, 133.333, 166.666, 200.0,
                    233.333, 266.666, 300.0, 333.333, 366.666, 400.0,
                    466.666, 533.333, 666.666, 866.666, 1000.0
                ], 0.06
            ]
        ]

        # Original indices (ei, mi, oi, ti).
        indices = [
            [
                0, 0, 0,
                [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], 0
            ],
            [
                0, 1, 0,
                [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], 0
            ]
        ]

        # Check the number of iterations.
        print("Checking the number of iterations of the loop.")
        count_frq = 0
        for dat in data:
            frq = dat[1]
            for time, ti in loop_time(frq=frq, return_indices=True):
                print(time, ti)
                count_frq += 1
        self.assertEqual(count_frq, 2)
Example #4
    def test_loop_time_r1rho(self):
        """Unit test of the loop_time() function for R1rho setup.

        This uses the data of the saved state attached to U{bug #21344<https://web.archive.org/web/https://gna.org/bugs/?21344>}.
        """

        # Load the state.
        statefile = status.install_path + sep + 'test_suite' + sep + 'shared_data' + sep + 'dispersion' + sep + 'bug_21344_trunc.bz2'
        state.load_state(statefile, force=True)

        # Offset and point combinations, with their associated time.
        offset_point_time_list = [
            [118.078, 431.0, [0.0, 0.04, 0.1, 0.2]],
            [118.078, 651.2, [0.0, 0.04, 0.1, 0.2, 0.4]],
            [118.078, 800.5, [0.0, 0.04, 0.1, 0.2, 0.4]],
            [118.078, 984.0, [0.0, 0.04, 0.1, 0.2, 0.4]],
            [118.078, 1341.11, [0.0, 0.04, 0.1, 0.2, 0.4]],
            [118.078, 1648.5, [0.0, 0.04, 0.1, 0.14, 0.2]],
            [124.247031462, 1341.11, [0.0, 0.04, 0.1, 0.2, 0.4]],
            [130.416062924, 800.5, [0.0, 0.04, 0.1, 0.2, 0.4]],
            [130.416062924, 1341.11, [0.0, 0.04, 0.1, 0.2, 0.4]],
            [130.416062924, 1648.5, [0.0, 0.04, 0.1, 0.14, 0.2]],
            [142.754125848, 800.5, [0.0, 0.04, 0.1, 0.2, 0.4]],
            [142.754125848, 1341.11, [0.0, 0.04, 0.1, 0.2, 0.4]],
            [179.768314621, 1341.11, [0.0, 0.04, 0.1, 0.2, 0.4]],
            [241.458629241, 1341.11, [0.0, 0.04, 0.1, 0.2, 0.4]]
        ]

        # Check the number of iterations.
        print("Checking the number of iterations of the loop.")
        frq = 799777399.1

        for offset, point, time_list in offset_point_time_list:
            # Initialise the count.
            count = 0
            for time, ti in loop_time(frq=frq,
                                      offset=offset,
                                      point=point,
                                      return_indices=True):
                print(frq, offset, point, time, ti, count)
                self.assertEqual(time, time_list[count])
                self.assertEqual(ti, count)
                count += 1
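The per-(offset, point) time lists asserted above can be sketched as a simple grouping of the metadata rows; the helper below is a hypothetical illustration, not the relax data store:

from collections import defaultdict

def group_times(rows):
    """Build {(offset, point): sorted time list} from (offset, point, time) rows."""
    groups = defaultdict(set)
    for offset, point, time in rows:
        groups[(offset, point)].add(time)
    return {key: sorted(times) for key, times in groups.items()}

rows = [(118.078, 431.0, 0.1), (118.078, 431.0, 0.0),
        (118.078, 431.0, 0.04), (118.078, 431.0, 0.2)]
assert group_times(rows)[(118.078, 431.0)] == [0.0, 0.04, 0.1, 0.2]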
Example #5
    def test_loop_time_r1rho(self):
        """Unit test of the loop_time() function for R1rho setup.

        This uses the data of the saved state attached to U{bug #21344<https://gna.org/bugs/?21344>}.
        """

        # Load the state.
        statefile = status.install_path + sep + 'test_suite' + sep + 'shared_data' + sep + 'dispersion' + sep + 'bug_21344_trunc.bz2'
        state.load_state(statefile, force=True)

        # Offset and point combinations, with their associated time.
        offset_point_time_list = [
            [118.078, 431.0, [0.0, 0.04, 0.1, 0.2]],
            [118.078, 651.2, [0.0, 0.04, 0.1, 0.2, 0.4]],
            [118.078, 800.5, [0.0, 0.04, 0.1, 0.2, 0.4]],
            [118.078, 984.0, [0.0, 0.04, 0.1, 0.2, 0.4]],
            [118.078, 1341.11, [0.0, 0.04, 0.1, 0.2, 0.4]],
            [118.078, 1648.5, [0.0, 0.04, 0.1, 0.14, 0.2]],
            [124.247031462, 1341.11, [0.0, 0.04, 0.1, 0.2, 0.4]],
            [130.416062924, 800.5, [0.0, 0.04, 0.1, 0.2, 0.4]],
            [130.416062924, 1341.11, [0.0, 0.04, 0.1, 0.2, 0.4]],
            [130.416062924, 1648.5, [0.0, 0.04, 0.1, 0.14, 0.2]],
            [142.754125848, 800.5, [0.0, 0.04, 0.1, 0.2, 0.4]],
            [142.754125848, 1341.11, [0.0, 0.04, 0.1, 0.2, 0.4]],
            [179.768314621, 1341.11, [0.0, 0.04, 0.1, 0.2, 0.4]],
            [241.458629241, 1341.11, [0.0, 0.04, 0.1, 0.2, 0.4]]
        ]

        # Check the number of iterations.
        print("Checking the number of iterations of the loop.")
        frq = 799777399.1

        for offset, point, time_list in offset_point_time_list:
            # Initialise the count.
            count = 0
            for time, ti in loop_time(frq=frq, offset=offset, point=point, return_indices=True):
                print(frq, offset, point, time, ti, count)
                self.assertEqual(time, time_list[count])
                self.assertEqual(ti, count)
                count += 1
Example #6
    def test_find_intensity_keys_r1rho(self):
        """Unit test of the find_intensity_keys() function.

        This uses the data of the saved state attached to U{bug #21344<https://gna.org/bugs/?21344>}.
        """

        # Load the state.
        statefile = status.install_path + sep + 'test_suite' + sep + 'shared_data' + sep + 'dispersion' + sep + 'bug_21344_trunc.bz2'
        state.load_state(statefile, force=True)

        # Original data (key: [spectrum id, exp_type, frq, omega_rf in ppm, spin-lock field strength in Hz, spin-lock time in s]).
        data = dict()
        data['118_431.00_0.00'] = ['46_0_35_0', 'R1rho', 799777399.1, 118.078, 431.0, 0.0]
        data['118_431.00_0.04'] = ['48_0_35_4', 'R1rho', 799777399.1, 118.078, 431.0, 0.04]
        data['118_431.00_0.10'] = ['47_0_35_10', 'R1rho', 799777399.1, 118.078, 431.0, 0.1]
        data['118_431.00_0.20'] = ['49_0_35_20', 'R1rho', 799777399.1, 118.078, 431.0, 0.2]
        data['118_651.20_0.00'] = ['36_0_39_0', 'R1rho', 799777399.1, 118.078, 651.2, 0.0]
        data['118_651.20_0.04'] = ['39_0_39_4', 'R1rho', 799777399.1, 118.078, 651.2, 0.04]
        data['118_651.20_0.10'] = ['37_0_39_10', 'R1rho', 799777399.1, 118.078, 651.2, 0.1]
        data['118_651.20_0.20'] = ['40_0_39_20', 'R1rho', 799777399.1, 118.078, 651.2, 0.2]
        data['118_651.20_0.40'] = ['38_0_39_40', 'R1rho', 799777399.1, 118.078, 651.2, 0.4]
        data['118_800.50_0.00'] = ['41_0_41_0', 'R1rho', 799777399.1, 118.078, 800.5, 0.0]
        data['118_800.50_0.04'] = ['44_0_41_4', 'R1rho', 799777399.1, 118.078, 800.5, 0.04]
        data['118_800.50_0.10'] = ['42_0_41_10', 'R1rho', 799777399.1, 118.078, 800.5, 0.1]
        data['118_800.50_0.20'] = ['45_0_41_20', 'R1rho', 799777399.1, 118.078, 800.5, 0.2]
        data['118_800.50_0.40'] = ['43_0_41_40', 'R1rho', 799777399.1, 118.078, 800.5, 0.4]
        data['118_984.00_0.00'] = ['31_0_43_0', 'R1rho', 799777399.1, 118.078, 984.0, 0.0]
        data['118_984.00_0.04'] = ['34_0_43_4', 'R1rho', 799777399.1, 118.078, 984.0, 0.04]
        data['118_984.00_0.10'] = ['32_0_43_10', 'R1rho', 799777399.1, 118.078, 984.0, 0.1]
        data['118_984.00_0.20'] = ['35_0_43_20', 'R1rho', 799777399.1, 118.078, 984.0, 0.2]
        data['118_984.00_0.40'] = ['33_0_43_40', 'R1rho', 799777399.1, 118.078, 984.0, 0.4]
        data['118_1341.11_0.00'] = ['1_0_46_0', 'R1rho', 799777399.1, 118.078, 1341.11, 0.0]
        data['118_1341.11_0.04'] = ['4_0_46_4', 'R1rho', 799777399.1, 118.078, 1341.11, 0.04]
        data['118_1341.11_0.10'] = ['2_0_46_10', 'R1rho', 799777399.1, 118.078, 1341.11, 0.1]
        data['118_1341.11_0.20'] = ['5_0_46_20', 'R1rho', 799777399.1, 118.078, 1341.11, 0.2]
        data['118_1341.11_0.40'] = ['3_0_46_40', 'R1rho', 799777399.1, 118.078, 1341.11, 0.4]
        data['118_1648.50_0.00'] = ['60_0_48_0', 'R1rho', 799777399.1, 118.078, 1648.5, 0.0]
        data['118_1648.50_0.04'] = ['63_0_48_4', 'R1rho', 799777399.1, 118.078, 1648.5, 0.04]
        data['118_1648.50_0.10'] = ['61_0_48_10', 'R1rho', 799777399.1, 118.078, 1648.5, 0.1]
        data['118_1648.50_0.14'] = ['62_0_48_14', 'R1rho', 799777399.1, 118.078, 1648.5, 0.14]
        data['118_1648.50_0.20'] = ['64_0_48_20', 'R1rho', 799777399.1, 118.078, 1648.5, 0.2]
        data['124_1341.11_0.00'] = ['11_500_46_0', 'R1rho', 799777399.1, 124.247031462, 1341.11, 0.0]
        data['124_1341.11_0.04'] = ['14_500_46_4', 'R1rho', 799777399.1, 124.247031462, 1341.11, 0.04]
        data['124_1341.11_0.10'] = ['12_500_46_10', 'R1rho', 799777399.1, 124.247031462, 1341.11, 0.1]
        data['124_1341.11_0.20'] = ['15_500_46_20', 'R1rho', 799777399.1, 124.247031462, 1341.11, 0.2]
        data['124_1341.11_0.40'] = ['13_500_46_40', 'R1rho', 799777399.1, 124.247031462, 1341.11, 0.4]
        data['130_800.50_0.00'] = ['50_1000_41_0', 'R1rho', 799777399.1, 130.416062924, 800.5, 0.0]
        data['130_800.50_0.04'] = ['53_1000_41_4', 'R1rho', 799777399.1, 130.416062924, 800.5, 0.04]
        data['130_800.50_0.10'] = ['51_1000_41_10', 'R1rho', 799777399.1, 130.416062924, 800.5, 0.1]
        data['130_800.50_0.20'] = ['54_1000_41_20', 'R1rho', 799777399.1, 130.416062924, 800.5, 0.2]
        data['130_800.50_0.40'] = ['52_1000_41_40', 'R1rho', 799777399.1, 130.416062924, 800.5, 0.4]
        data['130_1341.11_0.00'] = ['21_1000_46_0', 'R1rho', 799777399.1, 130.416062924, 1341.11, 0.0]
        data['130_1341.11_0.04'] = ['24_1000_46_4', 'R1rho', 799777399.1, 130.416062924, 1341.11, 0.04]
        data['130_1341.11_0.10'] = ['22_1000_46_10', 'R1rho', 799777399.1, 130.416062924, 1341.11, 0.1]
        data['130_1341.11_0.20'] = ['25_1000_46_20', 'R1rho', 799777399.1, 130.416062924, 1341.11, 0.2]
        data['130_1341.11_0.40'] = ['23_1000_46_40', 'R1rho', 799777399.1, 130.416062924, 1341.11, 0.4]
        data['130_1648.50_0.00'] = ['65_1000_48_0', 'R1rho', 799777399.1, 130.416062924, 1648.5, 0.0]
        data['130_1648.50_0.04'] = ['68_1000_48_4', 'R1rho', 799777399.1, 130.416062924, 1648.5, 0.04]
        data['130_1648.50_0.10'] = ['66_1000_48_10', 'R1rho', 799777399.1, 130.416062924, 1648.5, 0.1]
        data['130_1648.50_0.14'] = ['67_1000_48_14', 'R1rho', 799777399.1, 130.416062924, 1648.5, 0.14]
        data['130_1648.50_0.20'] = ['69_1000_48_20', 'R1rho', 799777399.1, 130.416062924, 1648.5, 0.2]
        data['143_800.50_0.00'] = ['55_2000_41_0', 'R1rho', 799777399.1, 142.754125848, 800.5, 0.0]
        data['143_800.50_0.04'] = ['58_2000_41_4', 'R1rho', 799777399.1, 142.754125848, 800.5, 0.04]
        data['143_800.50_0.10'] = ['56_2000_41_10', 'R1rho', 799777399.1, 142.754125848, 800.5, 0.1]
        data['143_800.50_0.20'] = ['59_2000_41_20', 'R1rho', 799777399.1, 142.754125848, 800.5, 0.2]
        data['143_800.50_0.40'] = ['57_2000_41_40', 'R1rho', 799777399.1, 142.754125848, 800.5, 0.4]
        data['143_1341.11_0.00'] = ['6_2000_46_0', 'R1rho', 799777399.1, 142.754125848, 1341.11, 0.0]
        data['143_1341.11_0.04'] = ['9_2000_46_4', 'R1rho', 799777399.1, 142.754125848, 1341.11, 0.04]
        data['143_1341.11_0.10'] = ['7_2000_46_10', 'R1rho', 799777399.1, 142.754125848, 1341.11, 0.1]
        data['143_1341.11_0.20'] = ['10_2000_46_20', 'R1rho', 799777399.1, 142.754125848, 1341.11, 0.2]
        data['143_1341.11_0.40'] = ['8_2000_46_40', 'R1rho', 799777399.1, 142.754125848, 1341.11, 0.4]
        data['180_1341.11_0.00'] = ['16_5000_46_0', 'R1rho', 799777399.1, 179.768314621, 1341.11, 0.0]
        data['180_1341.11_0.04'] = ['19_5000_46_4', 'R1rho', 799777399.1, 179.768314621, 1341.11, 0.04]
        data['180_1341.11_0.10'] = ['17_5000_46_10', 'R1rho', 799777399.1, 179.768314621, 1341.11, 0.1]
        data['180_1341.11_0.20'] = ['20_5000_46_20', 'R1rho', 799777399.1, 179.768314621, 1341.11, 0.2]
        data['180_1341.11_0.40'] = ['18_5000_46_40', 'R1rho', 799777399.1, 179.768314621, 1341.11, 0.4]
        data['241_1341.11_0.00'] = ['26_10000_46_0', 'R1rho', 799777399.1, 241.458629241, 1341.11, 0.0]
        data['241_1341.11_0.04'] = ['29_10000_46_4', 'R1rho', 799777399.1, 241.458629241, 1341.11, 0.04]
        data['241_1341.11_0.10'] = ['27_10000_46_10', 'R1rho', 799777399.1, 241.458629241, 1341.11, 0.1]
        data['241_1341.11_0.20'] = ['30_10000_46_20', 'R1rho', 799777399.1, 241.458629241, 1341.11, 0.2]
        data['241_1341.11_0.40'] = ['28_10000_46_40', 'R1rho', 799777399.1, 241.458629241, 1341.11, 0.4]

        # Check the returned spectrum IDs.
        print("Checking the IDs returned for each experiment.")
        for id in cdp.exp_type:
            exp_type = cdp.exp_type[id]
            frq = cdp.spectrometer_frq[id]
            offset = cdp.spin_lock_offset[id]
            point = cdp.spin_lock_nu1[id]

            # Loop over time
            for time in loop_time(exp_type=exp_type, frq=frq, offset=offset, point=point):
                ids = find_intensity_keys(exp_type=exp_type, frq=frq, offset=offset, point=point, time=time)

                print(exp_type, frq, offset, point, time, data["%3.0f_%3.2f_%1.2f"%(offset, point, time)][5], id, ids)

                # Test the id return
                self.assertEqual(len(ids), 1)
                # Test the time point
                self.assertEqual(time, data["%3.0f_%3.2f_%1.2f"%(offset, point, time)][5])
                self.assertEqual(ids[0], data["%3.0f_%3.2f_%1.2f"%(offset, point, time)][0])
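find_intensity_keys() resolves a set of experimental parameters back to the matching spectrum IDs. A minimal sketch of that reverse lookup over per-ID metadata, with the container layout assumed for illustration:

def sketch_find_intensity_keys(exp_type, frq, offset, point, time, metadata):
    """Return all spectrum IDs whose metadata match the given parameters."""
    return [sid for sid, meta in metadata.items()
            if meta == (exp_type, frq, offset, point, time)]

# Using one entry from the data dictionary above:
metadata = {'48_0_35_4': ('R1rho', 799777399.1, 118.078, 431.0, 0.04)}
ids = sketch_find_intensity_keys('R1rho', 799777399.1, 118.078, 431.0, 0.04, metadata)
assert ids == ['48_0_35_4']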
Example #7
    def test_find_intensity_keys_r1rho(self):
        """Unit test of the find_intensity_keys() function.

        This uses the data of the saved state attached to U{bug #21344<https://web.archive.org/web/https://gna.org/bugs/?21344>}.
        """

        # Load the state.
        statefile = status.install_path + sep + 'test_suite' + sep + 'shared_data' + sep + 'dispersion' + sep + 'bug_21344_trunc.bz2'
        state.load_state(statefile, force=True)

        # Original data (key: [spectrum id, exp_type, frq, omega_rf in ppm, spin-lock field strength in Hz, spin-lock time in s]).
        data = dict()
        data['118_431.00_0.00'] = [
            '46_0_35_0', 'R1rho', 799777399.1, 118.078, 431.0, 0.0
        ]
        data['118_431.00_0.04'] = [
            '48_0_35_4', 'R1rho', 799777399.1, 118.078, 431.0, 0.04
        ]
        data['118_431.00_0.10'] = [
            '47_0_35_10', 'R1rho', 799777399.1, 118.078, 431.0, 0.1
        ]
        data['118_431.00_0.20'] = [
            '49_0_35_20', 'R1rho', 799777399.1, 118.078, 431.0, 0.2
        ]
        data['118_651.20_0.00'] = [
            '36_0_39_0', 'R1rho', 799777399.1, 118.078, 651.2, 0.0
        ]
        data['118_651.20_0.04'] = [
            '39_0_39_4', 'R1rho', 799777399.1, 118.078, 651.2, 0.04
        ]
        data['118_651.20_0.10'] = [
            '37_0_39_10', 'R1rho', 799777399.1, 118.078, 651.2, 0.1
        ]
        data['118_651.20_0.20'] = [
            '40_0_39_20', 'R1rho', 799777399.1, 118.078, 651.2, 0.2
        ]
        data['118_651.20_0.40'] = [
            '38_0_39_40', 'R1rho', 799777399.1, 118.078, 651.2, 0.4
        ]
        data['118_800.50_0.00'] = [
            '41_0_41_0', 'R1rho', 799777399.1, 118.078, 800.5, 0.0
        ]
        data['118_800.50_0.04'] = [
            '44_0_41_4', 'R1rho', 799777399.1, 118.078, 800.5, 0.04
        ]
        data['118_800.50_0.10'] = [
            '42_0_41_10', 'R1rho', 799777399.1, 118.078, 800.5, 0.1
        ]
        data['118_800.50_0.20'] = [
            '45_0_41_20', 'R1rho', 799777399.1, 118.078, 800.5, 0.2
        ]
        data['118_800.50_0.40'] = [
            '43_0_41_40', 'R1rho', 799777399.1, 118.078, 800.5, 0.4
        ]
        data['118_984.00_0.00'] = [
            '31_0_43_0', 'R1rho', 799777399.1, 118.078, 984.0, 0.0
        ]
        data['118_984.00_0.04'] = [
            '34_0_43_4', 'R1rho', 799777399.1, 118.078, 984.0, 0.04
        ]
        data['118_984.00_0.10'] = [
            '32_0_43_10', 'R1rho', 799777399.1, 118.078, 984.0, 0.1
        ]
        data['118_984.00_0.20'] = [
            '35_0_43_20', 'R1rho', 799777399.1, 118.078, 984.0, 0.2
        ]
        data['118_984.00_0.40'] = [
            '33_0_43_40', 'R1rho', 799777399.1, 118.078, 984.0, 0.4
        ]
        data['118_1341.11_0.00'] = [
            '1_0_46_0', 'R1rho', 799777399.1, 118.078, 1341.11, 0.0
        ]
        data['118_1341.11_0.04'] = [
            '4_0_46_4', 'R1rho', 799777399.1, 118.078, 1341.11, 0.04
        ]
        data['118_1341.11_0.10'] = [
            '2_0_46_10', 'R1rho', 799777399.1, 118.078, 1341.11, 0.1
        ]
        data['118_1341.11_0.20'] = [
            '5_0_46_20', 'R1rho', 799777399.1, 118.078, 1341.11, 0.2
        ]
        data['118_1341.11_0.40'] = [
            '3_0_46_40', 'R1rho', 799777399.1, 118.078, 1341.11, 0.4
        ]
        data['118_1648.50_0.00'] = [
            '60_0_48_0', 'R1rho', 799777399.1, 118.078, 1648.5, 0.0
        ]
        data['118_1648.50_0.04'] = [
            '63_0_48_4', 'R1rho', 799777399.1, 118.078, 1648.5, 0.04
        ]
        data['118_1648.50_0.10'] = [
            '61_0_48_10', 'R1rho', 799777399.1, 118.078, 1648.5, 0.1
        ]
        data['118_1648.50_0.14'] = [
            '62_0_48_14', 'R1rho', 799777399.1, 118.078, 1648.5, 0.14
        ]
        data['118_1648.50_0.20'] = [
            '64_0_48_20', 'R1rho', 799777399.1, 118.078, 1648.5, 0.2
        ]
        data['124_1341.11_0.00'] = [
            '11_500_46_0', 'R1rho', 799777399.1, 124.247031462, 1341.11, 0.0
        ]
        data['124_1341.11_0.04'] = [
            '14_500_46_4', 'R1rho', 799777399.1, 124.247031462, 1341.11, 0.04
        ]
        data['124_1341.11_0.10'] = [
            '12_500_46_10', 'R1rho', 799777399.1, 124.247031462, 1341.11, 0.1
        ]
        data['124_1341.11_0.20'] = [
            '15_500_46_20', 'R1rho', 799777399.1, 124.247031462, 1341.11, 0.2
        ]
        data['124_1341.11_0.40'] = [
            '13_500_46_40', 'R1rho', 799777399.1, 124.247031462, 1341.11, 0.4
        ]
        data['130_800.50_0.00'] = [
            '50_1000_41_0', 'R1rho', 799777399.1, 130.416062924, 800.5, 0.0
        ]
        data['130_800.50_0.04'] = [
            '53_1000_41_4', 'R1rho', 799777399.1, 130.416062924, 800.5, 0.04
        ]
        data['130_800.50_0.10'] = [
            '51_1000_41_10', 'R1rho', 799777399.1, 130.416062924, 800.5, 0.1
        ]
        data['130_800.50_0.20'] = [
            '54_1000_41_20', 'R1rho', 799777399.1, 130.416062924, 800.5, 0.2
        ]
        data['130_800.50_0.40'] = [
            '52_1000_41_40', 'R1rho', 799777399.1, 130.416062924, 800.5, 0.4
        ]
        data['130_1341.11_0.00'] = [
            '21_1000_46_0', 'R1rho', 799777399.1, 130.416062924, 1341.11, 0.0
        ]
        data['130_1341.11_0.04'] = [
            '24_1000_46_4', 'R1rho', 799777399.1, 130.416062924, 1341.11, 0.04
        ]
        data['130_1341.11_0.10'] = [
            '22_1000_46_10', 'R1rho', 799777399.1, 130.416062924, 1341.11, 0.1
        ]
        data['130_1341.11_0.20'] = [
            '25_1000_46_20', 'R1rho', 799777399.1, 130.416062924, 1341.11, 0.2
        ]
        data['130_1341.11_0.40'] = [
            '23_1000_46_40', 'R1rho', 799777399.1, 130.416062924, 1341.11, 0.4
        ]
        data['130_1648.50_0.00'] = [
            '65_1000_48_0', 'R1rho', 799777399.1, 130.416062924, 1648.5, 0.0
        ]
        data['130_1648.50_0.04'] = [
            '68_1000_48_4', 'R1rho', 799777399.1, 130.416062924, 1648.5, 0.04
        ]
        data['130_1648.50_0.10'] = [
            '66_1000_48_10', 'R1rho', 799777399.1, 130.416062924, 1648.5, 0.1
        ]
        data['130_1648.50_0.14'] = [
            '67_1000_48_14', 'R1rho', 799777399.1, 130.416062924, 1648.5, 0.14
        ]
        data['130_1648.50_0.20'] = [
            '69_1000_48_20', 'R1rho', 799777399.1, 130.416062924, 1648.5, 0.2
        ]
        data['143_800.50_0.00'] = [
            '55_2000_41_0', 'R1rho', 799777399.1, 142.754125848, 800.5, 0.0
        ]
        data['143_800.50_0.04'] = [
            '58_2000_41_4', 'R1rho', 799777399.1, 142.754125848, 800.5, 0.04
        ]
        data['143_800.50_0.10'] = [
            '56_2000_41_10', 'R1rho', 799777399.1, 142.754125848, 800.5, 0.1
        ]
        data['143_800.50_0.20'] = [
            '59_2000_41_20', 'R1rho', 799777399.1, 142.754125848, 800.5, 0.2
        ]
        data['143_800.50_0.40'] = [
            '57_2000_41_40', 'R1rho', 799777399.1, 142.754125848, 800.5, 0.4
        ]
        data['143_1341.11_0.00'] = [
            '6_2000_46_0', 'R1rho', 799777399.1, 142.754125848, 1341.11, 0.0
        ]
        data['143_1341.11_0.04'] = [
            '9_2000_46_4', 'R1rho', 799777399.1, 142.754125848, 1341.11, 0.04
        ]
        data['143_1341.11_0.10'] = [
            '7_2000_46_10', 'R1rho', 799777399.1, 142.754125848, 1341.11, 0.1
        ]
        data['143_1341.11_0.20'] = [
            '10_2000_46_20', 'R1rho', 799777399.1, 142.754125848, 1341.11, 0.2
        ]
        data['143_1341.11_0.40'] = [
            '8_2000_46_40', 'R1rho', 799777399.1, 142.754125848, 1341.11, 0.4
        ]
        data['180_1341.11_0.00'] = [
            '16_5000_46_0', 'R1rho', 799777399.1, 179.768314621, 1341.11, 0.0
        ]
        data['180_1341.11_0.04'] = [
            '19_5000_46_4', 'R1rho', 799777399.1, 179.768314621, 1341.11, 0.04
        ]
        data['180_1341.11_0.10'] = [
            '17_5000_46_10', 'R1rho', 799777399.1, 179.768314621, 1341.11, 0.1
        ]
        data['180_1341.11_0.20'] = [
            '20_5000_46_20', 'R1rho', 799777399.1, 179.768314621, 1341.11, 0.2
        ]
        data['180_1341.11_0.40'] = [
            '18_5000_46_40', 'R1rho', 799777399.1, 179.768314621, 1341.11, 0.4
        ]
        data['241_1341.11_0.00'] = [
            '26_10000_46_0', 'R1rho', 799777399.1, 241.458629241, 1341.11, 0.0
        ]
        data['241_1341.11_0.04'] = [
            '29_10000_46_4', 'R1rho', 799777399.1, 241.458629241, 1341.11, 0.04
        ]
        data['241_1341.11_0.10'] = [
            '27_10000_46_10', 'R1rho', 799777399.1, 241.458629241, 1341.11, 0.1
        ]
        data['241_1341.11_0.20'] = [
            '30_10000_46_20', 'R1rho', 799777399.1, 241.458629241, 1341.11, 0.2
        ]
        data['241_1341.11_0.40'] = [
            '28_10000_46_40', 'R1rho', 799777399.1, 241.458629241, 1341.11, 0.4
        ]

        # Check the returned spectrum IDs.
        print("Checking the IDs returned for each experiment.")
        for id in cdp.exp_type:
            exp_type = cdp.exp_type[id]
            frq = cdp.spectrometer_frq[id]
            offset = cdp.spin_lock_offset[id]
            point = cdp.spin_lock_nu1[id]

            # Loop over time
            for time in loop_time(exp_type=exp_type,
                                  frq=frq,
                                  offset=offset,
                                  point=point):
                ids = find_intensity_keys(exp_type=exp_type,
                                          frq=frq,
                                          offset=offset,
                                          point=point,
                                          time=time)

                print(exp_type, frq, offset, point, time,
                      data["%3.0f_%3.2f_%1.2f" % (offset, point, time)][5], id,
                      ids)

                # Test the id return
                self.assertEqual(len(ids), 1)
                # Test the time point
                self.assertEqual(
                    time, data["%3.0f_%3.2f_%1.2f" % (offset, point, time)][5])
                self.assertEqual(
                    ids[0],
                    data["%3.0f_%3.2f_%1.2f" % (offset, point, time)][0])
Example #8
def estimate_r2eff_err(spin_id=None, epsrel=0.0, verbosity=1):
    """This will estimate the R2eff and i0 errors from the covariance matrix Qxx.  Qxx is calculated from the Jacobian matrix and the optimised parameters.

    @keyword spin_id:       The spin identification string.
    @type spin_id:          str
    @keyword epsrel:        Any columns of R which satisfy |R_{kk}| <= epsrel |R_{11}| are considered linearly dependent and are excluded from the covariance matrix, where the corresponding rows and columns of the covariance matrix are set to zero.
    @type epsrel:           float
    @keyword verbosity:     The amount of information to print.  The higher the value, the greater the verbosity.
    @type verbosity:        int
    """

    # Check that the C modules have been compiled.
    if not C_module_exp_fn:
        raise RelaxError("Relaxation curve fitting is not available.  Try compiling the C modules on your platform.")

    # Perform checks.
    check_model_type(model=MODEL_R2EFF)

    # Loop over the spins.
    for cur_spin, mol_name, resi, resn, cur_spin_id in spin_loop(selection=spin_id, full_info=True, return_id=True, skip_desel=True):
        # Generate spin string.
        spin_string = generate_spin_string(spin=cur_spin, mol_name=mol_name, res_num=resi, res_name=resn)

        # Raise an error if the spin has not been optimised.
        if not (hasattr(cur_spin, 'r2eff') and hasattr(cur_spin, 'i0')):
            raise RelaxError("Spin %s does not contain optimised 'r2eff' and 'i0' values.  Try executing: minimise.execute(min_algor='Newton', constraints=False)" % spin_string)

        # Raise a warning if the gradient count is 0, as this can indicate that minimisation has not yet been performed.
        if hasattr(cur_spin, 'g_count'):
            if getattr(cur_spin, 'g_count') == 0.0:
                text = "Spin %s contains a gradient count of 0.0.  Is the R2eff parameter optimised?  Try executing: minimise.execute(min_algor='Newton', constraints=False)" % spin_string
                warn(RelaxWarning("%s." % text))

        # Print information.
        if verbosity >= 1:
            # Individual spin block section.
            top = 2
            if verbosity >= 2:
                top += 2
            subsection(file=sys.stdout, text="Estimating R2eff error for spin: %s"%spin_string, prespace=top)

        # Loop over each spectrometer frequency and dispersion point.
        for exp_type, frq, offset, point, ei, mi, oi, di in loop_exp_frq_offset_point(return_indices=True):
            # The parameter key.
            param_key = return_param_key_from_data(exp_type=exp_type, frq=frq, offset=offset, point=point)

            # Extract values.
            r2eff = getattr(cur_spin, 'r2eff')[param_key]
            i0 = getattr(cur_spin, 'i0')[param_key]

            # Pack data
            param_vector = [r2eff, i0]

            # The peak intensities, errors and times.
            values = []
            errors = []
            times = []
            for time in loop_time(exp_type=exp_type, frq=frq, offset=offset, point=point):
                values.append(average_intensity(spin=cur_spin, exp_type=exp_type, frq=frq, offset=offset, point=point, time=time))
                errors.append(average_intensity(spin=cur_spin, exp_type=exp_type, frq=frq, offset=offset, point=point, time=time, error=True))
                times.append(time)

            # Convert to numpy array.
            values = asarray(values)
            errors = asarray(errors)
            times = asarray(times)

            # Initialise data in C code.
            scaling_list = [1.0, 1.0]
            model = Relax_fit_opt(model='exp', num_params=len(param_vector), values=values, errors=errors, relax_times=times, scaling_matrix=scaling_list)

            # Use the Jacobian directly from the model function.
            jacobian_matrix_exp = transpose(asarray(model.jacobian(param_vector)))
            weights = 1. / errors**2

            # Get the covariance matrix.
            pcov = multifit_covar(J=jacobian_matrix_exp, weights=weights)

            # To compute one standard deviation errors on the parameters, take the square root of the diagonal covariance.
            param_vector_error = sqrt(diag(pcov))

            # Extract values.
            r2eff_err, i0_err = param_vector_error

            # Copy the r2eff dictionary to the r2eff_err dictionary, as they share the same keys.
            if not hasattr(cur_spin, 'r2eff_err'):
                setattr(cur_spin, 'r2eff_err', deepcopy(getattr(cur_spin, 'r2eff')))
            if not hasattr(cur_spin, 'i0_err'):
                setattr(cur_spin, 'i0_err', deepcopy(getattr(cur_spin, 'i0')))

            # Set error.
            cur_spin.r2eff_err[param_key] = r2eff_err
            cur_spin.i0_err[param_key] = i0_err

            # Get other relevant information.
            chi2 = getattr(cur_spin, 'chi2')

            # Print information.
            print_strings = []
            if verbosity >= 1:
                # Add print strings.
                point_info = "%s at %3.1f MHz, for offset=%3.3f ppm and dispersion point %-5.1f, with %i time points." % (exp_type, frq/1E6, offset, point, len(times))
                print_strings.append(point_info)

                par_info = "r2eff=%3.3f r2eff_err=%3.4f, i0=%6.1f, i0_err=%3.4f, chi2=%3.3f.\n" % (r2eff, r2eff_err, i0, i0_err, chi2)
                print_strings.append(par_info)

                if verbosity >= 2:
                    time_info = ', '.join(map(str, times))
                    print_strings.append('For time array: ' + time_info + '.\n\n')

            # Print the info.
            if len(print_strings) > 0:
                for print_string in print_strings:
                    print(print_string)
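The covariance step above in a self-contained numpy sketch: the weighted covariance matrix Qxx = (J^T W J)^-1 is built from the analytic Jacobian of the exponential, and the square roots of its diagonal give the one standard deviation errors. This mirrors what multifit_covar() is used for here, but it is an illustration under these assumptions, not the relax implementation:

import numpy as np

def covar_errors(jacobian, errors):
    """Return 1-sigma parameter errors from the Jacobian and intensity errors."""
    weights = 1.0 / np.asarray(errors)**2    # W, stored as the diagonal.
    jw = jacobian * weights[:, None]         # Each row of J scaled by its weight.
    qxx = np.linalg.inv(jacobian.T @ jw)     # Qxx = (J^T W J)^-1.
    return np.sqrt(np.diag(qxx))

# Analytic Jacobian of I(t) = i0 * exp(-r2eff * t) w.r.t. [r2eff, i0].
times = np.array([0.0, 0.04, 0.1, 0.2])
r2eff, i0 = 10.0, 1000.0
decay = np.exp(-r2eff * times)
jacobian = np.column_stack([-i0 * times * decay, decay])
r2eff_err, i0_err = covar_errors(jacobian, errors=np.full(4, 20.0))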
Example #9
E = Exp(verbosity=1)

for cur_spin, mol_name, resi, resn, spin_id in spin_loop(full_info=True, return_id=True, skip_desel=True):
    # Generate spin string.
    spin_string = generate_spin_string(spin=cur_spin, mol_name=mol_name, res_num=resi, res_name=resn)

    for exp_type, frq, offset, point, ei, mi, oi, di in loop_exp_frq_offset_point(return_indices=True):
        # Generate the param_key.
        param_key = return_param_key_from_data(exp_type=exp_type, frq=frq, offset=offset, point=point)

        ## Now try to do a line of best fit by least squares.
        # The peak intensities, errors and times.
        values = []
        errors = []
        times = []
        for time, ti in loop_time(exp_type=exp_type, frq=frq, offset=offset, point=point, return_indices=True):
            value = average_intensity(spin=cur_spin, exp_type=exp_type, frq=frq, offset=offset, point=point, time=time, sim_index=None)
            values.append(value)

            error = average_intensity(spin=cur_spin, exp_type=exp_type, frq=frq, offset=offset, point=point, time=time, error=True)
            errors.append(error)
            times.append(time)

            # Find intensity keys.
            int_keys = find_intensity_keys(exp_type=exp_type, frq=frq, offset=offset, point=point, time=time)
            #print(type(int_keys))
            # Loop over the replicates.
            #for i in range(len(int_keys)):
                #print( cur_spin.peak_intensity[int_keys[i]], value )
                #print( cur_spin.peak_intensity_err[int_keys[i]], error)
Example #10
def minimise_r2eff(spins=None, spin_ids=None, min_algor=None, min_options=None, func_tol=None, grad_tol=None, max_iterations=None, constraints=False, scaling_matrix=None, verbosity=0, sim_index=None, lower=None, upper=None, inc=None):
    """Optimise the R2eff model by fitting the 2-parameter exponential curves.

    This mimics the R1 and R2 relax_fit analysis.


    @keyword spins:             The list of spins for the cluster.
    @type spins:                list of SpinContainer instances
    @keyword spin_ids:          The list of spin IDs for the cluster.
    @type spin_ids:             list of str
    @keyword min_algor:         The minimisation algorithm to use.
    @type min_algor:            str
    @keyword min_options:       An array of options to be used by the minimisation algorithm.
    @type min_options:          array of str
    @keyword func_tol:          The function tolerance which, when reached, terminates optimisation.  Setting this to None turns off the check.
    @type func_tol:             None or float
    @keyword grad_tol:          The gradient tolerance which, when reached, terminates optimisation.  Setting this to None turns off the check.
    @type grad_tol:             None or float
    @keyword max_iterations:    The maximum number of iterations for the algorithm.
    @type max_iterations:       int
    @keyword constraints:       If True, constraints are used during optimisation.
    @type constraints:          bool
    @keyword scaling_matrix:    The diagonal and square scaling matrix.
    @type scaling_matrix:       numpy rank-2, float64 array or None
    @keyword verbosity:         The amount of information to print.  The higher the value, the greater the verbosity.
    @type verbosity:            int
    @keyword sim_index:         The index of the simulation to optimise.  This should be None if normal optimisation is desired.
    @type sim_index:            None or int
    @keyword lower:             The model specific lower bounds of the grid search which must be equal to the number of parameters in the model.  This optional argument is only used when doing a grid search.
    @type lower:                list of numbers
    @keyword upper:             The model specific upper bounds of the grid search which must be equal to the number of parameters in the model.  This optional argument is only used when doing a grid search.
    @type upper:                list of numbers
    @keyword inc:               The model specific increments for each dimension of the space for the grid search.  The number of elements in the array must be equal to the number of parameters in the model.  This argument is only used when doing a grid search.
    @type inc:                  list of int
    """

    # Check that the C modules have been compiled.
    if not C_module_exp_fn:
        raise RelaxError("Relaxation curve fitting is not available.  Try compiling the C modules on your platform.")

    # Loop over the spins.
    for si in range(len(spins)):
        # Skip deselected spins.
        if not spins[si].select:
            continue

        # Loop over each spectrometer frequency and dispersion point.
        for exp_type, frq, offset, point in loop_exp_frq_offset_point():
            # The parameter key.
            param_key = return_param_key_from_data(exp_type=exp_type, frq=frq, offset=offset, point=point)

            # The initial parameter vector.
            param_vector = assemble_param_vector(spins=[spins[si]], key=param_key, sim_index=sim_index)

            # Diagonal scaling.
            if scaling_matrix is not None:
                param_vector = dot(inv(scaling_matrix), param_vector)

            # Linear constraints.
            A, b = None, None
            if constraints:
                A, b = linear_constraints(spins=[spins[si]], scaling_matrix=scaling_matrix)

            # Print out.
            if verbosity >= 1:
                # Individual spin section.
                top = 2
                if verbosity >= 2:
                    top += 2
                text = "Fitting to spin %s, frequency %s and dispersion point %s" % (spin_ids[si], frq, point)
                subsection(file=sys.stdout, text=text, prespace=top)

                # Grid search printout.
                if match('^[Gg]rid', min_algor):
                    result = 1
                    for x in inc:
                        result = mul(result, x)
                    print("Unconstrained grid search size: %s (constraints may decrease this size).\n" % result)

            # The peak intensities, errors and times.
            values = []
            errors = []
            times = []
            for time in loop_time(exp_type=exp_type, frq=frq, offset=offset, point=point):
                values.append(average_intensity(spin=spins[si], exp_type=exp_type, frq=frq, offset=offset, point=point, time=time, sim_index=sim_index))
                errors.append(average_intensity(spin=spins[si], exp_type=exp_type, frq=frq, offset=offset, point=point, time=time, error=True))
                times.append(time)

            # Raise an error if the number of time points is less than 3.
            if len(times) < 3:
                subsection(file=sys.stdout, text="Exponential curve fitting error for point:", prespace=2)
                point_info = "%s at %3.1f MHz, for offset=%3.3f ppm and dispersion point %-5.1f, with %i time points." % (exp_type, frq/1E6, offset, point, len(times))
                print(point_info)
                raise RelaxError("The data setup points to exponential curve fitting, but only %i time points were found, where 3 time points is the minimum.  If calculating R2eff values for fixed relaxation time period data, check that a reference intensity has been specified for each offset value." % len(times))

            # The scaling matrix in a diagonalised list form.
            scaling_list = []
            if scaling_matrix is None:
                for i in range(len(param_vector)):
                    scaling_list.append(1.0)
            else:
                for i in range(len(scaling_matrix)):
                    scaling_list.append(scaling_matrix[i, i])

            # Initialise the function to minimise.
            model = Relax_fit_opt(model='exp', num_params=len(param_vector), values=values, errors=errors, relax_times=times, scaling_matrix=scaling_list)

            # Grid search.
            if search('^[Gg]rid', min_algor):
                results = grid(func=model.func, args=(), num_incs=inc, lower=lower, upper=upper, A=A, b=b, verbosity=verbosity)

                # Unpack the results.
                param_vector, chi2, iter_count, warning = results
                f_count = iter_count
                g_count = 0.0
                h_count = 0.0

            # Minimisation.
            else:
                results = generic_minimise(func=model.func, dfunc=model.dfunc, d2func=model.d2func, args=(), x0=param_vector, min_algor=min_algor, min_options=min_options, func_tol=func_tol, grad_tol=grad_tol, maxiter=max_iterations, A=A, b=b, full_output=True, print_flag=verbosity)

                # Unpack the results.
                if results is None:
                    return
                param_vector, chi2, iter_count, f_count, g_count, h_count, warning = results

            # Scaling.
            if scaling_matrix is not None:
                param_vector = dot(scaling_matrix, param_vector)

            # Disassemble the parameter vector.
            disassemble_param_vector(param_vector=param_vector, spins=[spins[si]], key=param_key, sim_index=sim_index)

            # Monte Carlo minimisation statistics.
            if sim_index is not None:
                # Chi-squared statistic.
                spins[si].chi2_sim[sim_index] = chi2

                # Iterations.
                spins[si].iter_sim[sim_index] = iter_count

                # Function evaluations.
                spins[si].f_count_sim[sim_index] = f_count

                # Gradient evaluations.
                spins[si].g_count_sim[sim_index] = g_count

                # Hessian evaluations.
                spins[si].h_count_sim[sim_index] = h_count

                # Warning.
                spins[si].warning_sim[sim_index] = warning

            # Normal statistics.
            else:
                # Chi-squared statistic.
                spins[si].chi2 = chi2

                # Iterations.
                spins[si].iter = iter_count

                # Function evaluations.
                spins[si].f_count = f_count

                # Gradient evaluations.
                spins[si].g_count = g_count

                # Hessian evaluations.
                spins[si].h_count = h_count

                # Warning.
                spins[si].warning = warning
        # Add the key to the dictionary.
        my_dic[spin_id][param_key] = OrderedDict()

        # Get the value.
        r2eff = getattr(cur_spin, 'r2eff')[param_key]
        my_dic[spin_id][param_key]['r2eff'] = r2eff
        #r2eff_err = getattr(cur_spin, 'r2eff_err')[param_key]
        i0 = getattr(cur_spin, 'i0')[param_key]
        #i0_err = getattr(cur_spin, 'i0_err')[param_key]
        my_dic[spin_id][param_key]['i0'] = i0

        if do_boot:
            values = []
            errors = []
            times = []
            for time in loop_time(exp_type=exp_type, frq=frq, offset=offset, point=point):
                values.append(average_intensity(spin=cur_spin, exp_type=exp_type, frq=frq, offset=offset, point=point, time=time))
                errors.append(average_intensity(spin=cur_spin, exp_type=exp_type, frq=frq, offset=offset, point=point, time=time, error=True))
                times.append(time)

            # Convert to numpy array.
            values = asarray(values)
            errors = asarray(errors)
            times = asarray(times)

            R_m_sim_l = []
            I0_m_sim_l = []
            for j in range(sim_boot):
                if j % 100 == 0:
                    print("Simulation %i" % j)
                # Start minimisation.
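The bootstrap loop above breaks off at the minimisation step. A minimal self-contained sketch of the idea, with scipy's curve_fit standing in for the relax minimiser (an illustration, not the original script): resample the intensities with Gaussian noise matching the measured errors, refit each time, and take the standard deviation of the refitted parameters as the error estimate.

import numpy as np
from scipy.optimize import curve_fit

def exp_decay(t, r2eff, i0):
    return i0 * np.exp(-r2eff * t)

rng = np.random.default_rng(0)
times = np.array([0.0, 0.04, 0.1, 0.2, 0.4])
values = exp_decay(times, 10.0, 1000.0)
errors = np.full_like(times, 20.0)

sim_boot = 500
r2eff_sim, i0_sim = [], []
for j in range(sim_boot):
    if j % 100 == 0:
        print("Simulation %i" % j)
    noisy = rng.normal(values, errors)       # Resample within the errors.
    popt, _ = curve_fit(exp_decay, times, noisy, p0=[10.0, 1000.0])
    r2eff_sim.append(popt[0])
    i0_sim.append(popt[1])

r2eff_err, i0_err = np.std(r2eff_sim), np.std(i0_sim)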
Example #12
def back_calc_peak_intensities(spin=None, spin_id=None, exp_type=None, frq=None, offset=None, point=None):
    """Back-calculation of peak intensity for the given relaxation time.

    @keyword spin:      The specific spin data container.
    @type spin:         SpinContainer instance
    @keyword spin_id:   The optional spin ID string for use in warning messages.
    @type spin_id:      str or None
    @keyword exp_type:  The experiment type.
    @type exp_type:     str
    @keyword frq:       The spectrometer frequency.
    @type frq:          float
    @keyword offset:    For R1rho-type data, the spin-lock offset value in ppm.
    @type offset:       None or float
    @keyword point:     The dispersion point data (either the spin-lock field strength in Hz or the nu_CPMG frequency in Hz).
    @type point:        float
    @return:            The back-calculated peak intensities for the given exponential curve.
    @rtype:             numpy rank-1 float array
    """

    # Check.
    if not has_exponential_exp_type():
        raise RelaxError("Back-calculation is not allowed for the fixed time experiment types.")

    # The key.
    param_key = return_param_key_from_data(exp_type=exp_type, frq=frq, offset=offset, point=point)

    # Create the initial parameter vector.
    param_vector = assemble_param_vector(spins=[spin], key=param_key)

    # The peak intensities, errors and times.
    values = []
    errors = []
    times = []
    for time in loop_time(exp_type=exp_type, frq=frq, offset=offset, point=point):
        # Check the peak intensity keys.
        int_keys = find_intensity_keys(exp_type=exp_type, frq=frq, offset=offset, point=point, time=time)
        for i in range(len(int_keys)):
            if int_keys[i] not in spin.peak_intensity:
                if spin_id:
                    warn(RelaxWarning("The spin %s peak intensity key '%s' is not present, skipping the back-calculation." % (spin_id, int_keys[i])))
                else:
                    warn(RelaxWarning("The peak intensity key '%s' is not present, skipping the back-calculation." % int_keys[i]))
                return

        # The data.
        values.append(average_intensity(spin=spin, exp_type=exp_type, frq=frq, offset=offset, point=point, time=time))
        errors.append(average_intensity(spin=spin, exp_type=exp_type, frq=frq, offset=offset, point=point, time=time, error=True))
        times.append(time)

    # The scaling matrix in a diagonalised list form.
    scaling_list = []
    for i in range(len(param_vector)):
        scaling_list.append(1.0)

    # Initialise the relaxation fit functions.
    model = Relax_fit_opt(model='exp', num_params=len(spin.params), values=values, errors=errors, relax_times=times, scaling_matrix=scaling_list)

    # Make a single function call.  This will cause back calculation and the data will be stored in the C module.
    model.func(param_vector)

    # Get the data back.
    results = model.back_calc_data()

    # Return the back-calculated peak intensities.
    return results
Example #13
def minimise_r2eff(spins=None, spin_ids=None, min_algor=None, min_options=None, func_tol=None, grad_tol=None, max_iterations=None, constraints=False, scaling_matrix=None, verbosity=0, sim_index=None, lower=None, upper=None, inc=None):
    """Optimise the R2eff model by fitting the 2-parameter exponential curves.

    This mimics the R1 and R2 relax_fit analysis.


    @keyword spins:             The list of spins for the cluster.
    @type spins:                list of SpinContainer instances
    @keyword spin_ids:          The list of spin IDs for the cluster.
    @type spin_ids:             list of str
    @keyword min_algor:         The minimisation algorithm to use.
    @type min_algor:            str
    @keyword min_options:       An array of options to be used by the minimisation algorithm.
    @type min_options:          array of str
    @keyword func_tol:          The function tolerance which, when reached, terminates optimisation.  Setting this to None turns off the check.
    @type func_tol:             None or float
    @keyword grad_tol:          The gradient tolerance which, when reached, terminates optimisation.  Setting this to None turns off the check.
    @type grad_tol:             None or float
    @keyword max_iterations:    The maximum number of iterations for the algorithm.
    @type max_iterations:       int
    @keyword constraints:       If True, constraints are used during optimisation.
    @type constraints:          bool
    @keyword scaling_matrix:    The diagonal and square scaling matrix.
    @type scaling_matrix:       numpy rank-2, float64 array or None
    @keyword verbosity:         The amount of information to print.  The higher the value, the greater the verbosity.
    @type verbosity:            int
    @keyword sim_index:         The index of the simulation to optimise.  This should be None if normal optimisation is desired.
    @type sim_index:            None or int
    @keyword lower:             The model specific lower bounds of the grid search which must be equal to the number of parameters in the model.  This optional argument is only used when doing a grid search.
    @type lower:                list of numbers
    @keyword upper:             The model specific upper bounds of the grid search which must be equal to the number of parameters in the model.  This optional argument is only used when doing a grid search.
    @type upper:                list of numbers
    @keyword inc:               The model specific increments for each dimension of the space for the grid search.  The number of elements in the array must be equal to the number of parameters in the model.  This argument is only used when doing a grid search.
    @type inc:                  list of int
    """

    # Check that the C modules have been compiled.
    if not C_module_exp_fn:
        raise RelaxError("Relaxation curve fitting is not available.  Try compiling the C modules on your platform.")

    # Loop over the spins.
    for si in range(len(spins)):
        # Skip deselected spins.
        if not spins[si].select:
            continue

        # Loop over each spectrometer frequency and dispersion point.
        for exp_type, frq, offset, point in loop_exp_frq_offset_point():
            # The parameter key.
            param_key = return_param_key_from_data(exp_type=exp_type, frq=frq, offset=offset, point=point)

            # The initial parameter vector.
            param_vector = assemble_param_vector(spins=[spins[si]], key=param_key, sim_index=sim_index)

            # Diagonal scaling.
            if scaling_matrix is not None:
                param_vector = dot(inv(scaling_matrix), param_vector)

            # Linear constraints.
            A, b = None, None
            if constraints:
                A, b = linear_constraints(spins=[spins[si]], scaling_matrix=scaling_matrix)

            # Print out.
            if verbosity >= 1:
                # Individual spin section.
                top = 2
                if verbosity >= 2:
                    top += 2
                text = "Fitting to spin %s, frequency %s and dispersion point %s" % (spin_ids[si], frq, point)
                subsection(file=sys.stdout, text=text, prespace=top)

                # Grid search printout.
                if match('^[Gg]rid', min_algor):
                    result = 1
                    for x in inc:
                        result = mul(result, x)
                    print("Unconstrained grid search size: %s (constraints may decrease this size).\n" % result)

            # The peak intensities, errors and times.
            values = []
            errors = []
            times = []
            data_flag = True
            for time in loop_time(exp_type=exp_type, frq=frq, offset=offset, point=point):
                # Check the peak intensity keys.
                int_keys = find_intensity_keys(exp_type=exp_type, frq=frq, offset=offset, point=point, time=time)
                peak_intensities = spins[si].peak_intensity
                if sim_index is not None:
                    peak_intensities = spins[si].peak_intensity_sim
                for i in range(len(int_keys)):
                    if int_keys[i] not in peak_intensities:
                        if verbosity:
                            warn(RelaxWarning("The spin %s peak intensity key '%s' is not present, skipping the optimisation." % (spin_ids[si], int_keys[i])))
                        data_flag = False
                        break

                if data_flag:
                    values.append(average_intensity(spin=spins[si], exp_type=exp_type, frq=frq, offset=offset, point=point, time=time, sim_index=sim_index))
                    errors.append(average_intensity(spin=spins[si], exp_type=exp_type, frq=frq, offset=offset, point=point, time=time, error=True))
                    times.append(time)
            if not data_flag:
                continue

            # Raise an error if the number of time points is less than 3.
            if len(times) < 3:
                subsection(file=sys.stdout, text="Exponential curve fitting error for point:", prespace=2)
                point_info = "%s at %3.1f MHz, for offset=%3.3f ppm and dispersion point %-5.1f, with %i time points." % (exp_type, frq/1E6, offset, point, len(times))
                print(point_info)
                raise RelaxError("The data setup points to exponential curve fitting, but only %i time points were found, where 3 time points is the minimum.  If calculating R2eff values for fixed relaxation time period data, check that a reference intensity has been specified for each offset value." % len(times))

            # The scaling matrix in a diagonalised list form.
            scaling_list = []
            if scaling_matrix is None:
                for i in range(len(param_vector)):
                    scaling_list.append(1.0)
            else:
                for i in range(len(scaling_matrix)):
                    scaling_list.append(scaling_matrix[i, i])

            # Initialise the function to minimise.
            model = Relax_fit_opt(model='exp', num_params=len(param_vector), values=values, errors=errors, relax_times=times, scaling_matrix=scaling_list)

            # Grid search.
            if search('^[Gg]rid', min_algor):
                results = grid(func=model.func, args=(), num_incs=inc, lower=lower, upper=upper, A=A, b=b, verbosity=verbosity)

                # Unpack the results.
                param_vector, chi2, iter_count, warning = results
                f_count = iter_count
                g_count = 0.0
                h_count = 0.0

            # Minimisation.
            else:
                results = generic_minimise(func=model.func, dfunc=model.dfunc, d2func=model.d2func, args=(), x0=param_vector, min_algor=min_algor, min_options=min_options, func_tol=func_tol, grad_tol=grad_tol, maxiter=max_iterations, A=A, b=b, full_output=True, print_flag=verbosity)

                # Unpack the results.
                if results is None:
                    return
                param_vector, chi2, iter_count, f_count, g_count, h_count, warning = results

            # Scaling.
            if scaling_matrix is not None:
                param_vector = dot(scaling_matrix, param_vector)

            # Disassemble the parameter vector.
            disassemble_param_vector(param_vector=param_vector, spins=[spins[si]], key=param_key, sim_index=sim_index)

            # Monte Carlo minimisation statistics.
            if sim_index is not None:
                # Chi-squared statistic.
                spins[si].chi2_sim[sim_index] = chi2

                # Iterations.
                spins[si].iter_sim[sim_index] = iter_count

                # Function evaluations.
                spins[si].f_count_sim[sim_index] = f_count

                # Gradient evaluations.
                spins[si].g_count_sim[sim_index] = g_count

                # Hessian evaluations.
                spins[si].h_count_sim[sim_index] = h_count

                # Warning.
                spins[si].warning_sim[sim_index] = warning

            # Normal statistics.
            else:
                # Chi-squared statistic.
                spins[si].chi2 = chi2

                # Iterations.
                spins[si].iter = iter_count

                # Function evaluations.
                spins[si].f_count = f_count

                # Gradient evaluations.
                spins[si].g_count = g_count

                # Hessian evaluations.
                spins[si].h_count = h_count

                # Warning.
                spins[si].warning = warning
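
The compiled Relax_fit_opt target function used above is a black box in this listing.  As a rough guide only, the following pure-Python sketch shows the two-parameter model and the chi-squared value that such an 'exp' fit minimises; the function names and data here are illustrative and are not the C module's API.

import numpy as np

def exp_curve(params, times):
    """Two-parameter exponential decay: I(t) = i0 * exp(-r2eff * t)."""
    r2eff, i0 = params
    return i0 * np.exp(-r2eff * np.asarray(times))

def chi2(params, times, values, errors):
    """The error-weighted sum of squares minimised during the curve fit."""
    back_calc = exp_curve(params, times)
    return np.sum(((np.asarray(values) - back_calc) / np.asarray(errors))**2)

# Illustrative data only.
times = [0.0, 0.04, 0.08, 0.12]
values = [1000.0, 670.0, 449.0, 301.0]
errors = [10.0, 10.0, 10.0, 10.0]
print(chi2([10.0, 1000.0], times, values, errors))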
Example #14
0
    for exp_type, frq, offset, point, ei, mi, oi, di in loop_exp_frq_offset_point(
            return_indices=True):
        # Generate the param_key.
        param_key = return_param_key_from_data(exp_type=exp_type,
                                               frq=frq,
                                               offset=offset,
                                               point=point)

        ## Now try to do a line of best fit by least squares.
        # The peak intensities, errors and times.
        values = []
        errors = []
        times = []
        for time, ti in loop_time(exp_type=exp_type,
                                  frq=frq,
                                  offset=offset,
                                  point=point,
                                  return_indices=True):
            value = average_intensity(spin=cur_spin,
                                      exp_type=exp_type,
                                      frq=frq,
                                      offset=offset,
                                      point=point,
                                      time=time,
                                      sim_index=None)
            values.append(value)

            error = average_intensity(spin=cur_spin,
                                      exp_type=exp_type,
                                      frq=frq,
                                      offset=offset,
                                      point=point,
                                      time=time,
                                      error=True)
            errors.append(error)
            times.append(time)
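
The "line of best fit by least squares" referred to above is the linearisation described in the estimate_r2eff docstring below: ln(I[j]) = ln(i0) - time[j] * r2eff.  A minimal numpy-only sketch of that initial guess, with purely illustrative data:

import numpy as np

# Illustrative data only.
times = np.array([0.0, 0.04, 0.08, 0.12])
values = np.array([1000.0, 670.0, 449.0, 301.0])

# Fit ln(I) = ln(i0) - t * r2eff by linear least squares.  numpy.polyfit
# returns the highest-order coefficient first, so the slope is -r2eff.
slope, intercept = np.polyfit(times, np.log(values), 1)
r2eff_est = -slope
i0_est = np.exp(intercept)
print(r2eff_est, i0_est)  # The starting point x0 = [r2eff_est, i0_est].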
Example #15
0
def estimate_r2eff(method='minfx',
                   min_algor='simplex',
                   c_code=True,
                   constraints=False,
                   chi2_jacobian=False,
                   spin_id=None,
                   ftol=1e-15,
                   xtol=1e-15,
                   maxfev=10000000,
                   factor=100.0,
                   verbosity=1):
    """Estimate r2eff and errors by exponential curve fitting with scipy.optimize.leastsq or minfx.

    THIS IS ONLY FOR TESTING.

    scipy.optimize.leastsq is a wrapper around MINPACK's lmdif and lmder algorithms.

    MINPACK is a FORTRAN90 library which solves systems of nonlinear equations, or carries out the least squares minimization of the residual of a set of linear or nonlinear equations.

    Errors are calculated by taking the square root of the diagonal of the reported covariance matrix.

    This can be a huge time-saving step when performing model fitting in R1rho.
    Errors of R2eff values are normally estimated by time-consuming Monte Carlo simulations.

    The initial guess for the starting parameters, x0 = [r2eff_est, i0_est], is found by converting the exponential curve to a linear problem.
    The initial guess is then solved by linear least squares of: ln(Intensity[j]) = ln(i0) - time[j] * r2eff.


    @keyword method:            The method to minimise and estimate errors.  Options are: 'minfx' or 'scipy.optimize.leastsq'.
    @type method:               string
    @keyword min_algor:         The minimisation algorithm.
    @type min_algor:            string
    @keyword c_code:            Whether to optimise with the C code.
    @type c_code:               bool
    @keyword constraints:       Whether constraints should be used.
    @type constraints:          bool
    @keyword chi2_jacobian:     Whether the chi2 Jacobian should be used.
    @type chi2_jacobian:        bool
    @keyword spin_id:           The spin identification string.
    @type spin_id:              str
    @keyword ftol:              The function tolerance for the relative error desired in the sum of squares, passed to leastsq.
    @type ftol:                 float
    @keyword xtol:              The error tolerance for the relative error desired in the approximate solution, passed to leastsq.
    @type xtol:                 float
    @keyword maxfev:            The maximum number of function evaluations, passed to leastsq.  If zero, then 100*(N+1) is the maximum number of function calls.  N is the number of elements in x0=[r2eff, i0].
    @type maxfev:               int
    @keyword factor:            The initial step bound, passed to leastsq.  It determines the initial step bound (''factor * || diag * x||'').  Should be in the interval (0.1, 100).
    @type factor:               float
    @keyword verbosity:         The amount of information to print.  The higher the value, the greater the verbosity.
    @type verbosity:            int
    """

    # Perform checks.
    check_model_type(model=MODEL_R2EFF)

    # Check that the C modules have been compiled.
    if not C_module_exp_fn and method == 'minfx':
        raise RelaxError(
            "Relaxation curve fitting is not available.  Try compiling the C modules on your platform."
        )

    # Set class scipy setting.
    E = Exp(verbosity=verbosity)
    E.set_settings_leastsq(ftol=ftol, xtol=xtol, maxfev=maxfev, factor=factor)

    # Check if intensity errors have already been calculated by the user.
    precalc = True
    for cur_spin, mol_name, resi, resn, cur_spin_id in spin_loop(
            selection=spin_id, full_info=True, return_id=True,
            skip_desel=True):
        # No peak intensity error structure.
        if not hasattr(cur_spin, 'peak_intensity_err'):
            precalc = False
            break

        # Determine if a spectrum ID is missing from the list.
        for id in cdp.spectrum_ids:
            if id not in cur_spin.peak_intensity_err:
                precalc = False
                break

    # Loop over the spins.
    for cur_spin, mol_name, resi, resn, cur_spin_id in spin_loop(
            selection=spin_id, full_info=True, return_id=True,
            skip_desel=True):
        # Generate spin string.
        spin_string = generate_spin_string(spin=cur_spin,
                                           mol_name=mol_name,
                                           res_num=resi,
                                           res_name=resn)

        # Print information.
        if E.verbosity >= 1:
            # Individual spin block section.
            top = 2
            if E.verbosity >= 2:
                top += 2
            subsection(file=sys.stdout,
                       text="Fitting with %s to: %s" % (method, spin_string),
                       prespace=top)
            if method == 'minfx':
                subsection(
                    file=sys.stdout,
                    text=
                    "min_algor='%s', c_code=%s, constraints=%s, chi2_jacobian?=%s"
                    % (min_algor, c_code, constraints, chi2_jacobian),
                    prespace=0)

        # Loop over each spectrometer frequency and dispersion point.
        for exp_type, frq, offset, point, ei, mi, oi, di in loop_exp_frq_offset_point(
                return_indices=True):
            # The parameter key.
            param_key = return_param_key_from_data(exp_type=exp_type,
                                                   frq=frq,
                                                   offset=offset,
                                                   point=point)

            # The peak intensities, errors and times.
            values = []
            errors = []
            times = []
            for time in loop_time(exp_type=exp_type,
                                  frq=frq,
                                  offset=offset,
                                  point=point):
                values.append(
                    average_intensity(spin=cur_spin,
                                      exp_type=exp_type,
                                      frq=frq,
                                      offset=offset,
                                      point=point,
                                      time=time))
                errors.append(
                    average_intensity(spin=cur_spin,
                                      exp_type=exp_type,
                                      frq=frq,
                                      offset=offset,
                                      point=point,
                                      time=time,
                                      error=True))
                times.append(time)

            # Convert to numpy array.
            values = asarray(values)
            errors = asarray(errors)
            times = asarray(times)

            # Initialise data.
            E.setup_data(values=values, errors=errors, times=times)

            # Get the result based on method.
            if method == 'scipy.optimize.leastsq':
                # Acquire results.
                results = minimise_leastsq(E=E)

            elif method == 'minfx':
                # Set settings.
                E.set_settings_minfx(min_algor=min_algor,
                                     c_code=c_code,
                                     chi2_jacobian=chi2_jacobian,
                                     constraints=constraints)

                # Acquire results.
                results = minimise_minfx(E=E)
            else:
                raise RelaxError(
                    "Method for minimisation not known. Try setting: method='scipy.optimize.leastsq'."
                )

            # Unpack results
            param_vector, param_vector_error, chi2, iter_count, f_count, g_count, h_count, warning = results

            # Extract values.
            r2eff = param_vector[0]
            i0 = param_vector[1]
            r2eff_err = param_vector_error[0]
            i0_err = param_vector_error[1]

            # Disassemble the parameter vector.
            disassemble_param_vector(param_vector=param_vector,
                                     spins=[cur_spin],
                                     key=param_key)

            # Errors.
            if not hasattr(cur_spin, 'r2eff_err'):
                setattr(cur_spin, 'r2eff_err',
                        deepcopy(getattr(cur_spin, 'r2eff')))
            if not hasattr(cur_spin, 'i0_err'):
                setattr(cur_spin, 'i0_err', deepcopy(getattr(cur_spin, 'i0')))

            # Set error.
            cur_spin.r2eff_err[param_key] = r2eff_err
            cur_spin.i0_err[param_key] = i0_err

            # Chi-squared statistic.
            cur_spin.chi2 = chi2

            # Function evaluations.
            cur_spin.f_count = f_count

            # Warning.
            cur_spin.warning = warning

            # Print information.
            print_strings = []
            if E.verbosity >= 1:
                # Add print strings.
                point_info = "%s at %3.1f MHz, for offset=%3.3f ppm and dispersion point %-5.1f, with %i time points." % (
                    exp_type, frq / 1E6, offset, point, len(times))
                print_strings.append(point_info)

                par_info = "r2eff=%3.3f r2eff_err=%3.4f, i0=%6.1f, i0_err=%3.4f, chi2=%3.3f.\n" % (
                    r2eff, r2eff_err, i0, i0_err, chi2)
                print_strings.append(par_info)

                if E.verbosity >= 2:
                    time_info = ', '.join(map(str, times))
                    print_strings.append('For time array: ' + time_info +
                                         '.\n\n')

            # Print info
            if len(print_strings) > 0:
                for print_string in print_strings:
                    print(print_string),
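
For method='scipy.optimize.leastsq', the fit and its errors follow the docstring above: minimise the error-weighted residuals and take the square root of the diagonal of the returned covariance.  A self-contained sketch under those assumptions (the data and starting values are illustrative):

import numpy as np
from scipy.optimize import leastsq

def weighted_residuals(params, times, values, errors):
    """Residuals of I(t) = i0 * exp(-r2eff * t), weighted by the errors."""
    r2eff, i0 = params
    return (values - i0 * np.exp(-r2eff * times)) / errors

# Illustrative data only.
times = np.array([0.0, 0.04, 0.08, 0.12])
values = np.array([1000.0, 670.0, 449.0, 301.0])
errors = np.full(4, 10.0)

x0 = [10.0, 1000.0]  # E.g. from the linearised initial guess.
popt, pcov, infodict, errmsg, ier = leastsq(
    weighted_residuals, x0, args=(times, values, errors), full_output=True)

# With error-weighted residuals, cov_x approximates the parameter
# covariance directly, so one standard deviation errors are:
r2eff_err, i0_err = np.sqrt(np.diag(pcov))
print(popt, r2eff_err, i0_err)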
Example #16
0
def estimate_r2eff_err(spin_id=None, epsrel=0.0, verbosity=1):
    """This will estimate the R2eff and i0 errors from the covariance matrix Qxx.  Qxx is calculated from the Jacobian matrix and the optimised parameters.

    @keyword spin_id:       The spin identification string.
    @type spin_id:          str
    @keyword epsrel:        Any columns of R which satisfy |R_{kk}| <= epsrel |R_{11}| are considered linearly-dependent and are excluded from the covariance matrix, where the corresponding rows and columns of the covariance matrix are set to zero.
    @type epsrel:           float
    @keyword verbosity:     The amount of information to print.  The higher the value, the greater the verbosity.
    @type verbosity:        int
    """

    # Check that the C modules have been compiled.
    if not C_module_exp_fn:
        raise RelaxError(
            "Relaxation curve fitting is not available.  Try compiling the C modules on your platform."
        )

    # Perform checks.
    check_model_type(model=MODEL_R2EFF)

    # Loop over the spins.
    for cur_spin, mol_name, resi, resn, cur_spin_id in spin_loop(
            selection=spin_id, full_info=True, return_id=True,
            skip_desel=True):
        # Generate spin string.
        spin_string = generate_spin_string(spin=cur_spin,
                                           mol_name=mol_name,
                                           res_num=resi,
                                           res_name=resn)

        # Raise an error if the spin has not been optimised.
        if not (hasattr(cur_spin, 'r2eff') and hasattr(cur_spin, 'i0')):
            raise RelaxError(
                "Spin %s does not contain optimised 'r2eff' and 'i0' values.  Try executing: minimise.execute(min_algor='Newton', constraints=False)"
                % (spin_string))

        # Raise a warning if the gradient count is 0, as this could point to a lack of minimisation.
        if hasattr(cur_spin, 'g_count'):
            if getattr(cur_spin, 'g_count') == 0.0:
                text = "Spin %s contains a gradient count of 0.0.  Is the R2eff parameter optimised?  Try executing: minimise.execute(min_algor='Newton', constraints=False)" % (
                    spin_string)
                warn(RelaxWarning("%s." % text))

        # Print information.
        if verbosity >= 1:
            # Individual spin block section.
            top = 2
            if verbosity >= 2:
                top += 2
            subsection(file=sys.stdout,
                       text="Estimating R2eff error for spin: %s" %
                       spin_string,
                       prespace=top)

        # Loop over each spectrometer frequency and dispersion point.
        for exp_type, frq, offset, point, ei, mi, oi, di in loop_exp_frq_offset_point(
                return_indices=True):
            # The parameter key.
            param_key = return_param_key_from_data(exp_type=exp_type,
                                                   frq=frq,
                                                   offset=offset,
                                                   point=point)

            # Extract values.
            r2eff = getattr(cur_spin, 'r2eff')[param_key]
            i0 = getattr(cur_spin, 'i0')[param_key]

            # Pack data
            param_vector = [r2eff, i0]

            # The peak intensities, errors and times.
            values = []
            errors = []
            times = []
            for time in loop_time(exp_type=exp_type,
                                  frq=frq,
                                  offset=offset,
                                  point=point):
                values.append(
                    average_intensity(spin=cur_spin,
                                      exp_type=exp_type,
                                      frq=frq,
                                      offset=offset,
                                      point=point,
                                      time=time))
                errors.append(
                    average_intensity(spin=cur_spin,
                                      exp_type=exp_type,
                                      frq=frq,
                                      offset=offset,
                                      point=point,
                                      time=time,
                                      error=True))
                times.append(time)

            # Convert to numpy array.
            values = asarray(values)
            errors = asarray(errors)
            times = asarray(times)

            # Initialise data in C code.
            scaling_list = [1.0, 1.0]
            model = Relax_fit_opt(model='exp',
                                  num_params=len(param_vector),
                                  values=values,
                                  errors=errors,
                                  relax_times=times,
                                  scaling_matrix=scaling_list)

            # Use the direct Jacobian from function.
            jacobian_matrix_exp = transpose(
                asarray(model.jacobian(param_vector)))
            weights = 1. / errors**2

            # Get the covariance matrix.
            pcov = multifit_covar(J=jacobian_matrix_exp, weights=weights)

            # To compute one standard deviation errors on the parameters, take the square root of the diagonal covariance.
            param_vector_error = sqrt(diag(pcov))

            # Extract values.
            r2eff_err, i0_err = param_vector_error

            # Copy the r2eff dictionary to the r2eff_err dictionary, as they share the same keys.
            if not hasattr(cur_spin, 'r2eff_err'):
                setattr(cur_spin, 'r2eff_err',
                        deepcopy(getattr(cur_spin, 'r2eff')))
            if not hasattr(cur_spin, 'i0_err'):
                setattr(cur_spin, 'i0_err', deepcopy(getattr(cur_spin, 'i0')))

            # Set error.
            cur_spin.r2eff_err[param_key] = r2eff_err
            cur_spin.i0_err[param_key] = i0_err

            # Get other relevant information.
            chi2 = getattr(cur_spin, 'chi2')

            # Print information.
            print_strings = []
            if verbosity >= 1:
                # Add print strings.
                point_info = "%s at %3.1f MHz, for offset=%3.3f ppm and dispersion point %-5.1f, with %i time points." % (
                    exp_type, frq / 1E6, offset, point, len(times))
                print_strings.append(point_info)

                par_info = "r2eff=%3.3f r2eff_err=%3.4f, i0=%6.1f, i0_err=%3.4f, chi2=%3.3f.\n" % (
                    r2eff, r2eff_err, i0, i0_err, chi2)
                print_strings.append(par_info)

                if verbosity >= 2:
                    time_info = ', '.join(map(str, times))
                    print_strings.append('For time array: ' + time_info +
                                         '.\n\n')

            # Print info
            if len(print_strings) > 0:
                for print_string in print_strings:
                    print(print_string),
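
The multifit_covar call above reduces to the standard weighted least-squares covariance Qxx = (J^T W J)^-1 with W = diag(1/errors**2).  A numpy sketch of that step (my own illustration, not relax's implementation), using the analytical Jacobian of the exponential model:

import numpy as np

def covar_from_jacobian(J, weights):
    """Qxx = (J^T W J)^-1 for W = diag(weights), weights = 1 / errors**2."""
    W = np.diag(weights)
    return np.linalg.inv(J.T @ W @ J)

def exp_jacobian(params, times):
    """Jacobian of I(t) = i0 * exp(-r2eff * t), one row per time point."""
    r2eff, i0 = params
    decay = np.exp(-r2eff * times)
    # Columns: dI/dr2eff and dI/di0.
    return np.column_stack((-times * i0 * decay, decay))

# Illustrative data only.
times = np.array([0.0, 0.04, 0.08, 0.12])
errors = np.full(4, 10.0)
J = exp_jacobian([10.0, 1000.0], times)
pcov = covar_from_jacobian(J, weights=1.0 / errors**2)
r2eff_err, i0_err = np.sqrt(np.diag(pcov))
print(r2eff_err, i0_err)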
Example #17
0
def sherekhan_input(spin_id=None, force=False, dir='ShereKhan'):
    """Create the ShereKhan input files.

    @keyword spin_id:           The spin ID string to restrict the file creation to.
    @type spin_id:              str
    @keyword force:             A flag which if True will cause all pre-existing files to be overwritten.
    @type force:                bool
    @keyword dir:               The optional directory to place the files into.  If None, then the files will be placed into the current directory.
    @type dir:                  str or None
    """

    # Test if the current pipe exists.
    check_pipe()

    # Test if sequence data is loaded.
    if not exists_mol_res_spin_data():
        raise RelaxNoSequenceError

    # Test if the experiment type has been set.
    if not hasattr(cdp, 'exp_type'):
        raise RelaxError("The relaxation dispersion experiment type has not been specified.")

    # Test if the model has been set.
    if not hasattr(cdp, 'model_type'):
        raise RelaxError("The relaxation dispersion model has not been specified.")

    # Directory creation.
    if dir is not None:
        mkdir_nofail(dir, verbosity=0)

    # Loop over the spin blocks.
    cluster_index = 0
    for spin_ids in loop_cluster():
        # The spin containers.
        spins = spin_ids_to_containers(spin_ids)

        # Loop over the magnetic fields.
        for exp_type, frq, ei, mi in loop_exp_frq(return_indices=True):
            # Loop over the time points and count them.
            time_i = 0
            for time, ti in loop_time(exp_type=exp_type, frq=frq, return_indices=True):
                time_i += 1

            # Check that no more than one time point is returned.
            if time_i > 1:
                raise RelaxError("The number of returned time points is %i, but only 1 time point is expected." % time_i)

            # The ShereKhan input file for the spin cluster.
            file_name = 'sherekhan_frq%s.in' % (mi+1)
            if dir is not None:
                dir_name = dir + sep + 'cluster%s' % (cluster_index+1)
            else:
                dir_name = 'cluster%s' % (cluster_index+1)
            file = open_write_file(file_name=file_name, dir=dir_name, force=force)

            # The B0 field for the nuclei of interest in MHz (must be positive to be accepted by the server).
            file.write("%.10f\n" % abs(frq / periodic_table.gyromagnetic_ratio('1H') * periodic_table.gyromagnetic_ratio('15N') / 1e6))

            # The constant relaxation time for the CPMG experiment in seconds.
            file.write("%s\n" % (time))

            # The comment line.
            file.write("# %-18s %-20s %-20s\n" % ("nu_cpmg (Hz)", "R2eff (rad/s)", "Error"))

            # Loop over the spins of the cluster.
            for i in range(len(spins)):
                # Get the residue container.
                res = return_residue(spin_ids[i])

                # Name the residue if needed.
                res_name = res.name
                if res_name is None:
                    res_name = 'X'

                # Initialise the lines to output (to be able to catch missing data).
                lines = []

                # The residue ID line.
                lines.append("# %s%s\n" % (res_name, res.num))

                # Loop over the dispersion points.
                for offset, point in loop_offset_point(exp_type=exp_type, frq=frq, skip_ref=True):
                    # The parameter key.
                    param_key = return_param_key_from_data(exp_type=exp_type, frq=frq, offset=offset, point=point)

                    # No data.
                    if param_key not in spins[i].r2eff:
                        continue

                    # Store the data.
                    lines.append("%20.15g %20.13g %20.13g\n" % (point, spins[i].r2eff[param_key], spins[i].r2eff_err[param_key]))

                # No data.
                if len(lines) == 1:
                    continue

                # Write out the data.
                for line in lines:
                    file.write(line)

            # Close the file.
            file.close()

        # Increment the cluster index.
        cluster_index += 1
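
For orientation, a hedged sketch of one resulting sherekhan_frq1.in file, with purely illustrative numbers: the B0 field in MHz on the first line, the constant CPMG relaxation time in seconds on the second, then the comment header, a residue ID line, and one row per dispersion point (nu_cpmg, R2eff, error):

50.6221000000
0.04
# nu_cpmg (Hz)       R2eff (rad/s)        Error
# G2
                  50       12.3456789012        0.1234567890
                 100       11.9876543210        0.1198765432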
Example #19
0
        # Add the key to the dictionary.
        my_dic[spin_id][param_key] = OrderedDict()

        # Get the value.
        r2eff = getattr(cur_spin, 'r2eff')[param_key]
        my_dic[spin_id][param_key]['r2eff'] = r2eff
        #r2eff_err = getattr(cur_spin, 'r2eff_err')[param_key]
        i0 = getattr(cur_spin, 'i0')[param_key]
        #i0_err = getattr(cur_spin, 'i0_err')[param_key]
        my_dic[spin_id][param_key]['i0'] = i0

        if do_boot:
            values = []
            errors = []
            times = []
            for time in loop_time(exp_type=exp_type, frq=frq, offset=offset, point=point):
                values.append(average_intensity(spin=cur_spin, exp_type=exp_type, frq=frq, offset=offset, point=point, time=time))
                errors.append(average_intensity(spin=cur_spin, exp_type=exp_type, frq=frq, offset=offset, point=point, time=time, error=True))
                times.append(time)

            # Convert to numpy array.
            values = asarray(values)
            errors = asarray(errors)
            times = asarray(times)

            R_m_sim_l = []
            I0_m_sim_l = []
            for j in range(sim_boot):
                if j % 100 == 0:
                    print("Simulation %i"%j)
                # Start minimisation.
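
The bootstrap loop above collects R_m_sim_l and I0_m_sim_l across sim_boot rounds by resampling the peak intensities within their errors and refitting each synthetic curve; the spread of those lists then gives the parameter errors.  A self-contained sketch of that idea, using the linearised fit in place of the full minimisation (the data and seed are illustrative, and the log assumes the resampled intensities stay positive):

import numpy as np

rng = np.random.default_rng(0)
sim_boot = 500

# Illustrative data only.
times = np.array([0.0, 0.04, 0.08, 0.12])
values = np.array([1000.0, 670.0, 449.0, 301.0])
errors = np.full(4, 10.0)

R_m_sim_l = []
I0_m_sim_l = []
for j in range(sim_boot):
    if j % 100 == 0:
        print("Simulation %i" % j)
    # Resample the intensities within their errors.
    values_sim = rng.normal(loc=values, scale=errors)
    # Refit each synthetic curve (linearised stand-in for the minimisation).
    slope, intercept = np.polyfit(times, np.log(values_sim), 1)
    R_m_sim_l.append(-slope)
    I0_m_sim_l.append(np.exp(intercept))

# The parameter errors are the standard deviations across the simulations.
r2eff_err = np.std(R_m_sim_l, ddof=1)
i0_err = np.std(I0_m_sim_l, ddof=1)
print(r2eff_err, i0_err)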