Example #1
    def pre_run_parameters(self, model=None, model_path=None):
        """Copy parameters from an earlier analysis.

        @keyword model:         The model to be optimised.
        @type model:            str
        @keyword model_path:    The folder name for the model, where any spaces have been replaced with underscores.
        @type model_path:       str
        """

        # Printout.
        subsection(file=sys.stdout, text="Pre-run parameters", prespace=1)

        # The data pipe name.
        pipe_name = self.name_pipe('pre')

        # Create a temporary data pipe for the previous run.
        self.interpreter.pipe.create(pipe_name=pipe_name, pipe_type='relax_disp')

        # Load the previous results.
        path = self.pre_run_dir + sep + model_path
        self.interpreter.results.read(file='results', dir=path)

        # Force copy of the R2eff values.
        if model == MODEL_R2EFF:
            self.interpreter.value.copy(pipe_from=pipe_name, pipe_to=self.name_pipe(model), param='r2eff', force=True)

        # Copy the parameters.
        self.interpreter.relax_disp.parameter_copy(pipe_from=pipe_name, pipe_to=self.name_pipe(model))

        # Finally, switch back to the original data pipe and delete the temporary one.
        self.interpreter.pipe.switch(pipe_name=self.name_pipe(model))
        self.interpreter.pipe.delete(pipe_name=pipe_name)
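Note: the create/read/copy/switch/delete sequence above is a recurring pattern in these examples: a throwaway data pipe is created just long enough to pull parameters out of an earlier results file. As a minimal sketch (a hypothetical helper, not part of relax itself, assuming only the interpreter calls used above), the pattern could be wrapped in a context manager:

from contextlib import contextmanager

@contextmanager
def temporary_pipe(interpreter, pipe_name, pipe_type, restore_to):
    """Create a throwaway data pipe, then switch back and delete it on exit."""
    # Create the temporary pipe.
    interpreter.pipe.create(pipe_name=pipe_name, pipe_type=pipe_type)
    try:
        yield pipe_name
    finally:
        # Restore the original pipe and remove the temporary one.
        interpreter.pipe.switch(pipe_name=restore_to)
        interpreter.pipe.delete(pipe_name=pipe_name)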
Example #2
    def visualisation(self, model=None):
        """Create visual representations of the frame order results for the given model.

        This includes a PDB representation of the motions (the 'cone.pdb' file located in each model directory) together with a relax script for displaying the average domain positions and the cone/motion representation in PyMOL (the 'pymol_display.py' file, also created in the model directory).

        @keyword model:     The frame order model to visualise.  This should match the model of the current data pipe, unless the special value of 'final' is used to indicate the visualisation of the final results.
        @type model:        str
        """

        # Sanity check.
        if model != 'final' and model != cdp.model:
            raise RelaxError("The model '%s' does not match the model '%s' of the current data pipe." % (model, cdp.model))

        # The PDB representation of the model.
        self.interpreter.frame_order.pdb_model(dir=self.results_dir+model, force=True)

        # Create the visualisation script.
        subsection(file=sys.stdout, text="Creating a PyMOL visualisation script.")
        script = open_write_file(file_name='pymol_display.py', dir=self.results_dir+model, force=True)

        # Add a comment for the user.
        script.write("# relax script for displaying the frame order results of this '%s' model in PyMOL.\n\n" % model)

        # The script contents.
        script.write("# PyMOL visualisation.\n")
        script.write("pymol.view()\n")
        script.write("pymol.command('show spheres')\n")
        script.write("pymol.frame_order(file='frame_order.pdb', dist_file='frame_order_distribution.pdb')\n")

        # Close the file.
        script.close()
Example #4
    def run(self, processor, completed):
        """Setup and perform the model-free optimisation."""

        # Initialise the function to minimise.
        self.mf = Mf(init_params=self.opt_params.param_vector,
                     model_type=self.data.model_type,
                     diff_type=self.data.diff_type,
                     diff_params=self.data.diff_params,
                     scaling_matrix=self.data.scaling_matrix,
                     num_spins=self.data.num_spins,
                     equations=self.data.equations,
                     param_types=self.data.param_types,
                     param_values=self.data.param_values,
                     relax_data=self.data.ri_data,
                     errors=self.data.ri_data_err,
                     bond_length=self.data.r,
                     csa=self.data.csa,
                     num_frq=self.data.num_frq,
                     frq=self.data.frq,
                     num_ri=self.data.num_ri,
                     remap_table=self.data.remap_table,
                     noe_r1_table=self.data.noe_r1_table,
                     ri_labels=self.data.ri_types,
                     gx=self.data.gx,
                     gh=self.data.gh,
                     h_bar=self.data.h_bar,
                     mu0=self.data.mu0,
                     num_params=self.data.num_params,
                     vectors=self.data.xh_unit_vectors)

        # Printout.
        if self.opt_params.verbosity >= 1 and (self.data.model_type == 'mf'
                                               or self.data.model_type
                                               == 'local_tm'):
            subsection(file=sys.stdout,
                       text="Optimisation:  Spin '%s'" % self.data.spin_id,
                       prespace=2,
                       postspace=0)

        # Perform the optimisation.
        results = self.optimise()

        # Disassemble the results list.
        param_vector, func, iter_count, fc, gc, hc, warning = results

        processor.return_object(
            MF_result_command(processor,
                              self.memo_id,
                              param_vector,
                              func,
                              iter_count,
                              fc,
                              gc,
                              hc,
                              warning,
                              completed=False))
Example #5
    def _print_model_title_spin(self, prefix=None, model_info=None):
        """Default method for when the model_loop() method simply loops over spins.

        @keyword prefix:        The starting text of the title.  This should be printed out first, followed by the model information text.
        @type prefix:           str
        @keyword model_info:    The spin container and the spin ID string from the _model_loop_spin() method.
        @type model_info:       SpinContainer instance, str
        """

        # The printout.
        spin_id = model_info[1]
        text = prefix + "The spin %s" % spin_id
        subsection(file=sys.stdout, text=text, prespace=2)
Example #6
    def run(self, processor, completed):
        """Set up and perform the optimisation."""

        # Print out.
        if self.verbosity >= 1:
            # Individual spin block section.
            top = 2
            if self.verbosity >= 2:
                top += 2
            subsection(file=sys.stdout, text="Fitting to the spin block %s"%self.spin_ids, prespace=top)

            # Grid search printout.
            if search('^[Gg]rid', self.min_algor):
                print("Unconstrained grid search size: %s (constraints may decrease this size).\n" % self.grid_size)

        # Initialise the function to minimise.
        model = Dispersion(model=self.spins[0].model, num_params=self.param_num, num_spins=len(self.spins), num_frq=len(self.fields), exp_types=self.exp_types, values=self.values, errors=self.errors, missing=self.missing, frqs=self.frqs, frqs_H=self.frqs_H, cpmg_frqs=self.cpmg_frqs, spin_lock_nu1=self.spin_lock_nu1, chemical_shifts=self.chemical_shifts, offset=self.offsets, tilt_angles=self.tilt_angles, r1=self.r1, relax_times=self.relax_times, scaling_matrix=self.scaling_matrix)

        # Grid search.
        if search('^[Gg]rid', self.min_algor):
            results = grid(func=model.func, args=(), num_incs=self.inc_new, lower=self.lower_new, upper=self.upper_new, A=self.A, b=self.b, verbosity=self.verbosity)

            # Unpack the results.
            param_vector, chi2, iter_count, warning = results
            f_count = iter_count
            g_count = 0.0
            h_count = 0.0

        # Minimisation.
        else:
            results = generic_minimise(func=model.func, args=(), x0=self.param_vector, min_algor=self.min_algor, min_options=self.min_options, func_tol=self.func_tol, grad_tol=self.grad_tol, maxiter=self.max_iterations, A=self.A, b=self.b, full_output=True, print_flag=self.verbosity)

            # Unpack the results.
            if results is None:
                return
            param_vector, chi2, iter_count, f_count, g_count, h_count, warning = results

        # Optimisation printout.
        if self.verbosity:
            print("\nOptimised parameter values:")
            for i in range(len(param_vector)):
                print("%-20s %25.15f" % (self.param_names[i], param_vector[i]*self.scaling_matrix[i, i]))

        # Printout.
        if self.sim_index is not None:
            print("Simulation %s, cluster %s" % (self.sim_index+1, self.spin_ids))

        # Create the result command object to send back to the master.
        processor.return_object(Disp_result_command(processor=processor, memo_id=self.memo_id, param_vector=param_vector, chi2=chi2, iter_count=iter_count, f_count=f_count, g_count=g_count, h_count=h_count, warning=warning, missing=self.missing, back_calc=model.back_calc, completed=False))
Example #8
    def run(self, processor, completed):
        """Set up and perform the optimisation."""

        # Print out.
        if self.verbosity >= 1:
            # Individual spin block section.
            top = 2
            if self.verbosity >= 2:
                top += 2
            subsection(file=sys.stdout, text="Fitting to the spin block %s"%self.spin_ids, prespace=top)

            # Grid search printout.
            if search('^[Gg]rid', self.min_algor):
                result = 1
                for x in self.inc:
                    result = mul(result, x)
                print("Unconstrained grid search size: %s (constraints may decrease this size).\n" % result)

        # Initialise the function to minimise.
        model = Dispersion(model=self.spins[0].model, num_params=self.param_num, num_spins=count_spins(self.spins), num_frq=len(self.fields), exp_types=self.exp_types, values=self.values, errors=self.errors, missing=self.missing, frqs=self.frqs, frqs_H=self.frqs_H, cpmg_frqs=self.cpmg_frqs, spin_lock_nu1=self.spin_lock_nu1, chemical_shifts=self.chemical_shifts, offset=self.offsets, tilt_angles=self.tilt_angles, r1=self.r1, relax_times=self.relax_times, scaling_matrix=self.scaling_matrix, r1_fit=self.r1_fit)

        # Grid search.
        if search('^[Gg]rid', self.min_algor):
            results = grid(func=model.func, args=(), num_incs=self.inc, lower=self.lower, upper=self.upper, A=self.A, b=self.b, verbosity=self.verbosity)

            # Unpack the results.
            param_vector, chi2, iter_count, warning = results
            f_count = iter_count
            g_count = 0.0
            h_count = 0.0

        # Minimisation.
        else:
            results = generic_minimise(func=model.func, args=(), x0=self.param_vector, min_algor=self.min_algor, min_options=self.min_options, func_tol=self.func_tol, grad_tol=self.grad_tol, maxiter=self.max_iterations, A=self.A, b=self.b, full_output=True, print_flag=self.verbosity)

            # Unpack the results.
            if results is None:
                return
            param_vector, chi2, iter_count, f_count, g_count, h_count, warning = results

        # Optimisation printout.
        if self.verbosity:
            print("\nOptimised parameter values:")
            for i in range(len(param_vector)):
                print("%-20s %25.15f" % (self.param_names[i], param_vector[i]*self.scaling_matrix[i, i]))

        # Create the result command object to send back to the master.
        processor.return_object(Disp_result_command(processor=processor, memo_id=self.memo_id, param_vector=param_vector, chi2=chi2, iter_count=iter_count, f_count=f_count, g_count=g_count, h_count=h_count, warning=warning, missing=self.missing, back_calc=model.get_back_calc(), completed=False))
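In the dispersion examples above, the unconstrained grid search size is simply the product of the per-dimension increment counts (the mul loop in Example #8). A one-line standard-library equivalent, assuming inc is a list of ints:

from functools import reduce
from operator import mul

def grid_size(inc):
    """Return the total number of grid points, i.e. the product of the increments."""
    return reduce(mul, inc, 1)

# For example, 3 parameters with 21 increments each gives 21**3 points:
print(grid_size([21, 21, 21]))  # 9261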
Example #9
    def run(self, processor, completed):
        """Setup and perform the model-free optimisation."""

        # Initialise the function to minimise.
        self.mf = Mf(
            init_params=self.opt_params.param_vector,
            model_type=self.data.model_type,
            diff_type=self.data.diff_type,
            diff_params=self.data.diff_params,
            scaling_matrix=self.data.scaling_matrix,
            num_spins=self.data.num_spins,
            equations=self.data.equations,
            param_types=self.data.param_types,
            param_values=self.data.param_values,
            relax_data=self.data.ri_data,
            errors=self.data.ri_data_err,
            bond_length=self.data.r,
            csa=self.data.csa,
            num_frq=self.data.num_frq,
            frq=self.data.frq,
            num_ri=self.data.num_ri,
            remap_table=self.data.remap_table,
            noe_r1_table=self.data.noe_r1_table,
            ri_labels=self.data.ri_types,
            gx=self.data.gx,
            gh=self.data.gh,
            h_bar=self.data.h_bar,
            mu0=self.data.mu0,
            num_params=self.data.num_params,
            vectors=self.data.xh_unit_vectors,
        )

        # Printout.
        if self.opt_params.verbosity >= 1 and (self.data.model_type == "mf" or self.data.model_type == "local_tm"):
            subsection(file=sys.stdout, text="Optimisation:  Spin '%s'" % self.data.spin_id, prespace=2, postspace=0)

        # Perform the optimisation.
        results = self.optimise()

        # Disassemble the results list.
        param_vector, func, iter_count, fc, gc, hc, warning = results

        processor.return_object(
            MF_result_command(processor, self.memo_id, param_vector, func, iter_count, fc, gc, hc, warning, completed=False)
        )
Example #10
    def _print_model_title_global(self, prefix=None, model_info=None):
        """Default method for when the model_loop() method simply loops over a single global model.

        @keyword prefix:        The starting text of the title.  This should be printed out first, followed by the model information text.
        @type prefix:           str
        @keyword model_info:    The model information from _model_loop_single_global().  This should be zero for the single global model.
        @type model_info:       int
        """

        # Create the text from the prefix and model info.
        text = ''
        if prefix:
            text += prefix
        else:
            text += 'Model '
        text += repr(model_info)

        # The printout.
        subsection(file=sys.stdout, text=text, prespace=2)
Example #11
    def _print_model_title_global(self, prefix=None, model_info=None):
        """Default method for when the model_loop() method simply loops over a single global model.

        @keyword prefix:        The starting text of the title.  This should be printed out first, followed by the model information text.
        @type prefix:           str
        @keyword model_info:    The model information from _model_loop_single_global().  This should be zero for the single global model.
        @type model_info:       int
        """

        # Create the text from the prefix and model info.
        text = ""
        if prefix:
            text += prefix
        else:
            text += "Model "
        text += repr(model_info)

        # The printout.
        subsection(file=sys.stdout, text=text, prespace=2)
Example #12
    def test_subsection(self):
        """Test of the lib.text.sectioning.subsection() function."""

        # Write out the subsection.
        file = DummyFileObject()
        subsection(file=file, text='Test subsection')

        # Read the results.
        lines = file.readlines()
        print("Formatted subsection lines:  %s" % lines)

        # Check the title.
        real_lines = [
            '\n',
            'Test subsection\n',
            '---------------\n',
            '\n',
        ]
        self.assertEqual(len(lines), len(real_lines))
        for i in range(len(lines)):
            self.assertEqual(lines[i], real_lines[i])
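The expected lines in this test pin down the subsection() format: a blank line, the title, a dash underline of matching length, and a trailing blank line. A minimal sketch reproducing that output (the real lib.text.sectioning.subsection() also takes the prespace and postspace keywords seen in the other examples):

import sys

def subsection_sketch(file=sys.stdout, text='', prespace=1, postspace=1):
    """Write a subsection title underlined with dashes."""
    # Leading blank lines.
    file.write('\n' * prespace)
    # The title and its underline of matching length.
    file.write(text + '\n')
    file.write('-' * len(text) + '\n')
    # Trailing blank lines.
    file.write('\n' * postspace)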
Example #13
    def pre_run_parameters(self, model=None):
        """Copy parameters from an earlier analysis.

        @keyword model: The model to be optimised.
        @type model:    str
        """

        # Printout.
        subsection(file=sys.stdout, text="Pre-run parameters", prespace=1)

        # Create a temporary data pipe for the previous run.
        self.interpreter.pipe.create(pipe_name='pre', pipe_type='relax_disp')

        # Load the previous results.
        path = self.pre_run_dir + sep + model
        self.interpreter.results.read(file='results', dir=path)

        # Copy the parameters.
        self.interpreter.relax_disp.parameter_copy(pipe_from='pre', pipe_to=model)

        # Finally, switch back to the original data pipe and delete the temporary one.
        self.interpreter.pipe.switch(pipe_name=model)
        self.interpreter.pipe.delete(pipe_name='pre')
Example #14
def copy(pipe_from=None, pipe_to=None):
    """Copy dispersion parameters from one data pipe to another, taking the median of previous values to a start value for clusters.
    Taking the median prevent averaging extreme outliers.

    @param pipe_from:   The data pipe to copy the value from.  This defaults to the current data pipe.
    @type pipe_from:    str
    @param pipe_to:     The data pipe to copy the value to.  This defaults to the current data pipe.
    @type pipe_to:      str
    """

    # The current data pipe.
    pipe_orig = pipes.cdp_name()
    if pipe_from is None:
        pipe_from = pipe_orig
    if pipe_to is None:
        pipe_to = pipe_orig

    # Test that the pipes exist.
    check_pipe(pipe_from)
    check_pipe(pipe_to)

    # Test that the pipes are not the same.
    if pipe_from == pipe_to:
        raise RelaxError("The source and destination pipes cannot be the same.")

    # Test if the sequence data for pipe_from is loaded.
    if not exists_mol_res_spin_data(pipe_from):
        raise RelaxNoSequenceError(pipe_from)

    # Test if the sequence data for pipe_to is loaded.
    if not exists_mol_res_spin_data(pipe_to):
        raise RelaxNoSequenceError(pipe_to)

    # Switch to the destination data pipe.
    pipes.switch(pipe_to)

    # Loop over the clusters.
    for spin_ids in loop_cluster():
        # Initialise some variables.
        model = None
        pA = []
        pB = []
        pC = []
        kex = []
        kex_AB = []
        kex_AC = []
        kex_BC = []
        k_AB = []
        kB = []
        kC = []
        tex = []
        count = 0
        spins_from = []
        spins_to = []
        selected_cluster = False

        # Loop over the spins, adding parameters to a list, which in the end will be used to find the median.
        for id in spin_ids:
            # Get the spins, then store them.
            spin_from = return_spin(spin_id=id, pipe=pipe_from)
            spin_to = return_spin(spin_id=id, pipe=pipe_to)
            spins_from.append(spin_from)
            spins_to.append(spin_to)

            # Skip deselected spins.
            if not spin_from.select or not spin_to.select:
                continue

            # The first printout.
            if not selected_cluster:
                subsection(file=sys.stdout, text="Copying parameters for the spin block %s"%spin_ids, prespace=2)

            # Change the cluster selection flag.
            selected_cluster = True

            # The model.
            if not model:
                model = spin_from.model

            # Check that the models match for all spins of the cluster.
            if spin_from.model != model:
                raise RelaxError("The model '%s' of spin '%s' from the source data pipe does not match the '%s' model of previous spins of the cluster." % (spin_from.model, id, model))
            if spin_to.model != model:
                raise RelaxError("The model '%s' of spin '%s' from the destination data pipe does not match the '%s' model of previous spins of the cluster." % (spin_from.model, id, model))

            # Collect the source parameters.
            if 'pA' in spin_from.params:
                pA.append(spin_from.pA)
            if 'pB' in spin_from.params:
                pB.append(spin_from.pB)
            if 'pC' in spin_from.params:
                pC.append(spin_from.pC)
            if 'kex' in spin_from.params:
                kex.append(spin_from.kex)
            if 'kex_AB' in spin_from.params:
                kex_AB.append(spin_from.kex_AB)
            if 'kex_AC' in spin_from.params:
                kex_AC.append(spin_from.kex_AC)
            if 'kex_BC' in spin_from.params:
                kex_BC.append(spin_from.kex_BC)
            if 'k_AB' in spin_from.params:
                k_AB.append(spin_from.k_AB)
            if 'kB' in spin_from.params:
                kB.append(spin_from.kB)
            if 'kC' in spin_from.params:
                kC.append(spin_from.kC)
            if 'tex' in spin_from.params:
                tex.append(spin_from.tex)

            # Increment the spin count.
            count += 1

        # The cluster is not selected, so move to the next.
        if not selected_cluster:
            continue

        # Take median of parameters.
        if len(pA) > 0:
            pA = median(pA)
            print("Median pA value:  %.15f" % pA)
        if len(pB) > 0:
            pB = median(pB)
            print("Median pB value:  %.15f" % pB)
        if len(pC) > 0:
            pC = median(pC)
            print("Median pC value:  %.15f" % pC)
        if len(kex) > 0:
            kex = median(kex)
            print("Median kex value: %.15f" % kex)
        if len(kex_AB) > 0:
            kex_AB = median(kex_AB)
            print("Median kex_AB value: %.15f" % kex_AB)
        if len(kex_AC) > 0:
            kex_AC = median(kex_AC)
            print("Median kex_AC value: %.15f" % kex_AC)
        if len(kex_BC) > 0:
            kex_BC = median(kex_BC)
            print("Median kex_BC value: %.15f" % kex_BC)
        if len(k_AB) > 0:
            k_AB = median(k_AB)
            print("Median k_AB value: %.15f" % k_AB)
        if len(kB) > 0:
            kB = median(kB)
            print("Median kB value:  %.15f" % kB)
        if len(kC) > 0:
            kC = median(kC)
            print("Median kC value:  %.15f" % kC)
        if len(tex) > 0:
            tex = median(tex)
            print("Median tex value: %.15f" % tex)

        # Loop over the spins, this time copying the parameters.
        for i in range(len(spin_ids)):
            # Alias the containers.
            spin_from = spins_from[i]
            spin_to = spins_to[i]

            # Skip deselected spins.
            if not spin_from.select or not spin_to.select:
                continue

            # The R20 parameters.
            if 'r2' in spin_from.params:
                spin_to.r2 = deepcopy(spin_from.r2)
            if 'r2a' in spin_from.params:
                spin_to.r2a = deepcopy(spin_from.r2a)
            if 'r2b' in spin_from.params:
                spin_to.r2b = deepcopy(spin_from.r2b)

            # The median parameters.
            if 'pB' in spin_from.params and 'pC' not in spin_from.params:
                spin_to.pA = pA
                spin_to.pB = pB
                spin_to.pC = 1.0 - pA - pB
            elif 'pA' in spin_from.params:
                spin_to.pA = pA
                spin_to.pB = 1.0 - pA
            if 'kex' in spin_from.params:
                spin_to.kex = kex
            if 'kex_AB' in spin_from.params:
                spin_to.kex_AB = kex_AB
            if 'kex_AC' in spin_from.params:
                spin_to.kex_AC = kex_AC
            if 'kex_BC' in spin_from.params:
                spin_to.kex_BC = kex_BC
            if 'k_AB' in spin_from.params:
                spin_to.k_AB = k_AB
            if 'kB' in spin_from.params:
                spin_to.kB = kB
            if 'kC' in spin_from.params:
                spin_to.kC = kC
            if 'tex' in spin_from.params:
                spin_to.tex = tex

            # All other spin specific parameters.
            for param in spin_from.params:
                if param in ['r2', 'r2a', 'r2b', 'pA', 'pB', 'pC', 'kex', 'kex_AB', 'kex_AC', 'kex_BC', 'k_AB', 'kB', 'kC', 'tex']:
                    continue

                # Copy the value.
                setattr(spin_to, param, deepcopy(getattr(spin_from, param)))

    # Switch back to the original data pipe.
    pipes.switch(pipe_orig)
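The docstring's rationale for the median is easy to check numerically: one diverged spin in a cluster drags the mean far from the bulk of the values, while the median stays representative. A quick illustration (the kex values here are invented for the demonstration):

from numpy import mean, median

# kex values for a hypothetical cluster where one spin's fit diverged.
kex_values = [1520.3, 1498.7, 1543.1, 98000.0]

print("Mean kex:   %.1f" % mean(kex_values))    # 25640.5, dominated by the outlier
print("Median kex: %.1f" % median(kex_values))  # 1531.7, representative of the cluster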
Example #15
def sn_ratio_deselection(ratio=10.0, operation='<', all_sn=False, select=False, verbose=True):
    """Use user function deselect.spin on spins with signal to noise ratio higher or lower than ratio.  The operation determines the selection operation.

    @keyword ratio:         The ratio to compare to.
    @type ratio:            float
    @keyword operation:     The comparison operation by which to select the spins, evaluated as operation(sn_ratio, ratio), where operation can be one of:  '<', '<=', '>', '>=', '==', '!='.
    @type operation:        str
    @keyword all_sn:        A flag specifying if all the signal-to-noise ratios per spin should match the comparison operator, or if a single match is enough.
    @type all_sn:           bool
    @keyword select:        A flag specifying if the user function select.spin should be used instead.
    @type select:           bool
    @keyword verbose:       A flag which if True will print additional information out.
    @type verbose:          bool
    """

    # Tests.
    check_pipe()
    check_mol_res_spin_data()

    # Test if spectra have been loaded.
    if not hasattr(cdp, 'spectrum_ids'):
        raise RelaxError("No spectra have been loaded.")

    # Assign the comparison operator.
    # "'<' : strictly less than"
    if operation == '<':
        op = operator.lt

    # "'<=' : less than or equal"
    elif operation == '<=':
        op = operator.le

    # "'>' : strictly greater than"
    elif operation == '>':
        op = operator.gt

    # "'>=' : greater than or equal"
    elif operation == '>=':
        op = operator.ge

    # "'==' : equal"
    elif operation == '==':
        op = operator.eq

    # "'!=' : not equal",
    elif operation == '!=':
        op = operator.ne

    # If not assigned, raise error.
    else:
        raise RelaxError("The compare operation does not belong to the allowed list of methods: ['<', '<=', '>', '>=', '==', '!=']")

    # Assign text for print out.
    if all_sn:
        text_all_sn = "all"
    else:
        text_all_sn = "any"

    if select:
        text_sel = "selected"
        sel_func = sel_spin
    else:
        text_sel = "deselected"
        sel_func = desel_spin

    # Printout.
    section(file=sys.stdout, text="Signal to noise ratio comparison selection", prespace=1, postspace=0)
    print("For the comparison test: S/N %s %1.1f"%(operation, ratio))

    # Loop over the spins.
    spin_ids = []
    for spin, spin_id in spin_loop(return_id=True):
        # Skip spins missing sn_ratio.
        if not hasattr(spin, 'sn_ratio'):
            # Skip warning for deselected spins.
            if spin.select:
                warn(RelaxWarning("Spin '%s' does not contain Signal to Noise calculations. Perform the user function 'spectrum.sn_ratio'. This spin is skipped." % spin_id))
            continue

        # Loop over the IDs, collecting them into a list.
        ids = []
        for id in spin.peak_intensity:
            # Append the ID to the list.
            ids.append(id)

        # Sort the IDs alphanumerically.
        ids = sort_filenames(filenames=ids, rev=False)

        # Loop over the sorted ids.
        sn_val = []
        for id in ids:
            # Append the signal-to-noise ratio to the list.
            sn_val.append(spin.sn_ratio[id])

        # Convert the list to array.
        sn_val = asarray(sn_val)

        # Make the comparison for the whole array.
        test_arr = op(sn_val, ratio)

        # Determine how the test should evaluate.
        if all_sn:
            test = test_arr.all()
        else:
            test = test_arr.any()

        # Make a numpy array of the IDs, and extract the IDs which matched the test.
        ids_arr = asarray(ids)
        ids_test_arr = ids_arr[test_arr]

        # Invert the boolean array, and extract the IDs which failed.
        test_arr_inv = test_arr == False
        ids_test_arr_inv = ids_arr[test_arr_inv]

        # Printout.
        if verbose:
            subsection(file=sys.stdout, text="Signal to noise ratio comparison for spin ID '%s'"%spin_id, prespace=1, postspace=0)
            print("The following spectrum IDs evaluated to True: %s"%ids_test_arr)
            print("The following spectrum IDs evaluated to False: %s"%ids_test_arr_inv)
            print("The '%s' comparison mode was used for evaluation, which evaluated to: %s"%(text_all_sn, test))
            if test:
                print("The spin ID '%s' is %s"%(spin_id, text_sel))
            else:
                print("The spin ID '%s' is skipped"%spin_id)

        # If the test evaluates to True, then do selection action.
        if test:
            # Select/Deselect the spin.
            sel_func(spin_id=spin_id)

            # Store the spin ID for the summary printout.
            spin_ids.append(spin_id)

    # Summary printout.
    if verbose:
        if len(spin_ids) > 0:
            subsection(file=sys.stdout, text="For all of the S/N comparison tests, the following spin IDs were %s"%text_sel, prespace=1, postspace=0)
            print(spin_ids)
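The if/elif chain mapping the operation string onto the operator module could equivalently be written as a dictionary lookup, keeping the allowed operations in one place. A sketch (raising ValueError here so it is self-contained; the original raises RelaxError):

import operator

OPERATIONS = {
    '<': operator.lt, '<=': operator.le,
    '>': operator.gt, '>=': operator.ge,
    '==': operator.eq, '!=': operator.ne,
}

def get_operator(operation):
    """Map a comparison string to its operator function."""
    if operation not in OPERATIONS:
        raise ValueError("The compare operation does not belong to the allowed list of methods: %s" % sorted(OPERATIONS))
    return OPERATIONS[operation]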
Example #16
def minimise_r2eff(spins=None, spin_ids=None, min_algor=None, min_options=None, func_tol=None, grad_tol=None, max_iterations=None, constraints=False, scaling_matrix=None, verbosity=0, sim_index=None, lower=None, upper=None, inc=None):
    """Optimise the R2eff model by fitting the 2-parameter exponential curves.

    This mimics the R1 and R2 relax_fit analysis.


    @keyword spins:             The list of spins for the cluster.
    @type spins:                list of SpinContainer instances
    @keyword spin_ids:          The list of spin IDs for the cluster.
    @type spin_ids:             list of str
    @keyword min_algor:         The minimisation algorithm to use.
    @type min_algor:            str
    @keyword min_options:       An array of options to be used by the minimisation algorithm.
    @type min_options:          array of str
    @keyword func_tol:          The function tolerance which, when reached, terminates optimisation.  Setting this to None turns off the check.
    @type func_tol:             None or float
    @keyword grad_tol:          The gradient tolerance which, when reached, terminates optimisation.  Setting this to None turns off the check.
    @type grad_tol:             None or float
    @keyword max_iterations:    The maximum number of iterations for the algorithm.
    @type max_iterations:       int
    @keyword constraints:       If True, constraints are used during optimisation.
    @type constraints:          bool
    @keyword scaling_matrix:    The diagonal and square scaling matrix.
    @type scaling_matrix:       numpy rank-2, float64 array or None
    @keyword verbosity:         The amount of information to print.  The higher the value, the greater the verbosity.
    @type verbosity:            int
    @keyword sim_index:         The index of the simulation to optimise.  This should be None if normal optimisation is desired.
    @type sim_index:            None or int
    @keyword lower:             The model-specific lower bounds of the grid search.  The number of elements must equal the number of parameters in the model.  This optional argument is only used when doing a grid search.
    @type lower:                list of numbers
    @keyword upper:             The model-specific upper bounds of the grid search.  The number of elements must equal the number of parameters in the model.  This optional argument is only used when doing a grid search.
    @type upper:                list of numbers
    @keyword inc:               The model-specific increments for each dimension of the space for the grid search.  The number of elements in the array must equal the number of parameters in the model.  This argument is only used when doing a grid search.
    @type inc:                  list of int
    """

    # Check that the C modules have been compiled.
    if not C_module_exp_fn:
        raise RelaxError("Relaxation curve fitting is not available.  Try compiling the C modules on your platform.")

    # Loop over the spins.
    for si in range(len(spins)):
        # Skip deselected spins.
        if not spins[si].select:
            continue

        # Loop over each spectrometer frequency and dispersion point.
        for exp_type, frq, offset, point in loop_exp_frq_offset_point():
            # The parameter key.
            param_key = return_param_key_from_data(exp_type=exp_type, frq=frq, offset=offset, point=point)

            # The initial parameter vector.
            param_vector = assemble_param_vector(spins=[spins[si]], key=param_key, sim_index=sim_index)

            # Diagonal scaling.
            if scaling_matrix is not None:
                param_vector = dot(inv(scaling_matrix), param_vector)

            # Linear constraints.
            A, b = None, None
            if constraints:
                A, b = linear_constraints(spins=[spins[si]], scaling_matrix=scaling_matrix)

            # Print out.
            if verbosity >= 1:
                # Individual spin section.
                top = 2
                if verbosity >= 2:
                    top += 2
                text = "Fitting to spin %s, frequency %s and dispersion point %s" % (spin_ids[si], frq, point)
                subsection(file=sys.stdout, text=text, prespace=top)

                # Grid search printout.
                if match('^[Gg]rid', min_algor):
                    result = 1
                    for x in inc:
                        result = mul(result, x)
                    print("Unconstrained grid search size: %s (constraints may decrease this size).\n" % result)

            # The peak intensities, errors and times.
            values = []
            errors = []
            times = []
            for time in loop_time(exp_type=exp_type, frq=frq, offset=offset, point=point):
                values.append(average_intensity(spin=spins[si], exp_type=exp_type, frq=frq, offset=offset, point=point, time=time, sim_index=sim_index))
                errors.append(average_intensity(spin=spins[si], exp_type=exp_type, frq=frq, offset=offset, point=point, time=time, error=True))
                times.append(time)

            # Raise an error if the number of time points is less than 3.
            if len(times) < 3:
                subsection(file=sys.stdout, text="Exponential curve fitting error for point:", prespace=2)
                point_info = "%s at %3.1f MHz, for offset=%3.3f ppm and dispersion point %-5.1f, with %i time points." % (exp_type, frq/1E6, offset, point, len(times))
                print(point_info)
                raise RelaxError("The data setup points to exponential curve fitting, but only %i time points was found, where 3 time points is minimum.  If calculating R2eff values for fixed relaxation time period data, check that a reference intensity has been specified for each offset value."%(len(times)))

            # The scaling matrix in a diagonalised list form.
            scaling_list = []
            if scaling_matrix is None:
                for i in range(len(param_vector)):
                    scaling_list.append(1.0)
            else:
                for i in range(len(scaling_matrix)):
                    scaling_list.append(scaling_matrix[i, i])

            # Initialise the function to minimise.
            model = Relax_fit_opt(model='exp', num_params=len(param_vector), values=values, errors=errors, relax_times=times, scaling_matrix=scaling_list)

            # Grid search.
            if search('^[Gg]rid', min_algor):
                results = grid(func=model.func, args=(), num_incs=inc, lower=lower, upper=upper, A=A, b=b, verbosity=verbosity)

                # Unpack the results.
                param_vector, chi2, iter_count, warning = results
                f_count = iter_count
                g_count = 0.0
                h_count = 0.0

            # Minimisation.
            else:
                results = generic_minimise(func=model.func, dfunc=model.dfunc, d2func=model.d2func, args=(), x0=param_vector, min_algor=min_algor, min_options=min_options, func_tol=func_tol, grad_tol=grad_tol, maxiter=max_iterations, A=A, b=b, full_output=True, print_flag=verbosity)

                # Unpack the results.
                if results is None:
                    return
                param_vector, chi2, iter_count, f_count, g_count, h_count, warning = results

            # Scaling.
            if scaling_matrix is not None:
                param_vector = dot(scaling_matrix, param_vector)

            # Disassemble the parameter vector.
            disassemble_param_vector(param_vector=param_vector, spins=[spins[si]], key=param_key, sim_index=sim_index)

            # Monte Carlo minimisation statistics.
            if sim_index is not None:
                # Chi-squared statistic.
                spins[si].chi2_sim[sim_index] = chi2

                # Iterations.
                spins[si].iter_sim[sim_index] = iter_count

                # Function evaluations.
                spins[si].f_count_sim[sim_index] = f_count

                # Gradient evaluations.
                spins[si].g_count_sim[sim_index] = g_count

                # Hessian evaluations.
                spins[si].h_count_sim[sim_index] = h_count

                # Warning.
                spins[si].warning_sim[sim_index] = warning

            # Normal statistics.
            else:
                # Chi-squared statistic.
                spins[si].chi2 = chi2

                # Iterations.
                spins[si].iter = iter_count

                # Function evaluations.
                spins[si].f_count = f_count

                # Gradient evaluations.
                spins[si].g_count = g_count

                # Hessian evaluations.
                spins[si].h_count = h_count

                # Warning.
                spins[si].warning = warning
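The two-parameter exponential curve being fitted here is I(t) = i0 * exp(-r2eff * t), and the target function handed to the optimiser is the chi-squared of the back-calculated intensities against the measured values. A minimal pure-Python sketch of that target (the Relax_fit_opt class used above relies on the compiled C modules, which the example guards with the C_module_exp_fn check):

from numpy import asarray, exp

def exp_chi2(params, values, errors, times):
    """Chi-squared for the 2-parameter exponential model I(t) = i0 * exp(-r2eff * t)."""
    r2eff, i0 = params
    # Back-calculate the intensities at each relaxation time.
    back_calc = i0 * exp(-r2eff * asarray(times))
    # Sum of squared, error-weighted residuals.
    return (((asarray(values) - back_calc) / asarray(errors))**2).sum()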
Example #17
def copy(pipe_from=None, pipe_to=None):
    """Copy dispersion parameters from one data pipe to another, taking the median of previous values to a start value for clusters.
    Taking the median prevent averaging extreme outliers.

    @param pipe_from:   The data pipe to copy the value from.  This defaults to the current data pipe.
    @type pipe_from:    str
    @param pipe_to:     The data pipe to copy the value to.  This defaults to the current data pipe.
    @type pipe_to:      str
    """

    # The current data pipe.
    pipe_orig = pipes.cdp_name()
    if pipe_from is None:
        pipe_from = pipe_orig
    if pipe_to is None:
        pipe_to = pipe_orig

    # Test that the pipes exist.
    check_pipe(pipe_from)
    check_pipe(pipe_to)

    # Test that the pipes are not the same.
    if pipe_from == pipe_to:
        raise RelaxError("The source and destination pipes cannot be the same.")

    # Test if the sequence data for pipe_from is loaded.
    if not exists_mol_res_spin_data(pipe_from):
        raise RelaxNoSequenceError(pipe_from)

    # Test if the sequence data for pipe_to is loaded.
    if not exists_mol_res_spin_data(pipe_to):
        raise RelaxNoSequenceError(pipe_to)

    # Switch to the destination data pipe.
    pipes.switch(pipe_to)

    # Loop over the clusters.
    for spin_ids in loop_cluster():
        # Initialise some variables.
        model = None
        pA = []
        pB = []
        pC = []
        kex = []
        kex_AB = []
        kex_AC = []
        kex_BC = []
        k_AB = []
        kB = []
        kC = []
        tex = []
        count = 0
        spins_from = []
        spins_to = []
        selected_cluster = False

        # Loop over the spins, adding parameters to a list, which in the end will be used to find the median.
        for id in spin_ids:
            # Get the spins, then store them.
            spin_from = return_spin(id, pipe=pipe_from)
            spin_to = return_spin(id, pipe=pipe_to)
            spins_from.append(spin_from)
            spins_to.append(spin_to)

            # Skip deselected spins.
            if not spin_from.select or not spin_to.select:
                continue

            # The first printout.
            if not selected_cluster:
                subsection(file=sys.stdout, text="Copying parameters for the spin block %s"%spin_ids, prespace=2)

            # Change the cluster selection flag.
            selected_cluster = True

            # The model.
            if not model:
                model = spin_from.model

            # Check that the models match for all spins of the cluster.
            if spin_from.model != model:
                raise RelaxError("The model '%s' of spin '%s' from the source data pipe does not match the '%s' model of previous spins of the cluster." % (spin_from.model, id, model))
            if spin_to.model != model:
                raise RelaxError("The model '%s' of spin '%s' from the destination data pipe does not match the '%s' model of previous spins of the cluster." % (spin_from.model, id, model))

            # Collect the source parameters.
            if 'pA' in spin_from.params:
                pA.append(spin_from.pA)
            if 'pB' in spin_from.params:
                pB.append(spin_from.pB)
            if 'pC' in spin_from.params:
                pC.append(spin_from.pC)
            if 'kex' in spin_from.params:
                kex.append(spin_from.kex)
            if 'kex_AB' in spin_from.params:
                kex_AB.append(spin_from.kex_AB)
            if 'kex_AC' in spin_from.params:
                kex_AC.append(spin_from.kex_AC)
            if 'kex_BC' in spin_from.params:
                kex_BC.append(spin_from.kex_BC)
            if 'k_AB' in spin_from.params:
                k_AB.append(spin_from.k_AB)
            if 'kB' in spin_from.params:
                kB.append(spin_from.kB)
            if 'kC' in spin_from.params:
                kC.append(spin_from.kC)
            if 'tex' in spin_from.params:
                tex.append(spin_from.tex)

            # Increment the spin count.
            count += 1

        # The cluster is not selected, so move to the next.
        if not selected_cluster:
            continue

        # Take median of parameters.
        if len(pA) > 0:
            pA = median(pA)
            print("Median pA value:  %.15f" % pA)
        if len(pB) > 0:
            pB = median(pB)
            print("Median pB value:  %.15f" % pB)
        if len(pC) > 0:
            pC = median(pC)
            print("Median pC value:  %.15f" % pC)
        if len(kex) > 0:
            kex = median(kex)
            print("Median kex value: %.15f" % kex)
        if len(kex_AB) > 0:
            kex_AB = median(kex_AB)
            print("Median kex_AB value: %.15f" % kex_AB)
        if len(kex_AC) > 0:
            kex_AC = median(kex_AC)
            print("Median kex_AC value: %.15f" % kex_AC)
        if len(kex_BC) > 0:
            kex_BC = median(kex_BC)
            print("Median kex_BC value: %.15f" % kex_BC)
        if len(k_AB) > 0:
            k_AB = median(k_AB)
            print("Median k_AB value: %.15f" % k_AB)
        if len(kB) > 0:
            kB = median(kB)
            print("Median kB value:  %.15f" % kB)
        if len(kC) > 0:
            kC = median(kC)
            print("Median kC value:  %.15f" % kC)
        if len(tex) > 0:
            tex = median(tex)
            print("Median tex value: %.15f" % tex)

        # Loop over the spins, this time copying the parameters.
        for i in range(len(spin_ids)):
            # Alias the containers.
            spin_from = spins_from[i]
            spin_to = spins_to[i]

            # Skip deselected spins.
            if not spin_from.select or not spin_to.select:
                continue

            # The R20 parameters.
            if 'r2' in spin_from.params:
                spin_to.r2 = deepcopy(spin_from.r2)
            if 'r2a' in spin_from.params:
                spin_to.r2a = deepcopy(spin_from.r2a)
            if 'r2b' in spin_from.params:
                spin_to.r2b = deepcopy(spin_from.r2b)

            # The median parameters.
            if 'pB' in spin_from.params and 'pC' not in spin_from.params:
                spin_to.pA = pA
                spin_to.pB = pB
                spin_to.pC = 1.0 - pA - pB
            elif 'pA' in spin_from.params:
                spin_to.pA = pA
                spin_to.pB = 1.0 - pA
            if 'kex' in spin_from.params:
                spin_to.kex = kex
            if 'kex_AB' in spin_from.params:
                spin_to.kex_AB = kex_AB
            if 'kex_AC' in spin_from.params:
                spin_to.kex_AC = kex_AC
            if 'kex_BC' in spin_from.params:
                spin_to.kex_BC = kex_BC
            if 'k_AB' in spin_from.params:
                spin_to.k_AB = k_AB
            if 'kB' in spin_from.params:
                spin_to.kB = kB
            if 'kC' in spin_from.params:
                spin_to.kC = kC
            if 'tex' in spin_from.params:
                spin_to.tex = tex

            # All other spin specific parameters.
            for param in spin_from.params:
                if param in ['r2', 'r2a', 'r2b', 'pA', 'pB', 'pC', 'kex', 'kex_AB', 'kex_AC', 'kex_BC', 'k_AB', 'kB', 'kC', 'tex']:
                    continue

                # Copy the value.
                setattr(spin_to, param, deepcopy(getattr(spin_from, param)))

    # Switch back to the original data pipe.
    pipes.switch(pipe_orig)
Example #18
def estimate_r2eff(method='minfx',
                   min_algor='simplex',
                   c_code=True,
                   constraints=False,
                   chi2_jacobian=False,
                   spin_id=None,
                   ftol=1e-15,
                   xtol=1e-15,
                   maxfev=10000000,
                   factor=100.0,
                   verbosity=1):
    """Estimate r2eff and errors by exponential curve fitting with scipy.optimize.leastsq or minfx.

    THIS IS ONLY FOR TESTING.

    scipy.optimize.leastsq is a wrapper around MINPACK's lmdif and lmder algorithms.

    MINPACK is a FORTRAN90 library which solves systems of nonlinear equations, or carries out the least squares minimization of the residual of a set of linear or nonlinear equations.

    Errors are calculated by taking the square root of the reported co-variance.

    This can be a huge time-saving step when performing model fitting in R1rho.
    Errors of R2eff values are normally estimated by time-consuming Monte Carlo simulations.

    The initial guess for the starting parameters x0 = [r2eff_est, i0_est] is found by converting the exponential curve to a linear problem, then solving for the initial guess by linear least squares of:  ln(Intensity[j]) = ln(i0) - time[j] * r2eff.


    @keyword method:            The method to minimise and estimate errors.  Options are: 'minfx' or 'scipy.optimize.leastsq'.
    @type method:               string
    @keyword min_algor:         The minimisation algorithm
    @type min_algor:            string
    @keyword c_code:            Whether to optimise with the C code.
    @type c_code:               bool
    @keyword constraints:       If constraints should be used.
    @type constraints:          bool
    @keyword chi2_jacobian:     If the chi2 Jacobian should be used.
    @type chi2_jacobian:        bool
    @keyword spin_id:           The spin identification string.
    @type spin_id:              str
    @keyword ftol:              The function tolerance for the relative error desired in the sum of squares, passed to leastsq.
    @type ftol:                 float
    @keyword xtol:              The error tolerance for the relative error desired in the approximate solution, passed to leastsq.
    @type xtol:                 float
    @keyword maxfev:            The maximum number of function evaluations, passed to leastsq.  If zero, then 100*(N+1) is the maximum number of function calls, where N is the number of elements in x0=[r2eff, i0].
    @type maxfev:               int
    @keyword factor:            The initial step bound, passed to leastsq.  It determines the initial step bound as factor * ||diag * x||.  It should be in the interval (0.1, 100).
    @type factor:               float
    @keyword verbosity:         The amount of information to print.  The higher the value, the greater the verbosity.
    @type verbosity:            int
    """

    # Perform checks.
    check_model_type(model=MODEL_R2EFF)

    # Check that the C modules have been compiled.
    if not C_module_exp_fn and method == 'minfx':
        raise RelaxError(
            "Relaxation curve fitting is not available.  Try compiling the C modules on your platform."
        )

    # Set class scipy setting.
    E = Exp(verbosity=verbosity)
    E.set_settings_leastsq(ftol=ftol, xtol=xtol, maxfev=maxfev, factor=factor)

    # Check if intensity errors have already been calculated by the user.
    precalc = True
    for cur_spin, mol_name, resi, resn, cur_spin_id in spin_loop(
            selection=spin_id, full_info=True, return_id=True,
            skip_desel=True):
        # No pre-calculated errors.
        if not hasattr(cur_spin, 'peak_intensity_err'):
            precalc = False
            break

        # Determine if a spectrum ID is missing from the list.
        for id in cdp.spectrum_ids:
            if id not in cur_spin.peak_intensity_err:
                precalc = False
                break

    # Loop over the spins.
    for cur_spin, mol_name, resi, resn, cur_spin_id in spin_loop(
            selection=spin_id, full_info=True, return_id=True,
            skip_desel=True):
        # Generate spin string.
        spin_string = generate_spin_string(spin=cur_spin,
                                           mol_name=mol_name,
                                           res_num=resi,
                                           res_name=resn)

        # Print information.
        if E.verbosity >= 1:
            # Individual spin block section.
            top = 2
            if E.verbosity >= 2:
                top += 2
            subsection(file=sys.stdout,
                       text="Fitting with %s to: %s" % (method, spin_string),
                       prespace=top)
            if method == 'minfx':
                subsection(
                    file=sys.stdout,
                    text=
                    "min_algor='%s', c_code=%s, constraints=%s, chi2_jacobian?=%s"
                    % (min_algor, c_code, constraints, chi2_jacobian),
                    prespace=0)

        # Loop over each spectrometer frequency and dispersion point.
        for exp_type, frq, offset, point, ei, mi, oi, di in loop_exp_frq_offset_point(
                return_indices=True):
            # The parameter key.
            param_key = return_param_key_from_data(exp_type=exp_type,
                                                   frq=frq,
                                                   offset=offset,
                                                   point=point)

            # The peak intensities, errors and times.
            values = []
            errors = []
            times = []
            for time in loop_time(exp_type=exp_type,
                                  frq=frq,
                                  offset=offset,
                                  point=point):
                values.append(
                    average_intensity(spin=cur_spin,
                                      exp_type=exp_type,
                                      frq=frq,
                                      offset=offset,
                                      point=point,
                                      time=time))
                errors.append(
                    average_intensity(spin=cur_spin,
                                      exp_type=exp_type,
                                      frq=frq,
                                      offset=offset,
                                      point=point,
                                      time=time,
                                      error=True))
                times.append(time)

            # Convert to numpy array.
            values = asarray(values)
            errors = asarray(errors)
            times = asarray(times)

            # Initialise data.
            E.setup_data(values=values, errors=errors, times=times)

            # Get the result based on method.
            if method == 'scipy.optimize.leastsq':
                # Acquire results.
                results = minimise_leastsq(E=E)

            elif method == 'minfx':
                # Set settings.
                E.set_settings_minfx(min_algor=min_algor,
                                     c_code=c_code,
                                     chi2_jacobian=chi2_jacobian,
                                     constraints=constraints)

                # Acquire results.
                results = minimise_minfx(E=E)
            else:
                raise RelaxError(
                    "Method for minimisation not known. Try setting: method='scipy.optimize.leastsq'."
                )

            # Unpack results
            param_vector, param_vector_error, chi2, iter_count, f_count, g_count, h_count, warning = results

            # Extract values.
            r2eff = param_vector[0]
            i0 = param_vector[1]
            r2eff_err = param_vector_error[0]
            i0_err = param_vector_error[1]

            # Disassemble the parameter vector.
            disassemble_param_vector(param_vector=param_vector,
                                     spins=[cur_spin],
                                     key=param_key)

            # Errors.
            if not hasattr(cur_spin, 'r2eff_err'):
                setattr(cur_spin, 'r2eff_err',
                        deepcopy(getattr(cur_spin, 'r2eff')))
            if not hasattr(cur_spin, 'i0_err'):
                setattr(cur_spin, 'i0_err', deepcopy(getattr(cur_spin, 'i0')))

            # Set error.
            cur_spin.r2eff_err[param_key] = r2eff_err
            cur_spin.i0_err[param_key] = i0_err

            # Chi-squared statistic.
            cur_spin.chi2 = chi2

            # Function evaluations.
            cur_spin.f_count = f_count

            # Warning.
            cur_spin.warning = warning

            # Print information.
            print_strings = []
            if E.verbosity >= 1:
                # Add print strings.
                point_info = "%s at %3.1f MHz, for offset=%3.3f ppm and dispersion point %-5.1f, with %i time points." % (
                    exp_type, frq / 1E6, offset, point, len(times))
                print_strings.append(point_info)

                par_info = "r2eff=%3.3f r2eff_err=%3.4f, i0=%6.1f, i0_err=%3.4f, chi2=%3.3f.\n" % (
                    r2eff, r2eff_err, i0, i0_err, chi2)
                print_strings.append(par_info)

                if E.verbosity >= 2:
                    time_info = ', '.join(map(str, times))
                    print_strings.append('For time array: ' + time_info +
                                         '.\n\n')

            # Print the information.
            if len(print_strings) > 0:
                for print_string in print_strings:
                    print(print_string)
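The r2eff and i0 values unpacked above come from fitting the standard two-parameter exponential decay to the peak intensities.  A minimal self-contained sketch of that target function (illustrative only, not the relax implementation; the helper names are hypothetical):

import numpy as np

def exp_decay(times, r2eff, i0):
    """Back-calculate peak intensities as I(t) = I0 * exp(-R2eff * t)."""
    return i0 * np.exp(-r2eff * times)

def chi2(params, values, errors, times):
    """The chi-squared statistic minimised by the optimiser."""
    back_calc = exp_decay(times, *params)
    return np.sum(((values - back_calc) / errors)**2)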
Exemple #19
0
def copy(pipe_from=None, pipe_to=None):
    """Copy dispersion parameters from one data pipe to another, averaging values for clusters.

    @param pipe_from:   The data pipe to copy the value from.  This defaults to the current data pipe.
    @type pipe_from:    str
    @param pipe_to:     The data pipe to copy the value to.  This defaults to the current data pipe.
    @type pipe_to:      str
    """

    # The current data pipe.
    pipe_orig = pipes.cdp_name()
    if pipe_from is None:
        pipe_from = pipe_orig
    if pipe_to is None:
        pipe_to = pipe_orig

    # Test that the pipes exist.
    pipes.test(pipe_from)
    pipes.test(pipe_to)

    # Test that the pipes are not the same.
    if pipe_from == pipe_to:
        raise RelaxError("The source and destination pipes cannot be the same.")

    # Test if the sequence data for pipe_from is loaded.
    if not exists_mol_res_spin_data(pipe_from):
        raise RelaxNoSequenceError(pipe_from)

    # Test if the sequence data for pipe_to is loaded.
    if not exists_mol_res_spin_data(pipe_to):
        raise RelaxNoSequenceError(pipe_to)

    # Switch to the destination data pipe.
    pipes.switch(pipe_to)

    # Loop over the clusters.
    for spin_ids in loop_cluster():
        # Initialise some variables.
        model = None
        pA = 0.0
        pB = 0.0
        pC = 0.0
        kex = 0.0
        kex_AB = 0.0
        kex_AC = 0.0
        kex_BC = 0.0
        k_AB = 0.0
        kB = 0.0
        kC = 0.0
        tex = 0.0
        count = 0
        spins_from = []
        spins_to = []
        selected_cluster = False

        # Loop over the spins, summing the parameters to be averaged.
        for id in spin_ids:
            # Get the spins, then store them.
            spin_from = return_spin(id, pipe=pipe_from)
            spin_to = return_spin(id, pipe=pipe_to)
            spins_from.append(spin_from)
            spins_to.append(spin_to)

            # Skip deselected spins.
            if not spin_from.select or not spin_to.select:
                continue

            # The first printout.
            if not selected_cluster:
                subsection(file=sys.stdout, text="Copying parameters for the spin block %s"%spin_ids, prespace=2)

            # Change the cluster selection flag.
            selected_cluster = True

            # The model.
            if not model:
                model = spin_from.model

            # Check that the models match for all spins of the cluster.
            if spin_from.model != model:
                raise RelaxError("The model '%s' of spin '%s' from the source data pipe does not match the '%s' model of previous spins of the cluster." % (spin_from.model, id, model))
            if spin_to.model != model:
                raise RelaxError("The model '%s' of spin '%s' from the destination data pipe does not match the '%s' model of previous spins of the cluster." % (spin_to.model, id, model))

            # Sum the source parameters.
            if 'pA' in spin_from.params:
                pA += spin_from.pA
            if 'pB' in spin_from.params:
                pB += spin_from.pB
            if 'pC' in spin_from.params:
                pC += spin_from.pC
            if 'kex' in spin_from.params:
                kex += spin_from.kex
            if 'kex_AB' in spin_from.params:
                kex_AB += spin_from.kex_AB
            if 'kex_AC' in spin_from.params:
                kex_AC += spin_from.kex_AC
            if 'kex_BC' in spin_from.params:
                kex_BC += spin_from.kex_BC
            if 'k_AB' in spin_from.params:
                k_AB += spin_from.k_AB
            if 'kB' in spin_from.params:
                kB += spin_from.kB
            if 'kC' in spin_from.params:
                kC += spin_from.kC
            if 'tex' in spin_from.params:
                tex += spin_from.tex

            # Increment the spin count.
            count += 1

        # The cluster is not selected, so move to the next.
        if not selected_cluster:
            continue

        # Average parameters.
        if pA != 0.0:
            pA = pA / count
            print("Averaged pA value:  %.15f" % pA)
        if pB != 0.0:
            pB = pB / count
            print("Averaged pB value:  %.15f" % pB)
        if pC != 0.0:
            pC = pC / count
            print("Averaged pC value:  %.15f" % pC)
        if kex != 0.0:
            kex = kex / count
            print("Averaged kex value: %.15f" % kex)
        if kex_AB != 0.0:
            kex_AB = kex_AB / count
            print("Averaged kex_AB value: %.15f" % kex_AB)
        if kex_AC != 0.0:
            kex_AC = kex_AC / count
            print("Averaged kex_AC value: %.15f" % kex_AC)
        if kex_BC != 0.0:
            kex_BC = kex_BC / count
            print("Averaged kex_BC value: %.15f" % kex_BC)
        if k_AB != 0.0:
            k_AB = k_AB / count
            print("Averaged k_AB value: %.15f" % k_AB)
        if kB != 0.0:
            kB = kB / count
            print("Averaged kB value:  %.15f" % kB)
        if kC != 0.0:
            kC = kC / count
            print("Averaged kC value:  %.15f" % kC)
        if tex != 0.0:
            tex = tex / count
            print("Averaged tex value: %.15f" % tex)

        # Loop over the spins, this time copying the parameters.
        for i in range(len(spin_ids)):
            # Alias the containers.
            spin_from = spins_from[i]
            spin_to = spins_to[i]

            # Skip deselected spins.
            if not spin_from.select or not spin_to.select:
                continue

            # The R20 parameters.
            if 'r2' in spin_from.params:
                spin_to.r2 = deepcopy(spin_from.r2)
            if 'r2a' in spin_from.params:
                spin_to.r2a = deepcopy(spin_from.r2a)
            if 'r2b' in spin_from.params:
                spin_to.r2b = deepcopy(spin_from.r2b)

            # The averaged parameters.
            if 'pB' in spin_from.params and 'pC' not in spin_from.params:
                spin_to.pA = pA
                spin_to.pB = pB
                spin_to.pC = 1.0 - pA - pB
            elif 'pA' in spin_from.params:
                spin_to.pA = pA
                spin_to.pB = 1.0 - pA
            if 'kex' in spin_from.params:
                spin_to.kex = kex
            if 'kex_AB' in spin_from.params:
                spin_to.kex_AB = kex_AB
            if 'kex_AC' in spin_from.params:
                spin_to.kex_AC = kex_AC
            if 'kex_BC' in spin_from.params:
                spin_to.kex_BC = kex_BC
            if 'k_AB' in spin_from.params:
                spin_to.k_AB = k_AB
            if 'kB' in spin_from.params:
                spin_to.kB = kB
            if 'kC' in spin_from.params:
                spin_to.kC = kC
            if 'tex' in spin_from.params:
                spin_to.tex = tex

            # All other spin specific parameters.
            for param in spin_from.params:
                if param in ['r2', 'pA', 'pB', 'pC', 'kex', 'kex_AB', 'kex_AC', 'kex_BC', 'k_AB', 'kB', 'kC', 'tex']:
                    continue

                # Copy the value.
                setattr(spin_to, param, deepcopy(getattr(spin_from, param)))

    # Switch back to the original data pipe.
    pipes.switch(pipe_orig)
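A hedged usage sketch for this function; the pipe names are hypothetical, standing in for any two existing data pipes that share the same spin sequence:

# Average the clustered dispersion parameters of the 'CR72_fit' pipe and copy
# them into the 'CR72_mc' pipe (both names are illustrative).
copy(pipe_from='CR72_fit', pipe_to='CR72_mc')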
Exemple #20
0
    def covariance_matrix(self, model_info=None, verbosity=1):
        """Return the Jacobian and weights required for parameter errors via the covariance matrix.

        @keyword model_info:    The spin container and the spin ID string from the _model_loop_spin() method.
        @type model_info:       SpinContainer instance, str
        @keyword verbosity:     The amount of information to print.  The higher the value, the greater the verbosity.
        @type verbosity:        int
        @return:                The Jacobian and weight matrices for the given model.
        @rtype:                 numpy rank-2 array, numpy rank-2 array
        """

        # Unpack the data.
        spin, spin_id = model_info

        # Check that the C modules have been compiled.
        if not C_module_exp_fn:
            raise RelaxError("Relaxation curve fitting is not available.  Try compiling the C modules on your platform.")

        # Raise an error if the spin has not been optimised.
        if not (hasattr(spin, 'rx') and hasattr(spin, 'i0')):
            raise RelaxError("Spin '%s' does not contain optimised 'rx' and 'i0' values.  Try executing: minimise.execute(min_algor='Newton', constraints=False)"%(spin_id))

        # Raise a warning if the gradient count is 0.  This could indicate that minimisation has not yet been performed.
        if hasattr(spin, 'g_count'):
            if getattr(spin, 'g_count') == 0.0:
                text = "Spin %s contains a gradient count of 0.0.  Is the rx parameter optimised?  Try executing: minimise.execute(min_algor='Newton', constraints=False)" %(spin_id)
                warn(RelaxWarning("%s." % text))

        # Print information.
        if verbosity >= 1:
            # Individual spin block section.
            top = 2
            if verbosity >= 2:
                top += 2
            subsection(file=sys.stdout, text="Estimating rx error for spin: %s"%spin_id, prespace=top)

        # The peak intensities, errors and times.
        values = []
        errors = []
        times = []
        for key in spin.peak_intensity:
            values.append(spin.peak_intensity[key])
            errors.append(spin.peak_intensity_err[key])
            times.append(cdp.relax_times[key])

        # Convert to numpy array.
        values = asarray(values)
        errors = asarray(errors)
        times = asarray(times)

        # Create the parameter vector and scaling matrix (as a diagonalised list).
        param_vector = assemble_param_vector(spin=spin)
        scaling_list = []
        for i in range(len(spin.params)):
            scaling_list.append(1.0)

        # Initialise data in C code.
        model = Relax_fit_opt(model=spin.model, num_params=len(param_vector), values=values, errors=errors, relax_times=times, scaling_matrix=scaling_list)

        # Use the direct Jacobian from the model function.
        jacobian_matrix_exp = transpose(asarray(model.jacobian(param_vector)))
        weights = 1. / errors**2

        # Return the matrices.
        return jacobian_matrix_exp, weights
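The returned Jacobian and weights feed a covariance-based error estimate.  A minimal numpy sketch of that step, assuming the usual estimator Qxx = (J^T W J)^-1 (the helper name is illustrative, mirroring what multifit_covar() is used for elsewhere in these examples):

import numpy as np

def covar_from_jacobian(J, weights):
    """Covariance matrix Qxx = (J^T W J)^-1, with W = diag(weights)."""
    W = np.diag(weights)
    return np.linalg.inv(J.T @ W @ J)

# One standard deviation parameter errors are the square roots of the diagonal:
# param_errors = np.sqrt(np.diag(covar_from_jacobian(J, weights)))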
Exemple #21
0
def minimise_r2eff(spins=None, spin_ids=None, min_algor=None, min_options=None, func_tol=None, grad_tol=None, max_iterations=None, constraints=False, scaling_matrix=None, verbosity=0, sim_index=None, lower=None, upper=None, inc=None):
    """Optimise the R2eff model by fitting the 2-parameter exponential curves.

    This mimics the R1 and R2 relax_fit analysis.


    @keyword spins:             The list of spins for the cluster.
    @type spins:                list of SpinContainer instances
    @keyword spin_ids:          The list of spin IDs for the cluster.
    @type spin_ids:             list of str
    @keyword min_algor:         The minimisation algorithm to use.
    @type min_algor:            str
    @keyword min_options:       An array of options to be used by the minimisation algorithm.
    @type min_options:          array of str
    @keyword func_tol:          The function tolerance which, when reached, terminates optimisation.  Setting this to None turns off the check.
    @type func_tol:             None or float
    @keyword grad_tol:          The gradient tolerance which, when reached, terminates optimisation.  Setting this to None turns off the check.
    @type grad_tol:             None or float
    @keyword max_iterations:    The maximum number of iterations for the algorithm.
    @type max_iterations:       int
    @keyword constraints:       If True, constraints are used during optimisation.
    @type constraints:          bool
    @keyword scaling_matrix:    The diagonal and square scaling matrix.
    @type scaling_matrix:       numpy rank-2, float64 array or None
    @keyword verbosity:         The amount of information to print.  The higher the value, the greater the verbosity.
    @type verbosity:            int
    @keyword sim_index:         The index of the simulation to optimise.  This should be None if normal optimisation is desired.
    @type sim_index:            None or int
    @keyword lower:             The model specific lower bounds of the grid search.  The number of elements must equal the number of parameters in the model.  This optional argument is only used when doing a grid search.
    @type lower:                list of numbers
    @keyword upper:             The model specific upper bounds of the grid search.  The number of elements must equal the number of parameters in the model.  This optional argument is only used when doing a grid search.
    @type upper:                list of numbers
    @keyword inc:               The model specific increments for each dimension of the grid search space.  The number of elements must equal the number of parameters in the model.  This argument is only used when doing a grid search.
    @type inc:                  list of int
    """

    # Check that the C modules have been compiled.
    if not C_module_exp_fn:
        raise RelaxError("Relaxation curve fitting is not available.  Try compiling the C modules on your platform.")

    # Loop over the spins.
    for si in range(len(spins)):
        # Skip deselected spins.
        if not spins[si].select:
            continue

        # Loop over each spectrometer frequency and dispersion point.
        for exp_type, frq, offset, point in loop_exp_frq_offset_point():
            # The parameter key.
            param_key = return_param_key_from_data(exp_type=exp_type, frq=frq, offset=offset, point=point)

            # The initial parameter vector.
            param_vector = assemble_param_vector(spins=[spins[si]], key=param_key, sim_index=sim_index)

            # Diagonal scaling.
            if scaling_matrix is not None:
                param_vector = dot(inv(scaling_matrix), param_vector)

            # Linear constraints.
            A, b = None, None
            if constraints:
                A, b = linear_constraints(spins=[spins[si]], scaling_matrix=scaling_matrix)

            # Print out.
            if verbosity >= 1:
                # Individual spin section.
                top = 2
                if verbosity >= 2:
                    top += 2
                text = "Fitting to spin %s, frequency %s and dispersion point %s" % (spin_ids[si], frq, point)
                subsection(file=sys.stdout, text=text, prespace=top)

                # Grid search printout.
                if match('^[Gg]rid', min_algor):
                    result = 1
                    for x in inc:
                        result = mul(result, x)
                    print("Unconstrained grid search size: %s (constraints may decrease this size).\n" % result)

            # The peak intensities, errors and times.
            values = []
            errors = []
            times = []
            data_flag = True
            for time in loop_time(exp_type=exp_type, frq=frq, offset=offset, point=point):
                # Check the peak intensity keys.
                int_keys = find_intensity_keys(exp_type=exp_type, frq=frq, offset=offset, point=point, time=time)
                peak_intensities = spins[si].peak_intensity
                if sim_index is not None:
                    peak_intensities = spins[si].peak_intensity_sim
                for i in range(len(int_keys)):
                    if int_keys[i] not in peak_intensities:
                        if verbosity:
                            warn(RelaxWarning("The spin %s peak intensity key '%s' is not present, skipping the optimisation." % (spin_ids[si], int_keys[i])))
                        data_flag = False
                        break

                if data_flag:
                    values.append(average_intensity(spin=spins[si], exp_type=exp_type, frq=frq, offset=offset, point=point, time=time, sim_index=sim_index))
                    errors.append(average_intensity(spin=spins[si], exp_type=exp_type, frq=frq, offset=offset, point=point, time=time, error=True))
                    times.append(time)
            if not data_flag:
                continue

            # Raise an error if the number of time points is less than 3.
            if len(times) < 3:
                subsection(file=sys.stdout, text="Exponential curve fitting error for point:", prespace=2)
                point_info = "%s at %3.1f MHz, for offset=%3.3f ppm and dispersion point %-5.1f, with %i time points." % (exp_type, frq/1E6, offset, point, len(times))
                print(point_info)
                raise RelaxError("The data setup points to exponential curve fitting, but only %i time points were found, where 3 time points is the minimum.  If calculating R2eff values for fixed relaxation time period data, check that a reference intensity has been specified for each offset value."%(len(times)))

            # The scaling matrix in a diagonalised list form.
            scaling_list = []
            if scaling_matrix is None:
                for i in range(len(param_vector)):
                    scaling_list.append(1.0)
            else:
                for i in range(len(scaling_matrix)):
                    scaling_list.append(scaling_matrix[i, i])

            # Initialise the function to minimise.
            model = Relax_fit_opt(model='exp', num_params=len(param_vector), values=values, errors=errors, relax_times=times, scaling_matrix=scaling_list)

            # Grid search.
            if search('^[Gg]rid', min_algor):
                results = grid(func=model.func, args=(), num_incs=inc, lower=lower, upper=upper, A=A, b=b, verbosity=verbosity)

                # Unpack the results.
                param_vector, chi2, iter_count, warning = results
                f_count = iter_count
                g_count = 0.0
                h_count = 0.0

            # Minimisation.
            else:
                results = generic_minimise(func=model.func, dfunc=model.dfunc, d2func=model.d2func, args=(), x0=param_vector, min_algor=min_algor, min_options=min_options, func_tol=func_tol, grad_tol=grad_tol, maxiter=max_iterations, A=A, b=b, full_output=True, print_flag=verbosity)

                # Unpack the results.
                if results is None:
                    return
                param_vector, chi2, iter_count, f_count, g_count, h_count, warning = results

            # Scaling.
            if scaling_matrix is not None:
                param_vector = dot(scaling_matrix, param_vector)

            # Disassemble the parameter vector.
            disassemble_param_vector(param_vector=param_vector, spins=[spins[si]], key=param_key, sim_index=sim_index)

            # Monte Carlo minimisation statistics.
            if sim_index is not None:
                # Chi-squared statistic.
                spins[si].chi2_sim[sim_index] = chi2

                # Iterations.
                spins[si].iter_sim[sim_index] = iter_count

                # Function evaluations.
                spins[si].f_count_sim[sim_index] = f_count

                # Gradient evaluations.
                spins[si].g_count_sim[sim_index] = g_count

                # Hessian evaluations.
                spins[si].h_count_sim[sim_index] = h_count

                # Warning.
                spins[si].warning_sim[sim_index] = warning

            # Normal statistics.
            else:
                # Chi-squared statistic.
                spins[si].chi2 = chi2

                # Iterations.
                spins[si].iter = iter_count

                # Function evaluations.
                spins[si].f_count = f_count

                # Gradient evaluations.
                spins[si].g_count = g_count

                # Hessian evaluations.
                spins[si].h_count = h_count

                # Warning.
                spins[si].warning = warning
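A short sketch of the diagonal scaling used above: the parameter vector is divided by the scaling factors before optimisation and multiplied back afterwards, so all parameters are optimised on a similar scale (the numbers below are purely illustrative):

import numpy as np

scaling_matrix = np.diag([10.0, 1000.0])   # E.g. R2eff ~ 10 and I0 ~ 1000.
param_vector = np.array([12.5, 2500.0])

scaled = np.dot(np.linalg.inv(scaling_matrix), param_vector)   # -> [1.25, 2.5]
restored = np.dot(scaling_matrix, scaled)                      # -> [12.5, 2500.0]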
Exemple #22
0
def estimate_r2eff_err(spin_id=None, epsrel=0.0, verbosity=1):
    """This will estimate the R2eff and i0 errors from the covariance matrix Qxx.  Qxx is calculated from the Jacobian matrix and the optimised parameters.

    @keyword spin_id:       The spin identification string.
    @type spin_id:          str
    @param epsrel:          Any columns of R which satisfy |R_{kk}| <= epsrel |R_{11}| are considered linearly-dependent and are excluded from the covariance matrix, where the corresponding rows and columns of the covariance matrix are set to zero.
    @type epsrel:           float
    @keyword verbosity:     The amount of information to print.  The higher the value, the greater the verbosity.
    @type verbosity:        int
    """

    # Check that the C modules have been compiled.
    if not C_module_exp_fn:
        raise RelaxError("Relaxation curve fitting is not available.  Try compiling the C modules on your platform.")

    # Perform checks.
    check_model_type(model=MODEL_R2EFF)

    # Loop over the spins.
    for cur_spin, mol_name, resi, resn, cur_spin_id in spin_loop(selection=spin_id, full_info=True, return_id=True, skip_desel=True):
        # Generate spin string.
        spin_string = generate_spin_string(spin=cur_spin, mol_name=mol_name, res_num=resi, res_name=resn)

        # Raise an error if the spin has not been optimised.
        if not (hasattr(cur_spin, 'r2eff') and hasattr(cur_spin, 'i0')):
            raise RelaxError("Spin %s does not contain optimised 'r2eff' and 'i0' values.  Try executing: minimise.execute(min_algor='Newton', constraints=False)"%(spin_string))

        # Raise a warning if the gradient count is 0.  This could indicate that minimisation has not yet been performed.
        if hasattr(cur_spin, 'g_count'):
            if getattr(cur_spin, 'g_count') == 0.0:
                text = "Spin %s contains a gradient count of 0.0.  Is the R2eff parameter optimised?  Try executing: minimise.execute(min_algor='Newton', constraints=False)" %(spin_string)
                warn(RelaxWarning("%s." % text))

        # Print information.
        if verbosity >= 1:
            # Individual spin block section.
            top = 2
            if verbosity >= 2:
                top += 2
            subsection(file=sys.stdout, text="Estimating R2eff error for spin: %s"%spin_string, prespace=top)

        # Loop over each spectrometer frequency and dispersion point.
        for exp_type, frq, offset, point, ei, mi, oi, di in loop_exp_frq_offset_point(return_indices=True):
            # The parameter key.
            param_key = return_param_key_from_data(exp_type=exp_type, frq=frq, offset=offset, point=point)

            # Extract values.
            r2eff = getattr(cur_spin, 'r2eff')[param_key]
            i0 = getattr(cur_spin, 'i0')[param_key]

            # Pack data
            param_vector = [r2eff, i0]

            # The peak intensities, errors and times.
            values = []
            errors = []
            times = []
            for time in loop_time(exp_type=exp_type, frq=frq, offset=offset, point=point):
                values.append(average_intensity(spin=cur_spin, exp_type=exp_type, frq=frq, offset=offset, point=point, time=time))
                errors.append(average_intensity(spin=cur_spin, exp_type=exp_type, frq=frq, offset=offset, point=point, time=time, error=True))
                times.append(time)

            # Convert to numpy array.
            values = asarray(values)
            errors = asarray(errors)
            times = asarray(times)

            # Initialise data in C code.
            scaling_list = [1.0, 1.0]
            model = Relax_fit_opt(model='exp', num_params=len(param_vector), values=values, errors=errors, relax_times=times, scaling_matrix=scaling_list)

            # Use the direct Jacobian from the model function.
            jacobian_matrix_exp = transpose(asarray(model.jacobian(param_vector)))
            weights = 1. / errors**2

            # Get the covariance matrix.
            pcov = multifit_covar(J=jacobian_matrix_exp, weights=weights)

            # To compute one standard deviation errors on the parameters, take the square root of the diagonal of the covariance matrix.
            param_vector_error = sqrt(diag(pcov))

            # Extract values.
            r2eff_err, i0_err = param_vector_error

            # Copy the r2eff dictionary to the r2eff_err dictionary, as they share the same keys.
            if not hasattr(cur_spin, 'r2eff_err'):
                setattr(cur_spin, 'r2eff_err', deepcopy(getattr(cur_spin, 'r2eff')))
            if not hasattr(cur_spin, 'i0_err'):
                setattr(cur_spin, 'i0_err', deepcopy(getattr(cur_spin, 'i0')))

            # Set error.
            cur_spin.r2eff_err[param_key] = r2eff_err
            cur_spin.i0_err[param_key] = i0_err

            # Get other relevant information.
            chi2 = getattr(cur_spin, 'chi2')

            # Print information.
            print_strings = []
            if verbosity >= 1:
                # Add print strings.
                point_info = "%s at %3.1f MHz, for offset=%3.3f ppm and dispersion point %-5.1f, with %i time points." % (exp_type, frq/1E6, offset, point, len(times))
                print_strings.append(point_info)

                par_info = "r2eff=%3.3f r2eff_err=%3.4f, i0=%6.1f, i0_err=%3.4f, chi2=%3.3f.\n" % ( r2eff, r2eff_err, i0, i0_err, chi2)
                print_strings.append(par_info)

                if verbosity >= 2:
                    time_info = ', '.join(map(str, times))
                    print_strings.append('For time array: '+time_info+'.\n\n')

            # Print the information.
            if len(print_strings) > 0:
                for print_string in print_strings:
                    print(print_string)
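For comparison only, scipy provides an equivalent covariance-based error estimate for this two-parameter exponential via curve_fit; the sketch below is independent of relax and all data values are invented:

import numpy as np
from scipy.optimize import curve_fit

def exp_decay(t, r2eff, i0):
    """Peak intensity decay I(t) = I0 * exp(-R2eff * t)."""
    return i0 * np.exp(-r2eff * t)

# Synthetic, noise-free data for illustration.
times = np.array([0.0, 0.02, 0.04, 0.08, 0.16])
values = 1000.0 * np.exp(-10.0 * times)
errors = np.full_like(times, 5.0)

popt, pcov = curve_fit(exp_decay, times, values, p0=[5.0, 800.0],
                       sigma=errors, absolute_sigma=True)
r2eff_err, i0_err = np.sqrt(np.diag(pcov))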
Exemple #23
0
    def optimise(self, model=None, model_path=None):
        """Optimise the model, taking model nesting into account.

        @keyword model:         The model to be optimised.
        @type model:            str
        @keyword model_path:    The folder name for the model, where any spaces have been replaced with underscores.
        @type model_path:       str
        """

        # Printout.
        section(file=sys.stdout, text="Optimisation", prespace=2)

        # Deselect insignificant spins.
        if model not in [MODEL_R2EFF, MODEL_NOREX]:
            self.interpreter.relax_disp.insignificance(
                level=self.insignificance)

        # Speed up the grid search by using the minimum R2eff value.
        if self.set_grid_r20 and model != MODEL_R2EFF:
            self.interpreter.relax_disp.r20_from_min_r2eff(force=True)

        # Use pre-run results as the optimisation starting point.
        # Test if file exists.
        if self.pre_run_dir:
            path = self.pre_run_dir + sep + model_path
            # File path.
            file_path = get_file_path('results', path)

            # Test if the file exists and determine the compression type.
            try:
                compress_type, file_path = determine_compression(file_path)
                res_file_exists = True

            except RelaxFileError:
                res_file_exists = False

        if self.pre_run_dir and res_file_exists:
            self.pre_run_parameters(model=model, model_path=model_path)

        # Otherwise use the normal nesting check and grid search if not nested.
        else:
            # Nested model simplification.
            nested = self.nesting(model=model)

            # Otherwise use a grid search of default values to start optimisation with.
            if not nested:
                # Grid search.
                if self.grid_inc:
                    self.interpreter.minimise.grid_search(inc=self.grid_inc)

                # Default values.
                else:
                    # The standard parameters.
                    for param in MODEL_PARAMS[model]:
                        self.interpreter.value.set(param=param, index=None)

                    # The optional R1 parameter.
                    if is_r1_optimised(model=model):
                        self.interpreter.value.set(param='r1', index=None)

        # 'R2eff' model minimisation flags.
        do_minimise = False
        if model == MODEL_R2EFF:
            # The constraints flag.
            constraints = False

            # The minimisation algorithm to use.
            # Both the Jacobian and Hessian matrices have been specified for exponential curve-fitting, allowing the much faster algorithms to be used.
            min_algor = 'Newton'

            # Check if all spins contain 'r2eff' and its associated error.
            has_r2eff = False

            # Loop over all spins.
            for cur_spin, spin_id in spin_loop(return_id=True,
                                               skip_desel=True):
                # Check 'r2eff'
                if hasattr(cur_spin, 'r2eff') and hasattr(
                        cur_spin, 'r2eff_err'):
                    has_r2eff = True
                else:
                    has_r2eff = False
                    break

            # Skip the optimisation if 'r2eff' and 'r2eff_err' are present and the flag forcing optimisation is not set.
            if has_r2eff and not self.optimise_r2eff:
                pass

            # Optimise if 'r2eff' and 'r2eff_err' are present and the flag forcing optimisation is set.
            elif has_r2eff and self.optimise_r2eff:
                do_minimise = True

            # Optimise if no R2eff values and errors are present.
            elif not has_r2eff:
                do_minimise = True

        # Dispersion model minimisation flags.
        else:
            do_minimise = True
            constraints = True
            # The minimisation algorithm to use.  If the Jacobian and Hessian matrices have not been specified for fitting, 'simplex' should be used.
            min_algor = 'simplex'

        # Do the minimisation.
        if do_minimise:
            self.interpreter.minimise.execute(min_algor=min_algor,
                                              func_tol=self.opt_func_tol,
                                              max_iter=self.opt_max_iterations,
                                              constraints=constraints)

        # Model elimination.
        if self.eliminate:
            self.interpreter.eliminate()

        # Monte Carlo simulations.
        do_monte_carlo = False
        if model == MODEL_R2EFF:
            # The constraints flag.
            constraints = False

            # Both the Jacobian and Hessian matrices have been specified for exponential curve-fitting, allowing the much faster algorithms to be used.
            min_algor = 'Newton'

            # Skip the simulations if 'r2eff' and 'r2eff_err' are present and the flag forcing optimisation is not set.
            if has_r2eff and not self.optimise_r2eff:
                pass

            # Simulate if 'r2eff' and 'r2eff_err' are present and the flag forcing optimisation is set.
            elif has_r2eff and self.optimise_r2eff:
                do_monte_carlo = True

            # Simulate if no R2eff values and errors are present.
            elif not has_r2eff:
                do_monte_carlo = True

        elif self.mc_sim_all_models or len(self.models) < 2:
            do_monte_carlo = True
            # The constraints flag.
            constraints = True
            # The minimisation algorithm to use.  If the Jacobian and Hessian matrices have not been specified for fitting, 'simplex' should be used.
            min_algor = 'simplex'

        # Error estimation by Monte Carlo simulations.
        if do_monte_carlo:
            # Set the number of Monte-Carlo simulations.
            monte_carlo_sim = self.mc_sim_num

            # If the number for exponential curve fitting has been set.
            if model == MODEL_R2EFF and self.exp_mc_sim_num is not None:
                monte_carlo_sim = self.exp_mc_sim_num

            # When set to minus 1, the errors will be estimated from the covariance matrix.
            # This is HIGHLY likely to be wrong, but can be used in an initial test phase.
            if model == MODEL_R2EFF and self.exp_mc_sim_num == -1:
                # Printout.
                subsection(file=sys.stdout,
                           text="Estimating errors from Covariance matrix",
                           prespace=1)

                # Raise warning.
                text = 'Estimating errors from the Covariance matrix is highly likely to be "quite" wrong.  Use only with extreme care, and for initial rapid testing of your data.'
                warn(RelaxWarning(text))

                # Estimate errors
                self.interpreter.relax_disp.r2eff_err_estimate()
            else:
                self.interpreter.monte_carlo.setup(number=monte_carlo_sim)
                self.interpreter.monte_carlo.create_data()
                self.interpreter.monte_carlo.initial_values()
                self.interpreter.minimise.execute(
                    min_algor=min_algor,
                    func_tol=self.opt_func_tol,
                    max_iter=self.opt_max_iterations,
                    constraints=constraints)
                if self.eliminate:
                    self.interpreter.eliminate()
                self.interpreter.monte_carlo.error_analysis()
Exemple #24
0
    def optimise(self, model=None, model_path=None):
        """Optimise the model, taking model nesting into account.

        @keyword model:         The model to be optimised.
        @type model:            str
        @keyword model_path:    The folder name for the model, where any spaces have been replaced with underscores.
        @type model_path:       str
        """

        # Printout.
        section(file=sys.stdout, text="Optimisation", prespace=2)

        # Deselect insignificant spins.
        if model not in [MODEL_R2EFF, MODEL_NOREX]:
            self.interpreter.relax_disp.insignificance(level=self.insignificance)

        # Speed up the grid search by using the minimum R2eff value.
        if self.set_grid_r20 and model != MODEL_R2EFF:
            self.interpreter.relax_disp.r20_from_min_r2eff(force=True)

        # Use pre-run results as the optimisation starting point.
        # Test if file exists.
        if self.pre_run_dir:
            path = self.pre_run_dir + sep + model_path
            # File path.
            file_path = get_file_path('results', path)

            # Test if the file exists and determine the compression type.
            try:
                compress_type, file_path = determine_compression(file_path)
                res_file_exists = True

            except RelaxFileError:
                res_file_exists = False

        if self.pre_run_dir and res_file_exists:
            self.pre_run_parameters(model=model, model_path=model_path)

        # Otherwise use the normal nesting check and grid search if not nested.
        else:
            # Nested model simplification.
            nested = self.nesting(model=model)

            # Otherwise use a grid search of default values to start optimisation with.
            if not nested:
                # Grid search.
                if self.grid_inc:
                    self.interpreter.minimise.grid_search(inc=self.grid_inc)

                # Default values.
                else:
                    # The standard parameters.
                    for param in MODEL_PARAMS[model]:
                        self.interpreter.value.set(param=param, index=None)

                    # The optional R1 parameter.
                    if is_r1_optimised(model=model):
                        self.interpreter.value.set(param='r1', index=None)

        # 'R2eff' model minimisation flags.
        do_minimise = False
        if model == MODEL_R2EFF:
            # The constraints flag.
            constraints = False

            # The minimisation algorithm to use.
            # Both the Jacobian and Hessian matrices have been specified for exponential curve-fitting, allowing the much faster algorithms to be used.
            min_algor = 'Newton'

            # Check if all spins contain 'r2eff' and its associated error.
            has_r2eff = False

            # Loop over all spins.
            for cur_spin, spin_id in spin_loop(return_id=True, skip_desel=True):
                # Check 'r2eff'
                if hasattr(cur_spin, 'r2eff') and hasattr(cur_spin, 'r2eff_err'):
                    has_r2eff = True
                else:
                    has_r2eff = False
                    break

            # Skip the optimisation if 'r2eff' and 'r2eff_err' are present and the flag forcing optimisation is not set.
            if has_r2eff and not self.optimise_r2eff:
                pass

            # Optimise if 'r2eff' and 'r2eff_err' are present and the flag forcing optimisation is set.
            elif has_r2eff and self.optimise_r2eff:
                do_minimise = True

            # Optimise if no R2eff values and errors are present.
            elif not has_r2eff:
                do_minimise = True

        # Dispersion model minimisation flags.
        else:
            do_minimise = True
            constraints = True
            # The minimisation algorithm to use.  If the Jacobian and Hessian matrices have not been specified for fitting, 'simplex' should be used.
            min_algor = 'simplex'

        # Do the minimisation.
        if do_minimise:
            self.interpreter.minimise.execute(min_algor=min_algor, func_tol=self.opt_func_tol, max_iter=self.opt_max_iterations, constraints=constraints)

        # Model elimination.
        if self.eliminate:
            self.interpreter.eliminate()

        # Monte Carlo simulations.
        do_monte_carlo = False
        if model == MODEL_R2EFF:
            # The constraints flag.
            constraints = False

            # Both the Jacobian and Hessian matrices have been specified for exponential curve-fitting, allowing the much faster algorithms to be used.
            min_algor = 'Newton'

            # Skip the simulations if 'r2eff' and 'r2eff_err' are present and the flag forcing optimisation is not set.
            if has_r2eff and not self.optimise_r2eff:
                pass

            # Simulate if 'r2eff' and 'r2eff_err' are present and the flag forcing optimisation is set.
            elif has_r2eff and self.optimise_r2eff:
                do_monte_carlo = True

            # Simulate if no R2eff values and errors are present.
            elif not has_r2eff:
                do_monte_carlo = True

        elif self.mc_sim_all_models or len(self.models) < 2:
            do_monte_carlo = True
            # The constraints flag.
            constraints = True
            # The minimisation algorithm to use.  If the Jacobian and Hessian matrices have not been specified for fitting, 'simplex' should be used.
            min_algor = 'simplex'

        # Error estimation by Monte Carlo simulations.
        if do_monte_carlo:
            # Set the number of Monte-Carlo simulations.
            monte_carlo_sim = self.mc_sim_num

            # If the number for exponential curve fitting has been set.
            if model == MODEL_R2EFF and self.exp_mc_sim_num is not None:
                monte_carlo_sim = self.exp_mc_sim_num

            # When set to minus 1, the errors will be estimated from the covariance matrix.
            # This is HIGHLY likely to be wrong, but can be used in an initial test phase.
            if model == MODEL_R2EFF and self.exp_mc_sim_num == -1:
                # Printout.
                subsection(file=sys.stdout, text="Estimating errors from Covariance matrix", prespace=1)

                # Raise warning.
                text = 'Estimating errors from the Covariance matrix is highly likely to be "quite" wrong.  Use only with extreme care, and for initial rapid testing of your data.'
                warn(RelaxWarning(text))

                # Estimate errors
                self.interpreter.relax_disp.r2eff_err_estimate()
            else:
                self.interpreter.monte_carlo.setup(number=monte_carlo_sim)
                self.interpreter.monte_carlo.create_data()
                self.interpreter.monte_carlo.initial_values()
                self.interpreter.minimise.execute(min_algor=min_algor, func_tol=self.opt_func_tol, max_iter=self.opt_max_iterations, constraints=constraints)
                if self.eliminate:
                    self.interpreter.eliminate()
                self.interpreter.monte_carlo.error_analysis()
Exemple #25
0
def create_geometric_rep(format='PDB',
                         file=None,
                         dir=None,
                         compress_type=0,
                         size=30.0,
                         inc=36,
                         force=False):
    """Create a PDB file containing a geometric object representing the frame order dynamics.

    @keyword format:        The format for outputting the geometric representation.  Currently only the 'PDB' format is supported.
    @type format:           str
    @keyword file:          The name of the file of the PDB representation of the frame order dynamics to create.
    @type file:             str
    @keyword dir:           The name of the directory to place the PDB file into.
    @type dir:              str
    @keyword compress_type: The compression type.  The integer values correspond to the compression type: 0, no compression; 1, Bzip2 compression; 2, Gzip compression.
    @type compress_type:    int
    @keyword size:          The size of the geometric object in Angstroms.
    @type size:             float
    @keyword inc:           The number of increments for the filling of the cone objects.
    @type inc:              int
    @keyword force:         Flag which if set to True will cause any pre-existing file to be overwritten.
    @type force:            bool
    """

    # Printout.
    subsection(
        file=sys.stdout,
        text=
        "Creating a PDB file containing a geometric object representing the frame order dynamics."
    )

    # Checks.
    check_parameters(escalate=2)

    # Initialise.
    titles = []
    structures = []
    representation = []
    sims = []
    file_root = []

    # Symmetry for inverted representations?
    sym = True
    if cdp.model in [MODEL_ROTOR, MODEL_FREE_ROTOR, MODEL_DOUBLE_ROTOR]:
        sym = False

    # The standard representation.
    titles.append("Representation A")
    structures.append(Internal())
    if sym:
        representation.append('A')
        file_root.append("%s_A" % file)
    else:
        representation.append(None)
        file_root.append(file)
    sims.append(False)

    # The inverted representation.
    if sym:
        titles.append("Representation A")
        structures.append(Internal())
        representation.append('B')
        file_root.append("%s_B" % file)
        sims.append(False)

    # The standard MC simulation representation.
    if hasattr(cdp, 'sim_number'):
        titles.append("MC simulation representation A")
        structures.append(Internal())
        if sym:
            representation.append('A')
            file_root.append("%s_sim_A" % file)
        else:
            representation.append(None)
            file_root.append("%s_sim" % file)
        sims.append(True)

    # The inverted MC simulation representation.
    if hasattr(cdp, 'sim_number') and sym:
        titles.append("MC simulation representation B")
        structures.append(Internal())
        representation.append('B')
        file_root.append("%s_sim_B" % file)
        sims.append(True)

    # Loop over each structure and add the contents.
    for i in range(len(structures)):
        # Printout.
        subsubsection(file=sys.stdout, text="Creating the %s." % titles[i])

        # Create a model for each Monte Carlo simulation.
        if sims[i]:
            for sim_i in range(cdp.sim_number):
                structures[i].add_model(model=sim_i + 1)

        # Add the pivots.
        add_pivots(structure=structures[i], sims=sims[i])

        # Add all rotor objects.
        add_rotors(structure=structures[i],
                   representation=representation[i],
                   size=size,
                   sims=sims[i])

        # Add the axis systems.
        add_axes(structure=structures[i],
                 representation=representation[i],
                 size=size,
                 sims=sims[i])

        # Add the cone objects.
        if cdp.model not in [
                MODEL_ROTOR, MODEL_FREE_ROTOR, MODEL_DOUBLE_ROTOR
        ]:
            add_cones(structure=structures[i],
                      representation=representation[i],
                      size=size,
                      inc=inc,
                      sims=sims[i])

        # Add atoms for creating titles.
        add_titles(structure=structures[i],
                   representation=representation[i],
                   displacement=size + 10,
                   sims=sims[i])

        # Create the PDB file.
        if format == 'PDB':
            pdb_file = open_write_file(file_root[i] + '.pdb',
                                       dir,
                                       compress_type=compress_type,
                                       force=force)
            structures[i].write_pdb(pdb_file)
            pdb_file.close()
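A hedged usage sketch; the file root and directory names are hypothetical, and '.pdb' is appended automatically:

# Write 'frame_order_A.pdb' and 'frame_order_B.pdb' (or a single
# 'frame_order.pdb' for the rotor models) into the 'results' directory.
create_geometric_rep(format='PDB', file='frame_order', dir='results', force=True)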
Exemple #26
0
def create_ave_pos(format='PDB',
                   file=None,
                   dir=None,
                   compress_type=0,
                   model=1,
                   force=False):
    """Create a PDB file of the molecule with the moving domains shifted to the average position.

    @keyword format:        The format for outputting the geometric representation.  Currently only the 'PDB' format is supported.
    @type format:           str
    @keyword file:          The name of the file for the average molecule structure.
    @type file:             str
    @keyword dir:           The name of the directory to place the PDB file into.
    @type dir:              str
    @keyword compress_type: The compression type.  The integer values correspond to the compression type: 0, no compression; 1, Bzip2 compression; 2, Gzip compression.
    @type compress_type:    int
    @keyword model:         Only one model from an analysed ensemble can be used for the PDB representation of the Monte Carlo simulations, as these consist of one model per simulation.
    @type model:            int
    @keyword force:         Flag which if set to True will cause any pre-existing file to be overwritten.
    @type force:            bool
    """

    # Printout.
    subsection(
        file=sys.stdout,
        text=
        "Creating a PDB file with the moving domains shifted to the average position."
    )

    # Checks.
    if not hasattr(cdp, 'structure'):
        warn(
            RelaxWarning(
                "No structural data is present, cannot create the average position representation."
            ))
        return

    # Initialise.
    titles = []
    sims = []
    file_root = []
    models = []
    structures = []

    # The real average position.
    titles.append("real average position")
    sims.append(False)
    file_root.append(file)
    models.append([None])

    # The positive MC simulation representation.
    if hasattr(cdp, 'sim_number'):
        titles.append("MC simulation representation")
        sims.append(True)
        file_root.append("%s_sim" % file)
        models.append([i + 1 for i in range(cdp.sim_number)])

    # Make a copy of the structural object (so as to preserve the original structure).
    structures.append(deepcopy(cdp.structure))
    if hasattr(cdp, 'sim_number'):
        structures.append(deepcopy(cdp.structure))

    # Delete all but the chosen model for the simulations.
    if hasattr(cdp, 'sim_number') and len(structures[-1].structural_data) > 1:
        # Determine the models to delete.
        to_delete = []
        for model_cont in structures[-1].model_loop():
            if model_cont.num != model:
                to_delete.append(model_cont.num)
        to_delete.reverse()

        # Delete them.
        for num in to_delete:
            structures[-1].structural_data.delete_model(model_num=num)

    # Loop over each representation and add the contents.
    for i in range(len(titles)):
        # Printout.
        subsubsection(file=sys.stdout, text="Creating the %s." % titles[i])

        # Loop over each model.
        for j in range(len(models[i])):
            # Create or set the models, if needed.
            if models[i][j] == 1:
                structures[i].set_model(model_new=1)
            elif models[i][j] is not None:
                structures[i].add_model(model=models[i][j])

        # Shift to the average position.
        average_position(structure=structures[i],
                         models=models[i],
                         sim=sims[i])

        # Output to PDB format.
        if format == 'PDB':
            pdb_file = open_write_file(file_name=file_root[i] + '.pdb',
                                       dir=dir,
                                       compress_type=compress_type,
                                       force=force)
            structures[i].write_pdb(file=pdb_file)
            pdb_file.close()
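A matching usage sketch for the average position structure (the names are again hypothetical):

# Write 'ave_pos.pdb', plus 'ave_pos_sim.pdb' if Monte Carlo simulations
# exist, keeping only model 1 for the simulation copies.
create_ave_pos(format='PDB', file='ave_pos', dir='results', model=1, force=True)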
Exemple #27
0
def sn_ratio_deselection(ratio=10.0,
                         operation='<',
                         all_sn=False,
                         select=False,
                         verbose=True):
    """Use user function deselect.spin on spins with signal to noise ratio higher or lower than ratio.  The operation determines the selection operation.

    @keyword ratio:         The ratio to compare to.
    @type ratio:            float
    @keyword operation:     The comparison operation by which to select the spins, performed as operation(sn_ratio, ratio), where operation can be one of:  '<', '<=', '>', '>=', '==', '!='.
    @type operation:        str
    @keyword all_sn:        A flag specifying if all the signal to noise ratios per spin should match the comparison operator, or if just a single match is enough.
    @type all_sn:           bool
    @keyword select:        A flag specifying if the user function select.spin should be used instead.
    @type select:           bool
    @keyword verbose:       A flag which if True will print additional information out.
    @type verbose:          bool
    """

    # Tests.
    check_pipe()
    check_mol_res_spin_data()

    # Test if spectra have been loaded.
    if not hasattr(cdp, 'spectrum_ids'):
        raise RelaxError("No spectra have been loaded.")

    # Assign the comparison operator.
    # "'<' : strictly less than"
    if operation == '<':
        op = operator.lt

    # "'<=' : less than or equal"
    elif operation == '<=':
        op = operator.le

    # "'>' : strictly greater than"
    elif operation == '>':
        op = operator.gt

    # "'>=' : greater than or equal"
    elif operation == '>=':
        op = operator.ge

    # "'==' : equal"
    elif operation == '==':
        op = operator.eq

    # "'!=' : not equal",
    elif operation == '!=':
        op = operator.ne

    # If not assigned, raise an error.
    else:
        raise RelaxError(
            "The comparison operation does not belong to the allowed list: ['<', '<=', '>', '>=', '==', '!=']."
        )

    # Assign text for print out.
    if all_sn:
        text_all_sn = "all"
    else:
        text_all_sn = "any"

    if select:
        text_sel = "selected"
        sel_func = sel_spin
    else:
        text_sel = "deselected"
        sel_func = desel_spin

    # Printout.
    section(file=sys.stdout,
            text="Signal to noise ratio comparison selection",
            prespace=1,
            postspace=0)
    print("For the comparison test: S/N %s %1.1f" % (operation, ratio))

    # Loop over the spins.
    spin_ids = []
    for spin, spin_id in spin_loop(return_id=True):
        # Skip spins missing sn_ratio.
        if not hasattr(spin, 'sn_ratio'):
            # Skip warning for deselected spins.
            if spin.select:
                warn(
                    RelaxWarning(
                        "Spin '%s' does not contain signal to noise calculations.  Perform the 'spectrum.sn_ratio' user function first.  This spin is skipped."
                        % spin_id))
            continue

        # Collect the spectrum IDs into a list.
        ids = []
        for id in spin.peak_intensity:
            # Append the ID to the list.
            ids.append(id)

        # Sort the IDs alphanumerically.
        ids = sort_filenames(filenames=ids, rev=False)

        # Loop over the sorted ids.
        sn_val = []
        for id in ids:
            # Append the signal to noise ratio to the list.
            sn_val.append(spin.sn_ratio[id])

        # Convert the list to array.
        sn_val = asarray(sn_val)

        # Make the comparison for the whole array.
        test_arr = op(sn_val, ratio)

        # Determine how the test should evaluate.
        if all_sn:
            test = test_arr.all()
        else:
            test = test_arr.any()

        # Make a numpy array of the IDs and extract those which passed the test.
        ids_arr = asarray(ids)
        ids_test_arr = ids_arr[test_arr]

        # Invert the boolean array and extract the IDs which failed the test.
        test_arr_inv = ~test_arr
        ids_test_arr_inv = ids_arr[test_arr_inv]

        # Printout.
        if verbose:
            subsection(
                file=sys.stdout,
                text="Signal to noise ratio comparison for spin ID '%s'" %
                spin_id,
                prespace=1,
                postspace=0)
            print("Following spectra ID evaluated to True: %s" % ids_test_arr)
            print("Following spectra ID evaluated to False: %s" %
                  ids_test_arr_inv)
            print(
                "'%s' comparisons have been used for evaluation, which evaluated to: %s"
                % (text_all_sn, test))
            if test:
                print("The spin ID '%s' is %s" % (spin_id, text_sel))
            else:
                print("The spin ID '%s' is skipped" % spin_id)

        # If the test evaluates to True, then perform the selection action.
        if test:
            # Select/Deselect the spin.
            sel_func(spin_id=spin_id)

            # Assign spin_id to list, for printing.
            spin_ids.append(spin_id)

    # Print a summary.
    if verbose:
        if len(spin_ids) > 0:
            subsection(
                file=sys.stdout,
                text=
                "For all of the S/N comparison tests, the following spin IDs were %s"
                % text_sel,
                prespace=1,
                postspace=0)
            print(spin_ids)
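The operator-module dispatch above reduces each spin's decision to one vectorised comparison.  A standalone sketch of the pattern (the values are invented):

import operator
import numpy as np

op = operator.lt                        # The '<' operation.
sn_val = np.asarray([8.0, 12.0, 25.0])
test_arr = op(sn_val, 10.0)             # -> array([ True, False, False])
print(test_arr.any(), test_arr.all())   # any: True, all: False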
Exemple #28
0
def estimate_r2eff_err(spin_id=None, epsrel=0.0, verbosity=1):
    """This will estimate the R2eff and i0 errors from the covariance matrix Qxx.  Qxx is calculated from the Jacobian matrix and the optimised parameters.

    @keyword spin_id:       The spin identification string.
    @type spin_id:          str
    @param epsrel:          Any columns of R which satisfy |R_{kk}| <= epsrel |R_{11}| are considered linearly-dependent and are excluded from the covariance matrix, where the corresponding rows and columns of the covariance matrix are set to zero.
    @type epsrel:           float
    @keyword verbosity:     The amount of information to print.  The higher the value, the greater the verbosity.
    @type verbosity:        int
    """

    # Check that the C modules have been compiled.
    if not C_module_exp_fn:
        raise RelaxError(
            "Relaxation curve fitting is not available.  Try compiling the C modules on your platform."
        )

    # Perform checks.
    check_model_type(model=MODEL_R2EFF)

    # Loop over the spins.
    for cur_spin, mol_name, resi, resn, cur_spin_id in spin_loop(
            selection=spin_id, full_info=True, return_id=True,
            skip_desel=True):
        # Generate spin string.
        spin_string = generate_spin_string(spin=cur_spin,
                                           mol_name=mol_name,
                                           res_num=resi,
                                           res_name=resn)

        # Raise an error if the spin has not been optimised.
        if not (hasattr(cur_spin, 'r2eff') and hasattr(cur_spin, 'i0')):
            raise RelaxError(
                "Spin %s does not contain optimised 'r2eff' and 'i0' values.  Try executing: minimise.execute(min_algor='Newton', constraints=False)"
                % (spin_string))

        # Raise a warning if the gradient count is 0, as this could indicate that no minimisation has been performed.
        if hasattr(cur_spin, 'g_count'):
            if getattr(cur_spin, 'g_count') == 0.0:
                text = "Spin %s contains a gradient count of 0.0.  Is the R2eff parameter optimised?  Try executing: minimise.execute(min_algor='Newton', constraints=False)" % (
                    spin_string)
                warn(RelaxWarning("%s." % text))

        # Print information.
        if verbosity >= 1:
            # Individual spin block section.
            top = 2
            if verbosity >= 2:
                top += 2
            subsection(file=sys.stdout,
                       text="Estimating R2eff error for spin: %s" %
                       spin_string,
                       prespace=top)

        # Loop over each spectrometer frequency and dispersion point.
        for exp_type, frq, offset, point, ei, mi, oi, di in loop_exp_frq_offset_point(
                return_indices=True):
            # The parameter key.
            param_key = return_param_key_from_data(exp_type=exp_type,
                                                   frq=frq,
                                                   offset=offset,
                                                   point=point)

            # Extract values.
            r2eff = getattr(cur_spin, 'r2eff')[param_key]
            i0 = getattr(cur_spin, 'i0')[param_key]

            # Pack the data.
            param_vector = [r2eff, i0]

            # The peak intensities, errors and times.
            values = []
            errors = []
            times = []
            for time in loop_time(exp_type=exp_type,
                                  frq=frq,
                                  offset=offset,
                                  point=point):
                values.append(
                    average_intensity(spin=cur_spin,
                                      exp_type=exp_type,
                                      frq=frq,
                                      offset=offset,
                                      point=point,
                                      time=time))
                errors.append(
                    average_intensity(spin=cur_spin,
                                      exp_type=exp_type,
                                      frq=frq,
                                      offset=offset,
                                      point=point,
                                      time=time,
                                      error=True))
                times.append(time)

            # Convert to numpy array.
            values = asarray(values)
            errors = asarray(errors)
            times = asarray(times)

            # Initialise data in C code.
            scaling_list = [1.0, 1.0]
            model = Relax_fit_opt(model='exp',
                                  num_params=len(param_vector),
                                  values=values,
                                  errors=errors,
                                  relax_times=times,
                                  scaling_matrix=scaling_list)

            # Use the direct Jacobian from function.
            jacobian_matrix_exp = transpose(
                asarray(model.jacobian(param_vector)))
            weights = 1. / errors**2

            # Get the covariance matrix.
            pcov = multifit_covar(J=jacobian_matrix_exp, weights=weights)

            # To compute one standard deviation errors on the parameters, take the square root of the diagonal covariance.
            param_vector_error = sqrt(diag(pcov))

            # Extract values.
            r2eff_err, i0_err = param_vector_error

            # Copy the r2eff dictionary to the r2eff_err dictionary, as they share the same keys.
            if not hasattr(cur_spin, 'r2eff_err'):
                setattr(cur_spin, 'r2eff_err',
                        deepcopy(getattr(cur_spin, 'r2eff')))
            if not hasattr(cur_spin, 'i0_err'):
                setattr(cur_spin, 'i0_err', deepcopy(getattr(cur_spin, 'i0')))

            # Set error.
            cur_spin.r2eff_err[param_key] = r2eff_err
            cur_spin.i0_err[param_key] = i0_err

            # Get other relevant information.
            chi2 = getattr(cur_spin, 'chi2')

            # Print information.
            print_strings = []
            if verbosity >= 1:
                # Add print strings.
                point_info = "%s at %3.1f MHz, for offset=%3.3f ppm and dispersion point %-5.1f, with %i time points." % (
                    exp_type, frq / 1E6, offset, point, len(times))
                print_strings.append(point_info)

                par_info = "r2eff=%3.3f r2eff_err=%3.4f, i0=%6.1f, i0_err=%3.4f, chi2=%3.3f.\n" % (
                    r2eff, r2eff_err, i0, i0_err, chi2)
                print_strings.append(par_info)

                if verbosity >= 2:
                    time_info = ', '.join(map(str, times))
                    print_strings.append('For time array: ' + time_info +
                                         '.\n\n')

            # Printout.
            if len(print_strings) > 0:
                for print_string in print_strings:
                    print(print_string),
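
The error estimation above is standard weighted least squares: with the weights W = diag(1/sigma^2), the covariance matrix is Qxx = (J^T W J)^-1, and the one standard deviation parameter errors are the square roots of its diagonal.  A minimal numpy sketch of this step is given below; the covar_sketch() helper is hypothetical, and relax's multifit_covar() additionally excludes linearly-dependent columns via the epsrel cutoff.

# Hypothetical sketch of the covariance step above, assuming a well-conditioned
# Jacobian J of shape (n_points, n_params) and the per-point errors sigma.
from numpy import asarray, diag, sqrt
from numpy.linalg import inv

def covar_sketch(J, sigma):
    """Covariance matrix Qxx = (J^T W J)^-1 for the weights W = diag(1/sigma^2)."""
    W = diag(1.0 / asarray(sigma)**2)
    return inv(J.T.dot(W).dot(J))

# One standard deviation errors, as stored in r2eff_err and i0_err above:
# param_errors = sqrt(diag(covar_sketch(jacobian_matrix_exp, errors)))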
Exemple #29
0
    def nesting(self, model=None):
        """Support for model nesting.

        If model nesting is detected, the optimised parameters from the simpler model will be used for the more complex model.  The method will then signal if the nesting condition is met for the model, allowing the grid search to be skipped.


        @keyword model: The model to be optimised.
        @type model:    str
        @return:        True if the model parameters are equivalent to those of the nested model and all parameters have been copied.  False if only some or none of the parameters could be translated from the nested model; here the grid search should still be performed.
        @rtype:         bool
        """

        # Printout. 
        subsection(file=sys.stdout, text="Nesting and model equivalence checks", prespace=1)

        # The simpler model.
        model_info, comparable_model_info = nesting_model(self_models=self.models, model=model)
        if comparable_model_info is not None:
            nested_pipe = self.name_pipe(comparable_model_info.model)
        else:
            nested_pipe = None

        # No nesting.
        if not nested_pipe:
            print("No model nesting or model equivalence detected.")
            return False

        # Copying the parameters to a numerical model from an analytic solution.
        if model_info.eq in [EQ_NUMERIC, EQ_SILICO] and comparable_model_info.eq == EQ_ANALYTIC:
            analytic = True
        else:
            analytic = False

        # Determine if model is equivalent or nested.
        if model_info.params == comparable_model_info.params:
            equivalent = True
        else:
            equivalent = False

        # Printout.
        if equivalent:
            print("Model equivalence detected, copying the optimised parameters from the '%s' model rather than performing a grid search." % comparable_model_info.model)
        else:
            print("Model nesting detected, translating the optimised parameters %s from the '%s' model to the parameters %s of model '%s'.  A grid search is issued for the remaining parameters." % (comparable_model_info.params, comparable_model_info.model, model_info.params, model))
        if analytic:
            print("The parameters are copied from a %s model to a %s model." % (comparable_model_info.eq, model_info.eq))

        # Get the dictionary of how the model parameters of the current model can be copied.
        par_dic = nesting_param(model_params=model_info.params, nested_model_params=comparable_model_info.params)

        # Loop over the parameters in current model.
        for param in model_info.params:
            # Extract how parameter is translated.
            param_conv = par_dic[param]

            # Skip parameters without a conversion.
            if param_conv is None:
                continue

            print("Copying from parameter '%s' to '%s'." % (param_conv, param))

            # Loop over the spins to copy the parameters.
            for spin, spin_id in spin_loop(return_id=True, skip_desel=True):
                # Get the nested spin.
                nested_spin = return_spin(spin_id=spin_id, pipe=nested_pipe)

                # Set value.
                # Some special conversions.
                if param_conv == '1 - pA':
                    val = 1.0 - getattr(nested_spin, 'pA')

                elif param_conv == '0.0':
                    val = 0.0

                else:
                    val = deepcopy(getattr(nested_spin, param_conv))

                # Set the attribute.
                setattr(spin, param, val)

        # Return True if the model is equivalent (the grid search can be skipped), or False if only nested (some parameters are pre-set, but the grid search should still be issued).
        return equivalent
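
The special-case conversions in the loop above can be collected into a single translation rule.  The following restatement is a hypothetical sketch; translate_param() is not part of relax.

# Hypothetical restatement of the parameter translation logic above.
from copy import deepcopy

def translate_param(nested_spin, param_conv):
    """Translate one parameter value from the nested model's spin container."""
    if param_conv == '1 - pA':  # population converted from the major state
        return 1.0 - nested_spin.pA
    if param_conv == '0.0':     # parameter fixed at zero
        return 0.0
    return deepcopy(getattr(nested_spin, param_conv))  # direct copy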
Exemple #30
0
def create_ave_pos(format='PDB', file=None, dir=None, compress_type=0, model=1, force=False):
    """Create a PDB file of the molecule with the moving domains shifted to the average position.

    @keyword format:        The format for outputting the geometric representation.  Currently only the 'PDB' format is supported.
    @type format:           str
    @keyword file:          The name of the file for the average molecule structure.
    @type file:             str
    @keyword dir:           The name of the directory to place the PDB file into.
    @type dir:              str
    @keyword compress_type: The compression type.  The integer values correspond to the compression type: 0, no compression; 1, Bzip2 compression; 2, Gzip compression.
    @type compress_type:    int
    @keyword model:         Only one model from an analysed ensemble can be used for the PDB representation of the Monte Carlo simulations, as these consist of one model per simulation.
    @type model:            int
    @keyword force:         Flag which if set to True will cause any pre-existing file to be overwritten.
    @type force:            bool
    """

    # Printout.
    subsection(file=sys.stdout, text="Creating a PDB file with the moving domains shifted to the average position.")

    # Checks.
    if not hasattr(cdp, 'structure'):
        warn(RelaxWarning("No structural data is present, cannot create the average position representation."))
        return

    # Initialise.
    titles = []
    sims = []
    file_root = []
    models = []
    structures = []

    # The real average position.
    titles.append("real average position")
    sims.append(False)
    file_root.append(file)
    models.append([None])

    # The positive MC simulation representation.
    if hasattr(cdp, 'sim_number'):
        titles.append("MC simulation representation")
        sims.append(True)
        file_root.append("%s_sim" % file)
        models.append([i+1 for i in range(cdp.sim_number)])

    # Make a copy of the structural object (so as to preserve the original structure).
    structures.append(deepcopy(cdp.structure))
    if hasattr(cdp, 'sim_number'):
        structures.append(deepcopy(cdp.structure))

    # Delete all but the chosen model for the simulations.
    if hasattr(cdp, 'sim_number') and len(structures[-1].structural_data) > 1:
        # Determine the models to delete.
        to_delete = []
        for model_cont in structures[-1].model_loop():
            if model_cont.num != model:
                to_delete.append(model_cont.num)
        to_delete.reverse()

        # Delete them.
        for num in to_delete:
            structures[-1].structural_data.delete_model(model_num=num)

    # Loop over each representation and add the contents.
    for i in range(len(titles)):
        # Printout.
        subsubsection(file=sys.stdout, text="Creating the %s." % titles[i])

        # Loop over each model.
        for j in range(len(models[i])):
            # Create or set the models, if needed.
            if models[i][j] == 1:
                structures[i].set_model(model_new=1)
            elif models[i][j] is not None:
                structures[i].add_model(model=models[i][j])

        # Shift to the average position.
        average_position(structure=structures[i], models=models[i], sim=sims[i])

        # Output to PDB format.
        if format == 'PDB':
            pdb_file = open_write_file(file_name=file_root[i]+'.pdb', dir=dir, compress_type=compress_type, force=force)
            structures[i].write_pdb(file=pdb_file)
            pdb_file.close()
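
A hedged usage sketch follows; the file and directory names are illustrative.

# Hypothetical usage: write the Gzip-compressed average structure PDB file into
# the 'final' directory (compress_type=2, as documented above), overwriting any
# existing file.  With Monte Carlo simulations present, a second file with the
# '%s_sim' root is also written, as set up above.
create_ave_pos(file='ave_pos', dir='final', compress_type=2, model=1, force=True)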
Exemple #31
0
    def covariance_matrix(self, model_info=None, verbosity=1):
        """Return the Jacobian and weights required for parameter errors via the covariance matrix.

        @keyword model_info:    The spin container and the spin ID string from the _model_loop_spin() method.
        @type model_info:       SpinContainer instance, str
        @keyword verbosity:     The amount of information to print.  The higher the value, the greater the verbosity.
        @type verbosity:        int
        @return:                The Jacobian and weight matrices for the given model.
        @rtype:                 numpy rank-2 array, numpy rank-2 array
        """

        # Unpack the data.
        spin, spin_id = model_info

        # Check that the C modules have been compiled.
        if not C_module_exp_fn:
            raise RelaxError("Relaxation curve fitting is not available.  Try compiling the C modules on your platform.")

        # Raise an error if the spin has not been optimised.
        if not (hasattr(spin, 'rx') and hasattr(spin, 'i0')):
            raise RelaxError("Spin '%s' does not contain optimised 'rx' and 'i0' values.  Try executing: minimise.execute(min_algor='Newton', constraints=False)" % (spin_id))

        # Raise a warning if the gradient count is 0, as this could indicate that no minimisation has been performed.
        if hasattr(spin, 'g_count'):
            if getattr(spin, 'g_count') == 0.0:
                text = "Spin %s contains a gradient count of 0.0.  Is the rx parameter optimised?  Try executing: minimise.execute(min_algor='Newton', constraints=False)" % (spin_id)
                warn(RelaxWarning("%s." % text))

        # Print information.
        if verbosity >= 1:
            # Individual spin block section.
            top = 2
            if verbosity >= 2:
                top += 2
            subsection(file=sys.stdout, text="Estimating rx error for spin: %s"%spin_id, prespace=top)

        # The peak intensities and times.
        values = []
        errors = []
        times = []
        for key in spin.peak_intensity:
            values.append(spin.peak_intensity[key])
            errors.append(spin.peak_intensity_err[key])
            times.append(cdp.relax_times[key])

        # Convert to numpy array.
        values = asarray(values)
        errors = asarray(errors)
        times = asarray(times)

        # Create the parameter vector and scaling matrix (as a diagonalised list).
        param_vector = assemble_param_vector(spin=spin)
        scaling_list = []
        for i in range(len(spin.params)):
            scaling_list.append(1.0)

        # Initialise data in C code.
        model = Relax_fit_opt(model=spin.model, num_params=len(param_vector), values=values, errors=errors, relax_times=times, scaling_matrix=scaling_list)

        # Use the direct Jacobian from function.
        jacobian_matrix_exp = transpose(asarray(model.jacobian(param_vector)))
        weights = 1. / errors**2

        # Return the matrices.
        return jacobian_matrix_exp, weights
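
As a hypothetical follow-up, the returned matrices can be converted into parameter errors exactly as in Exemple #28; the variable names below are illustrative.

# Hypothetical usage from within the class, mirroring Exemple #28.
jacobian, weights = self.covariance_matrix(model_info=(spin, spin_id))
pcov = multifit_covar(J=jacobian, weights=weights)
param_errors = sqrt(diag(pcov))  # one standard deviation per parameter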
Exemple #32
0
    def nesting(self, model=None):
        """Support for model nesting.

        If model nesting is detected, the optimised parameters from the simpler model will be used for the more complex model.  The method will then signal if the nesting condition is met for the model, allowing the grid search to be skipped.


        @keyword model: The model to be optimised.
        @type model:    str
        @return:        True if the model parameters are equivalent to those of the nested model and all parameters have been copied.  False if only some or none of the parameters could be translated from the nested model; here the grid search should still be performed.
        @rtype:         bool
        """

        # Printout.
        subsection(file=sys.stdout,
                   text="Nesting and model equivalence checks",
                   prespace=1)

        # The simpler model.
        model_info, comparable_model_info = nesting_model(
            self_models=self.models, model=model)
        if comparable_model_info is not None:
            nested_pipe = self.name_pipe(comparable_model_info.model)
        else:
            nested_pipe = None

        # No nesting.
        if not nested_pipe:
            print("No model nesting or model equivalence detected.")
            return False

        # Copying the parameters to a numerical model from an analytic solution.
        if model_info.eq in [EQ_NUMERIC, EQ_SILICO] and comparable_model_info.eq == EQ_ANALYTIC:
            analytic = True
        else:
            analytic = False

        # Determine if model is equivalent or nested.
        if model_info.params == comparable_model_info.params:
            equivalent = True
        else:
            equivalent = False

        # Printout.
        if equivalent:
            print(
                "Model equivalence detected, copying the optimised parameters from the '%s' model rather than performing a grid search."
                % comparable_model_info.model)
        else:
            print(
                "Model nesting detected, translating the optimised parameters %s from the '%s' model to the parameters %s of model '%s'.  A grid search is issued for the remaining parameters."
                % (comparable_model_info.params, comparable_model_info.model,
                   model_info.params, model))
        if analytic:
            print("The parameters are copied from a %s model to a %s model." %
                  (comparable_model_info.eq, model_info.eq))

        # Get the dictionary of how the model parameters of the current model can be copied.
        par_dic = nesting_param(
            model_params=model_info.params,
            nested_model_params=comparable_model_info.params)

        # Loop over the parameters in current model.
        for param in model_info.params:
            # Extract how parameter is translated.
            param_conv = par_dic[param]

            # Skip parameters without a conversion.
            if param_conv is None:
                continue

            print("Copying from parameter '%s' to '%s'." % (param_conv, param))

            # Loop over the spins to copy the parameters.
            for spin, spin_id in spin_loop(return_id=True, skip_desel=True):
                # Get the nested spin.
                nested_spin = return_spin(spin_id=spin_id, pipe=nested_pipe)

                # Set value.
                # Some special conversions.
                if param_conv == '1 - pA':
                    val = 1.0 - getattr(nested_spin, 'pA')

                elif param_conv == '0.0':
                    val = 0.0

                else:
                    val = deepcopy(getattr(nested_spin, param_conv))

                # Set the attribute.
                setattr(spin, param, val)

        # Return True if the model is equivalent (the grid search can be skipped), or False if only nested (some parameters are pre-set, but the grid search should still be issued).
        return equivalent
Exemple #33
0
def create_geometric_rep(format='PDB', file=None, dir=None, compress_type=0, size=30.0, inc=36, force=False):
    """Create a PDB file containing a geometric object representing the frame order dynamics.

    @keyword format:        The format for outputting the geometric representation.  Currently only the 'PDB' format is supported.
    @type format:           str
    @keyword file:          The name of the file of the PDB representation of the frame order dynamics to create.
    @type file:             str
    @keyword dir:           The name of the directory to place the PDB file into.
    @type dir:              str
    @keyword compress_type: The compression type.  The integer values correspond to the compression type: 0, no compression; 1, Bzip2 compression; 2, Gzip compression.
    @type compress_type:    int
    @keyword size:          The size of the geometric object in Angstroms.
    @type size:             float
    @keyword inc:           The number of increments for the filling of the cone objects.
    @type inc:              int
    @keyword force:         Flag which if set to True will cause any pre-existing file to be overwritten.
    @type force:            bool
    """

    # Printout.
    subsection(file=sys.stdout, text="Creating a PDB file containing a geometric object representing the frame order dynamics.")

    # Checks.
    check_parameters(escalate=2)

    # Initialise.
    titles = []
    structures = []
    representation = []
    sims = []
    file_root = []

    # Symmetry for inverted representations?
    sym = True
    if cdp.model in [MODEL_ROTOR, MODEL_FREE_ROTOR, MODEL_DOUBLE_ROTOR]:
        sym = False

    # The standard representation.
    titles.append("Representation A")
    structures.append(Internal())
    if sym:
        representation.append('A')
        file_root.append("%s_A" % file)
    else:
        representation.append(None)
        file_root.append(file)
    sims.append(False)

    # The inverted representation.
    if sym:
        titles.append("Representation A")
        structures.append(Internal())
        representation.append('B')
        file_root.append("%s_B" % file)
        sims.append(False)

    # The standard MC simulation representation.
    if hasattr(cdp, 'sim_number'):
        titles.append("MC simulation representation A")
        structures.append(Internal())
        if sym:
            representation.append('A')
            file_root.append("%s_sim_A" % file)
        else:
            representation.append(None)
            file_root.append("%s_sim" % file)
        sims.append(True)

    # The inverted MC simulation representation.
    if hasattr(cdp, 'sim_number') and sym:
        titles.append("MC simulation representation B")
        structures.append(Internal())
        representation.append('B')
        file_root.append("%s_sim_B" % file)
        sims.append(True)

    # Loop over each structure and add the contents.
    for i in range(len(structures)):
        # Printout.
        subsubsection(file=sys.stdout, text="Creating the %s." % titles[i])

        # Create a model for each Monte Carlo simulation.
        if sims[i]:
            for sim_i in range(cdp.sim_number):
                structures[i].add_model(model=sim_i+1)

        # Add the pivots.
        add_pivots(structure=structures[i], sims=sims[i])

        # Add all rotor objects.
        add_rotors(structure=structures[i], representation=representation[i], size=size, sims=sims[i])

        # Add the axis systems.
        add_axes(structure=structures[i], representation=representation[i], size=size, sims=sims[i])

        # Add the cone objects.
        if cdp.model not in [MODEL_ROTOR, MODEL_FREE_ROTOR, MODEL_DOUBLE_ROTOR]:
            add_cones(structure=structures[i], representation=representation[i], size=size, inc=inc, sims=sims[i])

        # Add atoms for creating titles.
        add_titles(structure=structures[i], representation=representation[i], displacement=size+10, sims=sims[i])

        # Create the PDB file.
        if format == 'PDB':
            pdb_file = open_write_file(file_root[i]+'.pdb', dir, compress_type=compress_type, force=force)
            structures[i].write_pdb(pdb_file)
            pdb_file.close()
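
A hedged usage sketch follows; the file and directory names are illustrative.

# Hypothetical usage: for a non-rotor model the symmetry flag above is True,
# so this writes 'frame_order_A.pdb' and 'frame_order_B.pdb' into 'final',
# plus the '_sim_A'/'_sim_B' variants when Monte Carlo simulations exist.
create_geometric_rep(file='frame_order', dir='final', size=45.0, inc=24, force=True)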
Exemple #34
0
    def nesting(self, model=None):
        """Support for model nesting.

        If model nesting is detected, the optimised parameters from the simpler model will be used for the more complex model.  The method will then signal if the nesting condition is met for the model, allowing the grid search to be skipped.


        @keyword model: The model to be optimised.
        @type model:    str
        @return:        True if the model is the more complex model in a nested pair and the parameters of the simpler model have been copied.  False otherwise.
        @rtype:         bool
        """

        # Printout. 
        subsection(file=sys.stdout, text="Nesting and model equivalence checks", prespace=1)

        # The simpler model.
        nested_pipe = None
        if model == MODEL_LM63_3SITE and MODEL_LM63 in self.models:
            nested_pipe = MODEL_LM63
        if model == MODEL_CR72_FULL and MODEL_CR72 in self.models:
            nested_pipe = MODEL_CR72
        if model == MODEL_MMQ_CR72 and MODEL_CR72 in self.models:
            nested_pipe = MODEL_CR72
        if model == MODEL_NS_CPMG_2SITE_3D_FULL and MODEL_NS_CPMG_2SITE_3D in self.models:
            nested_pipe = MODEL_NS_CPMG_2SITE_3D
        if model == MODEL_NS_CPMG_2SITE_STAR_FULL and MODEL_NS_CPMG_2SITE_STAR in self.models:
            nested_pipe = MODEL_NS_CPMG_2SITE_STAR
        if model == MODEL_NS_MMQ_3SITE_LINEAR and MODEL_NS_MMQ_2SITE in self.models:
            nested_pipe = MODEL_NS_MMQ_2SITE
        if model == MODEL_NS_MMQ_3SITE:
            if MODEL_NS_MMQ_3SITE_LINEAR in self.models:
                nested_pipe = MODEL_NS_MMQ_3SITE_LINEAR
            elif MODEL_NS_MMQ_2SITE in self.models:
                nested_pipe = MODEL_NS_MMQ_2SITE
        if model == MODEL_NS_R1RHO_3SITE_LINEAR and MODEL_NS_R1RHO_2SITE in self.models:
            nested_pipe = MODEL_NS_R1RHO_2SITE
        if model == MODEL_NS_R1RHO_3SITE:
            if MODEL_NS_R1RHO_3SITE_LINEAR in self.models:
                nested_pipe = MODEL_NS_R1RHO_3SITE_LINEAR
            elif MODEL_NS_R1RHO_2SITE in self.models:
                nested_pipe = MODEL_NS_R1RHO_2SITE


        # Using the analytic solution.
        analytic = False
        if model in [MODEL_NS_CPMG_2SITE_3D, MODEL_NS_CPMG_2SITE_EXPANDED, MODEL_NS_CPMG_2SITE_STAR] and MODEL_CR72 in self.models:
            nested_pipe = MODEL_CR72
            analytic = True
        elif model == MODEL_NS_MMQ_2SITE and MODEL_MMQ_CR72 in self.models:
            nested_pipe = MODEL_MMQ_CR72
            analytic = True
        if model == MODEL_NS_R1RHO_2SITE and MODEL_MP05 in self.models:
            nested_pipe = MODEL_MP05
            analytic = True

        # No nesting.
        if not nested_pipe:
            print("No model nesting or model equivalence detected.")
            return False

        # Printout.
        if analytic:
            print("Model equivalence detected, copying the optimised parameters from the analytic '%s' model rather than performing a grid search." % nested_pipe)
        else:
            print("Model nesting detected, copying the optimised parameters from the '%s' model rather than performing a grid search." % nested_pipe)

        # Loop over the spins to copy the parameters.
        for spin, spin_id in spin_loop(return_id=True, skip_desel=True):
            # Get the nested spin.
            nested_spin = return_spin(spin_id=spin_id, pipe=nested_pipe)

            # The R20 parameters.
            if hasattr(nested_spin, 'r2'):
                if model in [MODEL_CR72_FULL, MODEL_NS_CPMG_2SITE_3D_FULL, MODEL_NS_CPMG_2SITE_STAR_FULL]:
                    setattr(spin, 'r2a', deepcopy(nested_spin.r2))
                    setattr(spin, 'r2b', deepcopy(nested_spin.r2))
                else:
                    setattr(spin, 'r2', deepcopy(nested_spin.r2))

            # The LM63 3-site model parameters.
            if model == MODEL_LM63_3SITE:
                setattr(spin, 'phi_ex_B', deepcopy(nested_spin.phi_ex))
                setattr(spin, 'phi_ex_C', deepcopy(nested_spin.phi_ex))
                setattr(spin, 'kB', deepcopy(nested_spin.kex))
                setattr(spin, 'kC', deepcopy(nested_spin.kex))

            # All other spin parameters.
            for param in spin.params:
                if param in ['r2', 'r2a', 'r2b']:
                    continue

                # The parameter does not exist.
                if not hasattr(nested_spin, param):
                    continue

                # Skip the LM63 3-site model parameters
                if model == MODEL_LM63_3SITE and param in ['phi_ex', 'kex']:
                    continue

                # Copy the parameter.
                setattr(spin, param, deepcopy(getattr(nested_spin, param)))

        # Nesting.
        return True
Exemple #35
0
def estimate_r2eff(method='minfx', min_algor='simplex', c_code=True, constraints=False, chi2_jacobian=False, spin_id=None, ftol=1e-15, xtol=1e-15, maxfev=10000000, factor=100.0, verbosity=1):
    """Estimate r2eff and errors by exponential curve fitting with scipy.optimize.leastsq or minfx.

    THIS IS ONLY FOR TESTING.

    scipy.optimize.leastsq is a wrapper around MINPACK's lmdif and lmder algorithms.

    MINPACK is a FORTRAN90 library which solves systems of nonlinear equations, or carries out the least squares minimization of the residual of a set of linear or nonlinear equations.

    Errors are calculated by taking the square root of the reported co-variance.

    This can be a huge time-saving step when performing model fitting in R1rho.
    Errors of R2eff values are normally estimated by time-consuming Monte Carlo simulations.

    The initial guess for the starting parameters x0 = [r2eff_est, i0_est] is found by converting the exponential curve to a linear problem.
    The initial guess is then solved by linear least squares of: ln(Intensity[j]) = ln(i0) - time[j] * r2eff.


    @keyword method:            The method to minimise and estimate errors.  Options are: 'minfx' or 'scipy.optimize.leastsq'.
    @type method:               string
    @keyword min_algor:         The minimisation algorithm.
    @type min_algor:            string
    @keyword c_code:            Whether to optimise with the C code.
    @type c_code:               bool
    @keyword constraints:       Whether constraints should be used.
    @type constraints:          bool
    @keyword chi2_jacobian:     Whether the chi2 Jacobian should be used.
    @type chi2_jacobian:        bool
    @keyword spin_id:           The spin identification string.
    @type spin_id:              str
    @keyword ftol:              The function tolerance for the relative error desired in the sum of squares, passed to leastsq.
    @type ftol:                 float
    @keyword xtol:              The error tolerance for the relative error desired in the approximate solution, passed to leastsq.
    @type xtol:                 float
    @keyword maxfev:            The maximum number of function evaluations, passed to leastsq.  If zero, then 100*(N+1) is the maximum number of function calls, where N is the number of elements in x0=[r2eff, i0].
    @type maxfev:               int
    @keyword factor:            The initial step bound, passed to leastsq.  It determines the initial step bound as factor * ||diag * x||, and should be in the interval (0.1, 100).
    @type factor:               float
    @keyword verbosity:         The amount of information to print.  The higher the value, the greater the verbosity.
    @type verbosity:            int
    """

    # Perform checks.
    check_model_type(model=MODEL_R2EFF)

    # Check that the C modules have been compiled.
    if not C_module_exp_fn and method == 'minfx':
        raise RelaxError("Relaxation curve fitting is not available.  Try compiling the C modules on your platform.")

    # Set class scipy setting.
    E = Exp(verbosity=verbosity)
    E.set_settings_leastsq(ftol=ftol, xtol=xtol, maxfev=maxfev, factor=factor)

    # Check if intensity errors have already been calculated by the user.
    precalc = True
    for cur_spin, mol_name, resi, resn, cur_spin_id in spin_loop(selection=spin_id, full_info=True, return_id=True, skip_desel=True):
        # No peak intensity errors.
        if not hasattr(cur_spin, 'peak_intensity_err'):
            precalc = False
            break

        # Determine if a spectrum ID is missing from the list.
        for id in cdp.spectrum_ids:
            if id not in cur_spin.peak_intensity_err:
                precalc = False
                break

    # Loop over the spins.
    for cur_spin, mol_name, resi, resn, cur_spin_id in spin_loop(selection=spin_id, full_info=True, return_id=True, skip_desel=True):
        # Generate spin string.
        spin_string = generate_spin_string(spin=cur_spin, mol_name=mol_name, res_num=resi, res_name=resn)

        # Print information.
        if E.verbosity >= 1:
            # Individual spin block section.
            top = 2
            if E.verbosity >= 2:
                top += 2
            subsection(file=sys.stdout, text="Fitting with %s to: %s"%(method, spin_string), prespace=top)
            if method == 'minfx':
                subsection(file=sys.stdout, text="min_algor='%s', c_code=%s, constraints=%s, chi2_jacobian?=%s"%(min_algor, c_code, constraints, chi2_jacobian), prespace=0)

        # Loop over each spectrometer frequency and dispersion point.
        for exp_type, frq, offset, point, ei, mi, oi, di in loop_exp_frq_offset_point(return_indices=True):
            # The parameter key.
            param_key = return_param_key_from_data(exp_type=exp_type, frq=frq, offset=offset, point=point)

            # The peak intensities, errors and times.
            values = []
            errors = []
            times = []
            for time in loop_time(exp_type=exp_type, frq=frq, offset=offset, point=point):
                values.append(average_intensity(spin=cur_spin, exp_type=exp_type, frq=frq, offset=offset, point=point, time=time))
                errors.append(average_intensity(spin=cur_spin, exp_type=exp_type, frq=frq, offset=offset, point=point, time=time, error=True))
                times.append(time)

            # Convert to numpy array.
            values = asarray(values)
            errors = asarray(errors)
            times = asarray(times)

            # Initialise data.
            E.setup_data(values=values, errors=errors, times=times)

            # Get the result based on method.
            if method == 'scipy.optimize.leastsq':
                # Acquire results.
                results = minimise_leastsq(E=E)

            elif method == 'minfx':
                # Set settings.
                E.set_settings_minfx(min_algor=min_algor, c_code=c_code, chi2_jacobian=chi2_jacobian, constraints=constraints)

                # Acquire results.
                results = minimise_minfx(E=E)
            else:
                raise RelaxError("Method for minimisation not known. Try setting: method='scipy.optimize.leastsq'.")

            # Unpack the results.
            param_vector, param_vector_error, chi2, iter_count, f_count, g_count, h_count, warning = results

            # Extract values.
            r2eff = param_vector[0]
            i0 = param_vector[1]
            r2eff_err = param_vector_error[0]
            i0_err = param_vector_error[1]

            # Disassemble the parameter vector.
            disassemble_param_vector(param_vector=param_vector, spins=[cur_spin], key=param_key)

            # Errors.
            if not hasattr(cur_spin, 'r2eff_err'):
                setattr(cur_spin, 'r2eff_err', deepcopy(getattr(cur_spin, 'r2eff')))
            if not hasattr(cur_spin, 'i0_err'):
                setattr(cur_spin, 'i0_err', deepcopy(getattr(cur_spin, 'i0')))

            # Set error.
            cur_spin.r2eff_err[param_key] = r2eff_err
            cur_spin.i0_err[param_key] = i0_err

            # Chi-squared statistic.
            cur_spin.chi2 = chi2

            # Function call count.
            cur_spin.f_count = f_count

            # Warning.
            cur_spin.warning = warning

            # Print information.
            print_strings = []
            if E.verbosity >= 1:
                # Add print strings.
                point_info = "%s at %3.1f MHz, for offset=%3.3f ppm and dispersion point %-5.1f, with %i time points." % (exp_type, frq/1E6, offset, point, len(times))
                print_strings.append(point_info)

                par_info = "r2eff=%3.3f r2eff_err=%3.4f, i0=%6.1f, i0_err=%3.4f, chi2=%3.3f.\n" % ( r2eff, r2eff_err, i0, i0_err, chi2)
                print_strings.append(par_info)

                if E.verbosity >= 2:
                    time_info = ', '.join(map(str, times))
                    print_strings.append('For time array: '+time_info+'.\n\n')

            # Printout.
            if len(print_strings) > 0:
                for print_string in print_strings:
                    print(print_string),
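
The linearised initial guess described in the docstring can be sketched as a first-order polynomial fit of ln(I) against time.  The helper below is hypothetical, and assumes strictly positive intensities with equal weighting.

# Hypothetical sketch of the initial guess x0 = [r2eff_est, i0_est], from the
# linearisation ln(I[j]) = ln(i0) - time[j] * r2eff.
from numpy import asarray, exp, log, polyfit

def linear_guess(times, values):
    """Estimate [r2eff, i0] by linear least squares on the log intensities."""
    slope, intercept = polyfit(asarray(times), log(asarray(values)), 1)
    return [-slope, exp(intercept)]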