Exemplo n.º 1
0
    def overfit_deselect(self, data_check=True, verbose=True):
        """Deselect spins which have insufficient data to support calculation.

        A spin is deselected if it has no relaxation mechanism (neither a dipolar
        pair nor a CSA value), if any of its relaxation data is infinite, or if it
        has fewer than 3 relaxation data points.

        @keyword data_check:    A flag to signal if the presence of base data is to be checked for.
        @type data_check:       bool
        @keyword verbose:       A flag which if True will allow printouts.
        @type verbose:          bool
        """

        # Print out.
        if verbose:
            print("\nOver-fit spin deselection:")

        # Test the sequence data exists.
        if not exists_mol_res_spin_data():
            raise RelaxNoSequenceError

        # Loop over spin data.
        deselect_flag = False
        spin_count = 0
        for spin, spin_id in spin_loop(return_id=True):
            # Skip deselected spins.
            if not spin.select:
                continue

            # The interatomic data.
            interatoms = return_interatom_list(spin_id)

            # Search for a dipolar relaxation mechanism, stopping at the first hit.
            dipole_relax = False
            for interatom in interatoms:
                if interatom.dipole_pair:
                    dipole_relax = True
                    break

            # No relaxation mechanism (requires a dipolar pair and a CSA value).
            if not dipole_relax or getattr(spin, 'csa', None) is None:
                warn(RelaxDeselectWarning(spin_id, 'an absence of relaxation mechanisms'))
                spin.select = False
                deselect_flag = True
                continue

            # Data checks.
            if data_check:
                # Count the relaxation data points, also checking for infinite values.
                data_points = 0
                inf_data = False
                if hasattr(cdp, 'ri_ids') and hasattr(spin, 'ri_data'):
                    for ri_id in cdp.ri_ids:
                        if ri_id in spin.ri_data and spin.ri_data[ri_id] is not None:
                            data_points += 1

                            # Infinite data!
                            if isInf(spin.ri_data[ri_id]):
                                inf_data = True

                # Infinite data.
                if inf_data:
                    warn(RelaxDeselectWarning(spin_id, 'infinite relaxation data'))
                    spin.select = False
                    deselect_flag = True
                    continue

                # Relaxation data must exist!
                if not hasattr(spin, 'ri_data'):
                    warn(RelaxDeselectWarning(spin_id, 'missing relaxation data'))
                    spin.select = False
                    deselect_flag = True
                    continue

                # Require 3 or more relaxation data points.
                if data_points < 3:
                    warn(RelaxDeselectWarning(spin_id, 'insufficient relaxation data, 3 or more data points are required'))
                    spin.select = False
                    deselect_flag = True
                    continue

            # Increment the count of spins which survived the checks.
            spin_count += 1

        # No spins selected, so warn hard to prevent the user from going any further.
        if spin_count == 0:
            warn(RelaxWarning("No spins are selected therefore the optimisation or calculation cannot proceed."))

        # Final printout.
        if verbose and not deselect_flag:
            print("No spins have been deselected.")
Exemplo n.º 2
0
def disassemble_result(
    param_vector=None,
    func=None,
    iter=None,
    fc=None,
    gc=None,
    hc=None,
    warning=None,
    spin=None,
    sim_index=None,
    model_type=None,
    scaling_matrix=None,
):
    """Disassemble the optimisation results.

    @keyword param_vector:      The model-free parameter vector.
    @type param_vector:         numpy array
    @keyword func:              The optimised chi-squared value.
    @type func:                 float
    @keyword iter:              The number of optimisation steps required to find the minimum.
    @type iter:                 int
    @keyword fc:                The function count.
    @type fc:                   int
    @keyword gc:                The gradient count.
    @type gc:                   int
    @keyword hc:                The Hessian count.
    @type hc:                   int
    @keyword warning:           Any optimisation warnings.
    @type warning:              str or None
    @keyword spin:              The spin container.
    @type spin:                 SpinContainer instance or None
    @keyword sim_index:         The Monte Carlo simulation index.
    @type sim_index:            int or None
    @keyword model_type:        The model-free model type, one of 'mf', 'local_tm', 'diff', or
                                'all'.
    @type model_type:           str
    @keyword scaling_matrix:    The diagonal, square scaling matrix.
    @type scaling_matrix:       numpy diagonal matrix
    """

    # No result, so nothing to disassemble.
    if param_vector is None:
        return

    # Alias the current data pipe.
    cdp = pipes.get_pipe()

    # Catch infinite chi-squared values.
    if isInf(func):
        raise RelaxInfError("chi-squared")

    # Catch chi-squared values of NaN.
    if isNaN(func):
        raise RelaxNaNError("chi-squared")

    # Remove the parameter scaling.
    if scaling_matrix is not None:
        param_vector = dot(scaling_matrix, param_vector)

    # Check if the chi-squared value is lower.  This allows for a parallelised grid search!
    if sim_index is None:
        # Get the current chi-squared value, if one exists.
        # NOTE(review): the hasattr() check targets cdp while the mf/local_tm value is
        # read from spin — confirm this asymmetry is intentional.
        chi2 = None
        if model_type in ("mf", "local_tm") and hasattr(cdp, "chi2"):
            chi2 = spin.chi2
        if model_type in ("diff", "all") and hasattr(cdp, "chi2"):
            chi2 = cdp.chi2

        # Spin text for the printouts.
        spin_text = ""
        if spin is not None and hasattr(spin, "_spin_ids") and len(spin._spin_ids):
            spin_text = " for the spin '%s'" % spin._spin_ids[0]

        # No improvement.
        if chi2 is not None and func >= chi2:
            print(
                "Discarding the optimisation results%s, the optimised chi-squared value is higher than the current value (%s >= %s)."
                % (spin_text, func, chi2)
            )

            # Exit!
            return

        # New minimum.
        else:
            print(
                "Storing the optimisation results%s, the optimised chi-squared value is lower than the current value (%s < %s)."
                % (spin_text, func, chi2)
            )

    # Disassemble the parameter vector.
    disassemble_param_vector(model_type, param_vector=param_vector, spin=spin, sim_index=sim_index)

    # Select the container holding the minimisation statistics:  the spin for the
    # sequence specific models, the data pipe for the global models.
    if model_type in ("mf", "local_tm"):
        container = spin
    elif model_type in ("diff", "all"):
        container = cdp
    else:
        container = None

    # Unknown model type, so no statistics to store.
    if container is None:
        return

    # Monte Carlo simulation statistics.
    if sim_index is not None:
        container.chi2_sim[sim_index] = func
        container.iter_sim[sim_index] = iter
        container.f_count_sim[sim_index] = fc
        container.g_count_sim[sim_index] = gc
        container.h_count_sim[sim_index] = hc
        container.warning_sim[sim_index] = warning

    # Normal statistics.
    else:
        container.chi2 = func
        container.iter = iter
        container.f_count = fc
        container.g_count = gc
        container.h_count = hc
        container.warning = warning
Exemplo n.º 3
0
Arquivo: api.py Projeto: tlinnet/relax
    def overfit_deselect(self, data_check=True, verbose=True):
        """Deselect spins which have insufficient data to support calculation.

        A spin is deselected if it has no relaxation mechanism (neither a dipolar
        pair nor a CSA value), if any of its relaxation data is infinite, or if it
        has fewer than 3 relaxation data points.

        @keyword data_check:    A flag to signal if the presence of base data is to be checked for.
        @type data_check:       bool
        @keyword verbose:       A flag which if True will allow printouts.
        @type verbose:          bool
        """

        # Print out.
        if verbose:
            print("\nOver-fit spin deselection:")

        # Test the sequence data exists.
        if not exists_mol_res_spin_data():
            raise RelaxNoSequenceError

        # Loop over spin data.
        deselect_flag = False
        spin_count = 0
        for spin, spin_id in spin_loop(return_id=True):
            # Skip deselected spins.
            if not spin.select:
                continue

            # The interatomic data.
            interatoms = return_interatom_list(spin_hash=spin._hash)

            # Search for a dipolar relaxation mechanism, stopping at the first hit.
            dipole_relax = False
            for interatom in interatoms:
                if interatom.dipole_pair:
                    dipole_relax = True
                    break

            # No relaxation mechanism (requires a dipolar pair and a CSA value).
            if not dipole_relax or getattr(spin, 'csa', None) is None:
                warn(RelaxDeselectWarning(spin_id, 'an absence of relaxation mechanisms'))
                spin.select = False
                deselect_flag = True
                continue

            # Data checks.
            if data_check:
                # Count the relaxation data points, also checking for infinite values.
                data_points = 0
                inf_data = False
                if hasattr(cdp, 'ri_ids') and hasattr(spin, 'ri_data'):
                    for ri_id in cdp.ri_ids:
                        if ri_id in spin.ri_data and spin.ri_data[ri_id] is not None:
                            data_points += 1

                            # Infinite data!
                            if isInf(spin.ri_data[ri_id]):
                                inf_data = True

                # Infinite data.
                if inf_data:
                    warn(RelaxDeselectWarning(spin_id, 'infinite relaxation data'))
                    spin.select = False
                    deselect_flag = True
                    continue

                # Relaxation data must exist!
                if not hasattr(spin, 'ri_data'):
                    warn(RelaxDeselectWarning(spin_id, 'missing relaxation data'))
                    spin.select = False
                    deselect_flag = True
                    continue

                # Require 3 or more relaxation data points.
                if data_points < 3:
                    warn(RelaxDeselectWarning(spin_id, 'insufficient relaxation data, 3 or more data points are required'))
                    spin.select = False
                    deselect_flag = True
                    continue

            # Increment the count of spins which survived the checks.
            spin_count += 1

        # No spins selected, so warn hard to prevent the user from going any further.
        if spin_count == 0:
            warn(RelaxWarning("No spins are selected therefore the optimisation or calculation cannot proceed."))

        # Final printout.
        if verbose and not deselect_flag:
            print("No spins have been deselected.")
Exemplo n.º 4
0
def disassemble_result(param_vector=None,
                       func=None,
                       iter=None,
                       fc=None,
                       gc=None,
                       hc=None,
                       warning=None,
                       spin=None,
                       sim_index=None,
                       model_type=None,
                       scaling_matrix=None):
    """Disassemble the optimisation results.

    @keyword param_vector:      The model-free parameter vector.
    @type param_vector:         numpy array
    @keyword func:              The optimised chi-squared value.
    @type func:                 float
    @keyword iter:              The number of optimisation steps required to find the minimum.
    @type iter:                 int
    @keyword fc:                The function count.
    @type fc:                   int
    @keyword gc:                The gradient count.
    @type gc:                   int
    @keyword hc:                The Hessian count.
    @type hc:                   int
    @keyword warning:           Any optimisation warnings.
    @type warning:              str or None
    @keyword spin:              The spin container.
    @type spin:                 SpinContainer instance or None
    @keyword sim_index:         The Monte Carlo simulation index.
    @type sim_index:            int or None
    @keyword model_type:        The model-free model type, one of 'mf', 'local_tm', 'diff', or
                                'all'.
    @type model_type:           str
    @keyword scaling_matrix:    The diagonal, square scaling matrix.
    @type scaling_matrix:       numpy diagonal matrix
    """

    # No result, so nothing to disassemble.
    if param_vector is None:
        return

    # Alias the current data pipe.
    cdp = pipes.get_pipe()

    # Catch infinite chi-squared values.
    if isInf(func):
        raise RelaxInfError('chi-squared')

    # Catch chi-squared values of NaN.
    if isNaN(func):
        raise RelaxNaNError('chi-squared')

    # Remove the parameter scaling.
    if scaling_matrix is not None:
        param_vector = dot(scaling_matrix, param_vector)

    # Check if the chi-squared value is lower.  This allows for a parallelised grid search!
    if sim_index is None:
        # Get the current chi-squared value, if one exists.
        # NOTE(review): the hasattr() check targets cdp while the mf/local_tm value is
        # read from spin — confirm this asymmetry is intentional.
        chi2 = None
        if model_type in ('mf', 'local_tm') and hasattr(cdp, 'chi2'):
            chi2 = spin.chi2
        if model_type in ('diff', 'all') and hasattr(cdp, 'chi2'):
            chi2 = cdp.chi2

        # Spin text for the printouts.
        spin_text = ''
        if spin is not None and hasattr(spin, '_spin_ids') and len(spin._spin_ids):
            spin_text = " for the spin '%s'" % spin._spin_ids[0]

        # No improvement.
        if chi2 is not None and func >= chi2:
            print(
                "Discarding the optimisation results%s, the optimised chi-squared value is higher than the current value (%s >= %s)."
                % (spin_text, func, chi2))

            # Exit!
            return

        # New minimum.
        else:
            print(
                "Storing the optimisation results%s, the optimised chi-squared value is lower than the current value (%s < %s)."
                % (spin_text, func, chi2))

    # Disassemble the parameter vector.
    disassemble_param_vector(model_type,
                             param_vector=param_vector,
                             spin=spin,
                             sim_index=sim_index)

    # Select the container holding the minimisation statistics:  the spin for the
    # sequence specific models, the data pipe for the global models.
    if model_type in ('mf', 'local_tm'):
        container = spin
    elif model_type in ('diff', 'all'):
        container = cdp
    else:
        container = None

    # Unknown model type, so no statistics to store.
    if container is None:
        return

    # Monte Carlo simulation statistics.
    if sim_index is not None:
        container.chi2_sim[sim_index] = func
        container.iter_sim[sim_index] = iter
        container.f_count_sim[sim_index] = fc
        container.g_count_sim[sim_index] = gc
        container.h_count_sim[sim_index] = hc
        container.warning_sim[sim_index] = warning

    # Normal statistics.
    else:
        container.chi2 = func
        container.iter = iter
        container.f_count = fc
        container.g_count = gc
        container.h_count = hc
        container.warning = warning
Exemplo n.º 5
0
    def minimise(self, min_algor=None, min_options=None, func_tol=None, grad_tol=None, max_iterations=None, constraints=False, scaling_matrix=None, verbosity=0, sim_index=None, lower=None, upper=None, inc=None):
        """Minimisation function.

        @param min_algor:           The minimisation algorithm to use.
        @type min_algor:            str
        @param min_options:         An array of options to be used by the minimisation algorithm.
        @type min_options:          array of str
        @param func_tol:            The function tolerance which, when reached, terminates optimisation. Setting this to None turns off the check.
        @type func_tol:             None or float
        @param grad_tol:            The gradient tolerance which, when reached, terminates optimisation. Setting this to None turns off the check.
        @type grad_tol:             None or float
        @param max_iterations:      The maximum number of iterations for the algorithm.
        @type max_iterations:       int
        @param constraints:         If True, constraints are used during optimisation.
        @type constraints:          bool
        @keyword scaling_matrix:    The per-model list of diagonal and square scaling matrices.
        @type scaling_matrix:       list of numpy rank-2, float64 array or list of None
        @param verbosity:           A flag specifying the amount of information to print.  The higher the value, the greater the verbosity.
        @type verbosity:            int
        @param sim_index:           The index of the simulation to optimise.  This should be None if normal optimisation is desired.
        @type sim_index:            None or int
        @keyword lower:             The per-model lower bounds of the grid search which must be equal to the number of parameters in the model.  This optional argument is only used when doing a grid search.
        @type lower:                list of lists of numbers
        @keyword upper:             The per-model upper bounds of the grid search which must be equal to the number of parameters in the model.  This optional argument is only used when doing a grid search.
        @type upper:                list of lists of numbers
        @keyword inc:               The per-model increments for each dimension of the space for the grid search.  The number of elements in the array must equal to the number of parameters in the model.  This argument is only used when doing a grid search.
        @type inc:                  list of lists of int
        """

        # Set up the target function for direct calculation.
        model, param_vector, data_types = target_fn_setup(sim_index=sim_index, scaling_matrix=scaling_matrix[0], verbosity=verbosity)

        # Nothing to do!
        if not len(param_vector):
            warn(RelaxWarning("The model has no parameters, minimisation cannot be performed."))
            return

        # Right, constraints cannot be used for the 'fixed' model.
        if constraints and cdp.model == 'fixed':
            if verbosity:
                warn(RelaxWarning("Turning constraints off.  These cannot be used for the 'fixed' model."))
            constraints = False

            # Pop out the Method of Multipliers algorithm.
            if min_algor == 'Method of Multipliers':
                min_algor = min_options[0]
                min_options = min_options[1:]

        # And constraints absolutely must be used for the 'population' model.
        if not constraints and cdp.model == 'population':
            warn(RelaxWarning("Turning constraints on.  These absolutely must be used for the 'population' model."))
            constraints = True

            # Add the Method of Multipliers algorithm.
            min_options = (min_algor,) + min_options
            min_algor = 'Method of Multipliers'

        # Disallow Newton optimisation and other Hessian optimisers for the paramagnetic centre position optimisation (the PCS Hessian is not yet implemented).
        if hasattr(cdp, 'paramag_centre_fixed') and not cdp.paramag_centre_fixed:
            if min_algor in ['newton']:
                raise RelaxError("For the paramagnetic centre position, as the Hessians are not yet implemented Newton optimisation cannot be performed.")

        # Linear constraints.
        A, b = None, None
        if constraints:
            A, b = linear_constraints(data_types=data_types, scaling_matrix=scaling_matrix[0])

        # Grid search.
        if search('^[Gg]rid', min_algor):
            # The search.
            results = grid(func=model.func, args=(), num_incs=inc[0], lower=lower[0], upper=upper[0], A=A, b=b, verbosity=verbosity)

            # Unpack the results.  The grid search performs no gradient or Hessian calls.
            param_vector, func, iter_count, warning = results
            f_count = iter_count
            g_count = 0.0
            h_count = 0.0

        # Minimisation.
        else:
            results = generic_minimise(func=model.func, dfunc=model.dfunc, d2func=model.d2func, args=(), x0=param_vector, min_algor=min_algor, min_options=min_options, func_tol=func_tol, grad_tol=grad_tol, maxiter=max_iterations, A=A, b=b, full_output=1, print_flag=verbosity)

            # Unpack the results.
            if results is None:
                return
            param_vector, func, iter_count, f_count, g_count, h_count, warning = results

        # Catch infinite chi-squared values.
        if isInf(func):
            raise RelaxInfError('chi-squared')

        # Catch chi-squared values of NaN.
        if isNaN(func):
            raise RelaxNaNError('chi-squared')

        # Make a last function call to update the back-calculated RDC and PCS structures to the optimal values (the return value is unused).
        chi2 = model.func(param_vector)

        # Remove the parameter scaling.
        if scaling_matrix[0] is not None:
            param_vector = dot(scaling_matrix[0], param_vector)

        # Disassemble the parameter vector.
        disassemble_param_vector(param_vector=param_vector, data_types=data_types, sim_index=sim_index)

        # Monte Carlo minimisation statistics.
        if sim_index is not None:
            # Chi-squared statistic.
            cdp.chi2_sim[sim_index] = func

            # Iterations.
            cdp.iter_sim[sim_index] = iter_count

            # Function evaluations.
            cdp.f_count_sim[sim_index] = f_count

            # Gradient evaluations.
            cdp.g_count_sim[sim_index] = g_count

            # Hessian evaluations.
            cdp.h_count_sim[sim_index] = h_count

            # Warning.
            cdp.warning_sim[sim_index] = warning

        # Normal statistics.
        else:
            # Chi-squared statistic.
            cdp.chi2 = func

            # Iterations.
            cdp.iter = iter_count

            # Function evaluations.
            cdp.f_count = f_count

            # Gradient evaluations.
            cdp.g_count = g_count

            # Hessian evaluations.
            cdp.h_count = h_count

            # Warning.
            cdp.warning = warning

        # Statistical analysis.
        if 'rdc' in data_types or 'pcs' in data_types:
            # Get the final back calculated data (used for the Q factors below).
            minimise_bc_data(model, sim_index=sim_index)

            # Calculate the RDC Q factors.
            if 'rdc' in data_types:
                rdc.q_factors(sim_index=sim_index, verbosity=verbosity)

            # Calculate the PCS Q factors.
            if 'pcs' in data_types:
                pcs.q_factors(sim_index=sim_index, verbosity=verbosity)
Exemplo n.º 6
0
Arquivo: api.py Projeto: tlinnet/relax
    def minimise(self,
                 min_algor=None,
                 min_options=None,
                 func_tol=None,
                 grad_tol=None,
                 max_iterations=None,
                 constraints=False,
                 scaling_matrix=None,
                 verbosity=0,
                 sim_index=None,
                 lower=None,
                 upper=None,
                 inc=None):
        """Minimisation function.

        @param min_algor:           The minimisation algorithm to use.
        @type min_algor:            str
        @param min_options:         An array of options to be used by the minimisation algorithm.
        @type min_options:          array of str
        @param func_tol:            The function tolerance which, when reached, terminates optimisation. Setting this to None turns off the check.
        @type func_tol:             None or float
        @param grad_tol:            The gradient tolerance which, when reached, terminates optimisation. Setting this to None turns off the check.
        @type grad_tol:             None or float
        @param max_iterations:      The maximum number of iterations for the algorithm.
        @type max_iterations:       int
        @param constraints:         If True, constraints are used during optimisation.
        @type constraints:          bool
        @keyword scaling_matrix:    The per-model list of diagonal and square scaling matrices.
        @type scaling_matrix:       list of numpy rank-2, float64 array or list of None
        @param verbosity:           A flag specifying the amount of information to print.  The higher the value, the greater the verbosity.
        @type verbosity:            int
        @param sim_index:           The index of the simulation to optimise.  This should be None if normal optimisation is desired.
        @type sim_index:            None or int
        @keyword lower:             The per-model lower bounds of the grid search which must be equal to the number of parameters in the model.  This optional argument is only used when doing a grid search.
        @type lower:                list of lists of numbers
        @keyword upper:             The per-model upper bounds of the grid search which must be equal to the number of parameters in the model.  This optional argument is only used when doing a grid search.
        @type upper:                list of lists of numbers
        @keyword inc:               The per-model increments for each dimension of the space for the grid search.  The number of elements in the array must equal to the number of parameters in the model.  This argument is only used when doing a grid search.
        @type inc:                  list of lists of int
        """

        # Set up the target function for direct calculation.
        model, param_vector, data_types = target_fn_setup(
            sim_index=sim_index,
            scaling_matrix=scaling_matrix[0],
            verbosity=verbosity)

        # Nothing to do!
        if not len(param_vector):
            warn(
                RelaxWarning(
                    "The model has no parameters, minimisation cannot be performed."
                ))
            return

        # Right, constraints cannot be used for the 'fixed' model.
        if constraints and cdp.model == 'fixed':
            if verbosity:
                warn(
                    RelaxWarning(
                        "Turning constraints off.  These cannot be used for the 'fixed' model."
                    ))
            constraints = False

            # Pop out the Method of Multipliers algorithm.
            if min_algor == 'Method of Multipliers':
                min_algor = min_options[0]
                min_options = min_options[1:]

        # And constraints absolutely must be used for the 'population' model.
        if not constraints and cdp.model == 'population':
            warn(
                RelaxWarning(
                    "Turning constraints on.  These absolutely must be used for the 'population' model."
                ))
            constraints = True

            # Add the Method of Multipliers algorithm.
            min_options = (min_algor, ) + min_options
            min_algor = 'Method of Multipliers'

        # Disallow Newton optimisation and other Hessian optimisers for the paramagnetic centre position optimisation (the PCS Hessian is not yet implemented).
        if hasattr(cdp,
                   'paramag_centre_fixed') and not cdp.paramag_centre_fixed:
            if min_algor in ['newton']:
                raise RelaxError(
                    "For the paramagnetic centre position, as the Hessians are not yet implemented Newton optimisation cannot be performed."
                )

        # Linear constraints.
        A, b = None, None
        if constraints:
            A, b = linear_constraints(data_types=data_types,
                                      scaling_matrix=scaling_matrix[0])

        # Grid search.
        if search('^[Gg]rid', min_algor):
            # The search.
            results = grid(func=model.func,
                           args=(),
                           num_incs=inc[0],
                           lower=lower[0],
                           upper=upper[0],
                           A=A,
                           b=b,
                           verbosity=verbosity)

            # Unpack the results.  The grid search performs no gradient or Hessian calls.
            param_vector, func, iter_count, warning = results
            f_count = iter_count
            g_count = 0.0
            h_count = 0.0

        # Minimisation.
        else:
            results = generic_minimise(func=model.func,
                                       dfunc=model.dfunc,
                                       d2func=model.d2func,
                                       args=(),
                                       x0=param_vector,
                                       min_algor=min_algor,
                                       min_options=min_options,
                                       func_tol=func_tol,
                                       grad_tol=grad_tol,
                                       maxiter=max_iterations,
                                       A=A,
                                       b=b,
                                       full_output=1,
                                       print_flag=verbosity)

            # Unpack the results.
            if results is None:
                return
            param_vector, func, iter_count, f_count, g_count, h_count, warning = results

        # Catch infinite chi-squared values.
        if isInf(func):
            raise RelaxInfError('chi-squared')

        # Catch chi-squared values of NaN.
        if isNaN(func):
            raise RelaxNaNError('chi-squared')

        # Make a last function call to update the back-calculated RDC and PCS structures to the optimal values (the return value is unused).
        chi2 = model.func(param_vector)

        # Remove the parameter scaling.
        if scaling_matrix[0] is not None:
            param_vector = dot(scaling_matrix[0], param_vector)

        # Disassemble the parameter vector.
        disassemble_param_vector(param_vector=param_vector,
                                 data_types=data_types,
                                 sim_index=sim_index)

        # Monte Carlo minimisation statistics.
        if sim_index is not None:
            # Chi-squared statistic.
            cdp.chi2_sim[sim_index] = func

            # Iterations.
            cdp.iter_sim[sim_index] = iter_count

            # Function evaluations.
            cdp.f_count_sim[sim_index] = f_count

            # Gradient evaluations.
            cdp.g_count_sim[sim_index] = g_count

            # Hessian evaluations.
            cdp.h_count_sim[sim_index] = h_count

            # Warning.
            cdp.warning_sim[sim_index] = warning

        # Normal statistics.
        else:
            # Chi-squared statistic.
            cdp.chi2 = func

            # Iterations.
            cdp.iter = iter_count

            # Function evaluations.
            cdp.f_count = f_count

            # Gradient evaluations.
            cdp.g_count = g_count

            # Hessian evaluations.
            cdp.h_count = h_count

            # Warning.
            cdp.warning = warning

        # Statistical analysis.
        if 'rdc' in data_types or 'pcs' in data_types:
            # Get the final back calculated data (used for the Q factors below).
            minimise_bc_data(model, sim_index=sim_index)

            # Calculate the RDC Q factors.
            if 'rdc' in data_types:
                rdc.q_factors(sim_index=sim_index, verbosity=verbosity)

            # Calculate the PCS Q factors.
            if 'pcs' in data_types:
                pcs.q_factors(sim_index=sim_index, verbosity=verbosity)