Example #1
    def optimise(self):
        """Model-free optimisation.

        @return:    The optimisation results consisting of the parameter vector, function value, iteration count, function count, gradient count, Hessian count, and warnings.
        @rtype:     tuple of numpy array, float, int, int, int, int, str
        """

        # Minimisation.
        results = generic_minimise(
            func=self.mf.func,
            dfunc=self.mf.dfunc,
            d2func=self.mf.d2func,
            args=(),
            x0=self.opt_params.param_vector,
            min_algor=self.opt_params.min_algor,
            min_options=self.opt_params.min_options,
            func_tol=self.opt_params.func_tol,
            grad_tol=self.opt_params.grad_tol,
            maxiter=self.opt_params.max_iterations,
            A=self.opt_params.A,
            b=self.opt_params.b,
            full_output=True,
            print_flag=self.opt_params.verbosity,
        )

        # Return the minfx results unmodified.
        return results
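
For orientation, here is a minimal, self-contained sketch of the same generic_minimise() call pattern, assuming minfx is installed and importable as minfx.generic; the quadratic target and starting point are illustrative stand-ins for the model-free chi-squared function, not part of relax:

# A minimal sketch of the minfx call pattern above, fitting a toy quadratic
# with the derivative-free simplex algorithm.  Only the API usage mirrors the
# example; the target function is an assumption for illustration.
from numpy import array
from minfx.generic import generic_minimise

def func(x):
    # A chi-squared-like surface with its minimum at (1.0, 2.0).
    return (x[0] - 1.0)**2 + (x[1] - 2.0)**2

results = generic_minimise(func=func, args=(), x0=array([0.0, 0.0]), min_algor='simplex', func_tol=1e-10, full_output=True, print_flag=0)
param_vector, fk, iter_count, f_count, g_count, h_count, warning = results
print(param_vector)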
Example #2
    def optimise(self):
        """Model-free optimisation.

        @return:    The optimisation results consisting of the parameter vector, function value, iteration count, function count, gradient count, Hessian count, and warnings.
        @rtype:     tuple of numpy array, float, int, int, int, int, str
        """

        # Minimisation.
        results = generic_minimise(func=self.mf.func,
                                   dfunc=self.mf.dfunc,
                                   d2func=self.mf.d2func,
                                   args=(),
                                   x0=self.opt_params.param_vector,
                                   min_algor=self.opt_params.min_algor,
                                   min_options=self.opt_params.min_options,
                                   func_tol=self.opt_params.func_tol,
                                   grad_tol=self.opt_params.grad_tol,
                                   maxiter=self.opt_params.max_iterations,
                                   A=self.opt_params.A,
                                   b=self.opt_params.b,
                                   full_output=True,
                                   print_flag=self.opt_params.verbosity)

        # Return the minfx results unmodified.
        return results
Example #3
def verify(min_algor='simplex', constraints=None):
    # Instantiate the profiling class.
    C = Profile()

    # Instantiate the Exp class.
    E = Exp(verbosity=0)

    # List to store chi2.
    chi2_list = []

    for values, errors, times, struct, num_times in C.loop_data():
        # Initialise the function to minimise.
        E.setup_data(values=values, errors=errors, times=times)

        # Initial guess for minimisation. Solved by linear least squares.
        x0 = asarray(E.estimate_x0_exp())

        E.set_settings_minfx(min_algor=min_algor, constraints=constraints)

        # Alias the target function and its derivatives.
        func = func_wrapper
        dfunc = dfunc_wrapper
        d2func = d2func_wrapper

        # Initialise the function to minimise.
        scaling_list = [1.0, 1.0]
        setup(num_params=len(x0),
              num_times=len(E.times),
              values=E.values,
              sd=E.errors,
              relax_times=E.times,
              scaling_matrix=scaling_list)

        results = generic_minimise(func=func,
                                   dfunc=dfunc,
                                   d2func=d2func,
                                   args=(),
                                   x0=x0,
                                   min_algor=E.min_algor,
                                   min_options=E.min_options,
                                   func_tol=E.func_tol,
                                   grad_tol=E.grad_tol,
                                   maxiter=E.max_iterations,
                                   A=E.A,
                                   b=E.b,
                                   full_output=True,
                                   print_flag=E.verbosity)

        # Unpack the results.
        param_vector, chi2, iter_count, f_count, g_count, h_count, warning = results

        # Add to list.
        chi2_list.append(chi2)

    return chi2_list
Example #4
    def run(self, processor, completed):
        """Set up and perform the optimisation."""

        # Print out.
        if self.verbosity >= 1:
            # Individual spin block section.
            top = 2
            if self.verbosity >= 2:
                top += 2
            subsection(file=sys.stdout, text="Fitting to the spin block %s"%self.spin_ids, prespace=top)

            # Grid search printout.
            if search('^[Gg]rid', self.min_algor):
                print("Unconstrained grid search size: %s (constraints may decrease this size).\n" % self.grid_size)

        # Initialise the function to minimise.
        model = Dispersion(model=self.spins[0].model, num_params=self.param_num, num_spins=len(self.spins), num_frq=len(self.fields), exp_types=self.exp_types, values=self.values, errors=self.errors, missing=self.missing, frqs=self.frqs, frqs_H=self.frqs_H, cpmg_frqs=self.cpmg_frqs, spin_lock_nu1=self.spin_lock_nu1, chemical_shifts=self.chemical_shifts, offset=self.offsets, tilt_angles=self.tilt_angles, r1=self.r1, relax_times=self.relax_times, scaling_matrix=self.scaling_matrix)

        # Grid search.
        if search('^[Gg]rid', self.min_algor):
            results = grid(func=model.func, args=(), num_incs=self.inc_new, lower=self.lower_new, upper=self.upper_new, A=self.A, b=self.b, verbosity=self.verbosity)

            # Unpack the results.
            param_vector, chi2, iter_count, warning = results
            f_count = iter_count
            g_count = 0.0
            h_count = 0.0

        # Minimisation.
        else:
            results = generic_minimise(func=model.func, args=(), x0=self.param_vector, min_algor=self.min_algor, min_options=self.min_options, func_tol=self.func_tol, grad_tol=self.grad_tol, maxiter=self.max_iterations, A=self.A, b=self.b, full_output=True, print_flag=self.verbosity)

            # Unpack the results.
            if results is None:
                return
            param_vector, chi2, iter_count, f_count, g_count, h_count, warning = results

        # Optimisation printout.
        if self.verbosity:
            print("\nOptimised parameter values:")
            for i in range(len(param_vector)):
                print("%-20s %25.15f" % (self.param_names[i], param_vector[i]*self.scaling_matrix[i, i]))

        # Printout.
        if self.sim_index is not None:
            print("Simulation %s, cluster %s" % (self.sim_index+1, self.spin_ids))

        # Create the result command object to send back to the master.
        processor.return_object(Disp_result_command(processor=processor, memo_id=self.memo_id, param_vector=param_vector, chi2=chi2, iter_count=iter_count, f_count=f_count, g_count=g_count, h_count=h_count, warning=warning, missing=self.missing, back_calc=model.back_calc, completed=False))
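
Note that grid() returns only four elements, which is why the gradient and Hessian counts are zeroed afterwards. A minimal sketch of the grid search call, assuming minfx is importable as minfx.grid; the target, increments and bounds are illustrative:

# A minimal sketch of the minfx grid search call pattern used above.
from minfx.grid import grid

def func(x):
    return (x[0] - 1.0)**2 + (x[1] - 2.0)**2

results = grid(func=func, args=(), num_incs=[11, 11], lower=[-5.0, -5.0], upper=[5.0, 5.0], A=None, b=None, verbosity=0)

# Only four elements are returned, hence the zeroed counts above.
param_vector, chi2, iter_count, warning = results
f_count, g_count, h_count = iter_count, 0, 0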
Example #5
    def run(self, processor, completed):
        """Set up and perform the optimisation."""

        # Print out.
        if self.verbosity >= 1:
            # Individual spin block section.
            top = 2
            if self.verbosity >= 2:
                top += 2
            subsection(file=sys.stdout, text="Fitting to the spin block %s"%self.spin_ids, prespace=top)

            # Grid search printout.
            if search('^[Gg]rid', self.min_algor):
                result = 1
                for x in self.inc:
                    result = mul(result, x)
                print("Unconstrained grid search size: %s (constraints may decrease this size).\n" % result)

        # Initialise the function to minimise.
        model = Dispersion(model=self.spins[0].model, num_params=self.param_num, num_spins=count_spins(self.spins), num_frq=len(self.fields), exp_types=self.exp_types, values=self.values, errors=self.errors, missing=self.missing, frqs=self.frqs, frqs_H=self.frqs_H, cpmg_frqs=self.cpmg_frqs, spin_lock_nu1=self.spin_lock_nu1, chemical_shifts=self.chemical_shifts, offset=self.offsets, tilt_angles=self.tilt_angles, r1=self.r1, relax_times=self.relax_times, scaling_matrix=self.scaling_matrix, r1_fit=self.r1_fit)

        # Grid search.
        if search('^[Gg]rid', self.min_algor):
            results = grid(func=model.func, args=(), num_incs=self.inc, lower=self.lower, upper=self.upper, A=self.A, b=self.b, verbosity=self.verbosity)

            # Unpack the results.
            param_vector, chi2, iter_count, warning = results
            f_count = iter_count
            g_count = 0.0
            h_count = 0.0

        # Minimisation.
        else:
            results = generic_minimise(func=model.func, args=(), x0=self.param_vector, min_algor=self.min_algor, min_options=self.min_options, func_tol=self.func_tol, grad_tol=self.grad_tol, maxiter=self.max_iterations, A=self.A, b=self.b, full_output=True, print_flag=self.verbosity)

            # Unpack the results.
            if results is None:
                return
            param_vector, chi2, iter_count, f_count, g_count, h_count, warning = results

        # Optimisation printout.
        if self.verbosity:
            print("\nOptimised parameter values:")
            for i in range(len(param_vector)):
                print("%-20s %25.15f" % (self.param_names[i], param_vector[i]*self.scaling_matrix[i, i]))

        # Create the result command object to send back to the master.
        processor.return_object(Disp_result_command(processor=processor, memo_id=self.memo_id, param_vector=param_vector, chi2=chi2, iter_count=iter_count, f_count=f_count, g_count=g_count, h_count=h_count, warning=warning, missing=self.missing, back_calc=model.get_back_calc(), completed=False))
Example #6
    def minimise(self, min_algor=None, min_options=None, func_tol=None, grad_tol=None, max_iterations=None, constraints=False, scaling=True, verbosity=0, sim_index=None, lower=None, upper=None, inc=None):
        """Relaxation curve fitting minimisation method.

        @keyword min_algor:         The minimisation algorithm to use.
        @type min_algor:            str
        @keyword min_options:       An array of options to be used by the minimisation algorithm.
        @type min_options:          array of str
        @keyword func_tol:          The function tolerance which, when reached, terminates optimisation.  Setting this to None turns off the check.
        @type func_tol:             None or float
        @keyword grad_tol:          The gradient tolerance which, when reached, terminates optimisation.  Setting this to None turns off the check.
        @type grad_tol:             None or float
        @keyword max_iterations:    The maximum number of iterations for the algorithm.
        @type max_iterations:       int
        @keyword constraints:       If True, constraints are used during optimisation.
        @type constraints:          bool
        @keyword scaling:           If True, diagonal scaling is enabled during optimisation to allow the problem to be better conditioned.
        @type scaling:              bool
        @keyword verbosity:         The amount of information to print.  The higher the value, the greater the verbosity.
        @type verbosity:            int
        @keyword sim_index:         The index of the simulation to optimise.  This should be None if normal optimisation is desired.
        @type sim_index:            None or int
        @keyword lower:             The lower bounds of the grid search, the number of which must equal the number of parameters in the model.  This optional argument is only used when doing a grid search.
        @type lower:                array of numbers
        @keyword upper:             The upper bounds of the grid search, the number of which must equal the number of parameters in the model.  This optional argument is only used when doing a grid search.
        @type upper:                array of numbers
        @keyword inc:               The increments for each dimension of the space for the grid search.  The number of elements in the array must equal the number of parameters in the model.  This argument is only used when doing a grid search.
        @type inc:                  array of int
        """

        # Test if sequence data is loaded.
        if not exists_mol_res_spin_data():
            raise RelaxNoSequenceError

        # Loop over the sequence.
        for spin, mol_name, res_num, res_name in spin_loop(full_info=True):
            # Skip deselected spins.
            if not spin.select:
                continue

            # Skip spins which have no data.
            if not hasattr(spin, 'intensities'):
                continue

            # Create the initial parameter vector.
            param_vector = self._assemble_param_vector(spin=spin)

            # Diagonal scaling.
            scaling_matrix = self._assemble_scaling_matrix(spin=spin, scaling=scaling)
            if len(scaling_matrix):
                param_vector = dot(inv(scaling_matrix), param_vector)

            # Get the grid search minimisation options.
            if match('^[Gg]rid', min_algor):
                inc, lower_new, upper_new = self._grid_search_setup(spin=spin, param_vector=param_vector, lower=lower, upper=upper, inc=inc, scaling_matrix=scaling_matrix)

            # Linear constraints.
            if constraints:
                A, b = self._linear_constraints(spin=spin, scaling_matrix=scaling_matrix)
            else:
                A, b = None, None

            # Print out.
            if verbosity >= 1:
                # Get the spin id string.
                spin_id = generate_spin_id_unique(mol_name=mol_name, res_num=res_num, res_name=res_name, spin_num=spin.num, spin_name=spin.name)

                # Individual spin printout.
                if verbosity >= 2:
                    print("\n\n")

                string = "Fitting to spin " + repr(spin_id)
                print("\n\n" + string)
                print(len(string) * '~')


            # Initialise the function to minimise.
            ######################################

            # The keys.
            keys = list(spin.intensities.keys())

            # The peak intensities and times.
            values = []
            errors = []
            times = []
            for key in keys:
                # The values.
                if sim_index is None:
                    values.append(spin.intensities[key])
                else:
                    values.append(spin.sim_intensities[sim_index][key])

                # The errors.
                errors.append(spin.intensity_err[key])

                # The relaxation times.
                times.append(cdp.relax_times[key])

            # The scaling matrix in a diagonalised list form.
            scaling_list = []
            for i in range(len(scaling_matrix)):
                scaling_list.append(scaling_matrix[i, i])

            setup(num_params=len(spin.params), num_times=len(values), values=values, sd=errors, relax_times=times, scaling_matrix=scaling_list)


            # Setup the minimisation algorithm when constraints are present.
            ################################################################

            if constraints and not match('^[Gg]rid', min_algor):
                algor = min_options[0]
            else:
                algor = min_algor


            # Levenberg-Marquardt minimisation.
            ###################################

            if match('[Ll][Mm]$', algor) or match('[Ll]evenburg-[Mm]arquardt$', algor):
                # Reconstruct the error data structure.
                lm_error = zeros(len(spin.relax_times), float64)
                index = 0
                for k in range(len(spin.relax_times)):
                    lm_error[index:index+len(relax_error[k])] = relax_error[k]
                    index = index + len(relax_error[k])

                min_options = min_options + (self.relax_fit.lm_dri, lm_error)


            # Minimisation.
            ###############

            # Grid search.
            if search('^[Gg]rid', min_algor):
                results = grid(func=self._func, args=(), num_incs=inc, lower=lower_new, upper=upper_new, A=A, b=b, verbosity=verbosity)

                # Unpack the results.
                param_vector, chi2, iter_count, warning = results
                f_count = iter_count
                g_count = 0.0
                h_count = 0.0

            # Minimisation.
            else:
                results = generic_minimise(func=self._func, dfunc=self._dfunc, d2func=self._d2func, args=(), x0=param_vector, min_algor=min_algor, min_options=min_options, func_tol=func_tol, grad_tol=grad_tol, maxiter=max_iterations, A=A, b=b, full_output=True, print_flag=verbosity)

                # Unpack the results.
                if results is None:
                    return
                param_vector, chi2, iter_count, f_count, g_count, h_count, warning = results

            # Scaling.
            if scaling:
                param_vector = dot(scaling_matrix, param_vector)

            # Disassemble the parameter vector.
            self._disassemble_param_vector(param_vector=param_vector, spin=spin, sim_index=sim_index)

            # Monte Carlo minimisation statistics.
            if sim_index is not None:
                # Chi-squared statistic.
                spin.chi2_sim[sim_index] = chi2

                # Iterations.
                spin.iter_sim[sim_index] = iter_count

                # Function evaluations.
                spin.f_count_sim[sim_index] = f_count

                # Gradient evaluations.
                spin.g_count_sim[sim_index] = g_count

                # Hessian evaluations.
                spin.h_count_sim[sim_index] = h_count

                # Warning.
                spin.warning_sim[sim_index] = warning


            # Normal statistics.
            else:
                # Chi-squared statistic.
                spin.chi2 = chi2

                # Iterations.
                spin.iter = iter_count

                # Function evaluations.
                spin.f_count = f_count

                # Gradient evaluations.
                spin.g_count = g_count

                # Hessian evaluations.
                spin.h_count = h_count

                # Warning.
                spin.warning = warning
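
The diagonal scaling seen above is a simple round trip: the parameter vector is divided by the scaling factors before optimisation, to better condition the search space, and multiplied back afterwards. A short sketch with assumed, illustrative scaling factors:

# Sketch of the scaling round trip used throughout these examples:
# dot(inv(S), x) before optimisation and dot(S, x) after, for a diagonal
# scaling matrix S.  The factor values are illustrative.
from numpy import array, diag, dot
from numpy.linalg import inv

scaling_matrix = diag([1.0, 1e6])      # E.g. a rate and a large intensity.
param_vector = array([12.5, 2.0e6])

scaled = dot(inv(scaling_matrix), param_vector)    # -> [12.5, 2.0]
restored = dot(scaling_matrix, scaled)             # -> the original vector.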
Example #7
        dic[i]['I0_e'] = I0_e

        # Now estimate with weighting.
        R_ew, I0_ew = est_x0_exp_weight(times=times, I=I_err, errors=errors)
        dic[i]['R_ew'] = R_ew
        dic[i]['I0_ew'] = I0_ew

        # Now estimate errors for parameters.
        sigma_R = point_r2eff_err(times=times, I=I_err, errors=errors)
        dic[i]['sigma_R'] = sigma_R

        # Minimisation.
        args = (times, I_err, errors)
        x0 = array([R_e, I0_e])

        params_minfx, chi2_minfx, iter_count, f_count, g_count, h_count, warning = generic_minimise(func=func_exp_chi2, dfunc=func_exp_chi2_grad, args=args, x0=x0, min_algor=min_algor, min_options=min_options, full_output=True, print_flag=0)
        R_m, I0_m = params_minfx
        dic[i]['R_m'] = R_m
        dic[i]['I0_m'] = I0_m

        # Estimate errors from Jacobian
        jacobian = func_exp_grad(params=params_minfx, times=times)
        dic[i]['jacobian'] = jacobian
        weights = 1. / errors**2
        dic[i]['weights'] = weights

        # Covariance matrix.
        pcov = multifit_covar(J=jacobian, weights=weights)
        dic[i]['pcov'] = pcov
        sigma_R_covar, sigma_I0_covar = sqrt(diag(pcov))
        dic[i]['sigma_R_covar'] = sigma_R_covar
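
The multifit_covar() step above follows the standard weighted least-squares relation pcov = (J^T W J)^-1, where W is the diagonal matrix of 1/sigma^2 weights. A plain-numpy sketch of the same computation; the helper name covar() is hypothetical:

# Plain-numpy sketch of the covariance step: pcov = inv(J^T W J), with the
# parameter errors then given by sqrt(diag(pcov)) as in the example above.
from numpy import diag
from numpy.linalg import inv

def covar(J, weights):
    # Hypothetical equivalent of multifit_covar(J=J, weights=weights).
    W = diag(weights)
    return inv(J.T.dot(W).dot(J))

# Usage mirroring the example:
#     pcov = covar(jacobian, weights)
#     sigma_R_covar, sigma_I0_covar = sqrt(diag(pcov))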
Example #8
File: api.py Project: tlinnet/relax
    def minimise(self,
                 min_algor=None,
                 min_options=None,
                 func_tol=None,
                 grad_tol=None,
                 max_iterations=None,
                 constraints=False,
                 scaling_matrix=None,
                 verbosity=0,
                 sim_index=None,
                 lower=None,
                 upper=None,
                 inc=None):
        """Minimisation function.

        @param min_algor:           The minimisation algorithm to use.
        @type min_algor:            str
        @param min_options:         An array of options to be used by the minimisation algorithm.
        @type min_options:          array of str
        @param func_tol:            The function tolerance which, when reached, terminates optimisation. Setting this to None turns off the check.
        @type func_tol:             None or float
        @param grad_tol:            The gradient tolerance which, when reached, terminates optimisation. Setting this to None turns off the check.
        @type grad_tol:             None or float
        @param max_iterations:      The maximum number of iterations for the algorithm.
        @type max_iterations:       int
        @param constraints:         If True, constraints are used during optimisation.
        @type constraints:          bool
        @keyword scaling_matrix:    The per-model list of diagonal and square scaling matrices.
        @type scaling_matrix:       list of numpy rank-2, float64 array or list of None
        @param verbosity:           A flag specifying the amount of information to print.  The higher the value, the greater the verbosity.
        @type verbosity:            int
        @param sim_index:           The index of the simulation to optimise.  This should be None if normal optimisation is desired.
        @type sim_index:            None or int
        @keyword lower:             The per-model lower bounds of the grid search, the number of which must equal the number of parameters in the model.  This optional argument is only used when doing a grid search.
        @type lower:                list of lists of numbers
        @keyword upper:             The per-model upper bounds of the grid search, the number of which must equal the number of parameters in the model.  This optional argument is only used when doing a grid search.
        @type upper:                list of lists of numbers
        @keyword inc:               The per-model increments for each dimension of the space for the grid search.  The number of elements in the array must equal the number of parameters in the model.  This argument is only used when doing a grid search.
        @type inc:                  list of lists of int
        """

        # Set up the target function for direct calculation.
        model, param_vector, data_types = target_fn_setup(
            sim_index=sim_index,
            scaling_matrix=scaling_matrix[0],
            verbosity=verbosity)

        # Nothing to do!
        if not len(param_vector):
            warn(
                RelaxWarning(
                    "The model has no parameters, minimisation cannot be performed."
                ))
            return

        # Right, constraints cannot be used for the 'fixed' model.
        if constraints and cdp.model == 'fixed':
            if verbosity:
                warn(
                    RelaxWarning(
                        "Turning constraints off.  These cannot be used for the 'fixed' model."
                    ))
            constraints = False

            # Pop out the Method of Multipliers algorithm.
            if min_algor == 'Method of Multipliers':
                min_algor = min_options[0]
                min_options = min_options[1:]

        # And constraints absolutely must be used for the 'population' model.
        if not constraints and cdp.model == 'population':
            warn(
                RelaxWarning(
                    "Turning constraints on.  These absolutely must be used for the 'population' model."
                ))
            constraints = True

            # Add the Method of Multipliers algorithm.
            min_options = (min_algor, ) + min_options
            min_algor = 'Method of Multipliers'

        # Disallow Newton optimisation and other Hessian optimisers for the paramagnetic centre position optimisation (the PCS Hessian is not yet implemented).
        if hasattr(cdp,
                   'paramag_centre_fixed') and not cdp.paramag_centre_fixed:
            if min_algor in ['newton']:
                raise RelaxError(
                    "For the paramagnetic centre position, as the Hessians are not yet implemented Newton optimisation cannot be performed."
                )

        # Linear constraints.
        A, b = None, None
        if constraints:
            A, b = linear_constraints(data_types=data_types,
                                      scaling_matrix=scaling_matrix[0])

        # Grid search.
        if search('^[Gg]rid', min_algor):
            # The search.
            results = grid(func=model.func,
                           args=(),
                           num_incs=inc[0],
                           lower=lower[0],
                           upper=upper[0],
                           A=A,
                           b=b,
                           verbosity=verbosity)

            # Unpack the results.
            param_vector, func, iter_count, warning = results
            f_count = iter_count
            g_count = 0.0
            h_count = 0.0

        # Minimisation.
        else:
            results = generic_minimise(func=model.func,
                                       dfunc=model.dfunc,
                                       d2func=model.d2func,
                                       args=(),
                                       x0=param_vector,
                                       min_algor=min_algor,
                                       min_options=min_options,
                                       func_tol=func_tol,
                                       grad_tol=grad_tol,
                                       maxiter=max_iterations,
                                       A=A,
                                       b=b,
                                       full_output=1,
                                       print_flag=verbosity)

            # Unpack the results.
            if results is None:
                return
            param_vector, func, iter_count, f_count, g_count, h_count, warning = results

        # Catch infinite chi-squared values.
        if isInf(func):
            raise RelaxInfError('chi-squared')

        # Catch chi-squared values of NaN.
        if isNaN(func):
            raise RelaxNaNError('chi-squared')

        # Make a last function call to update the back-calculated RDC and PCS structures to the optimal values.
        chi2 = model.func(param_vector)

        # Scaling.
        if scaling_matrix[0] is not None:
            param_vector = dot(scaling_matrix[0], param_vector)

        # Disassemble the parameter vector.
        disassemble_param_vector(param_vector=param_vector,
                                 data_types=data_types,
                                 sim_index=sim_index)

        # Monte Carlo minimisation statistics.
        if sim_index is not None:
            # Chi-squared statistic.
            cdp.chi2_sim[sim_index] = func

            # Iterations.
            cdp.iter_sim[sim_index] = iter_count

            # Function evaluations.
            cdp.f_count_sim[sim_index] = f_count

            # Gradient evaluations.
            cdp.g_count_sim[sim_index] = g_count

            # Hessian evaluations.
            cdp.h_count_sim[sim_index] = h_count

            # Warning.
            cdp.warning_sim[sim_index] = warning

        # Normal statistics.
        else:
            # Chi-squared statistic.
            cdp.chi2 = func

            # Iterations.
            cdp.iter = iter_count

            # Function evaluations.
            cdp.f_count = f_count

            # Gradient evaluations.
            cdp.g_count = g_count

            # Hessian evaluations.
            cdp.h_count = h_count

            # Warning.
            cdp.warning = warning

        # Statistical analysis.
        if 'rdc' in data_types or 'pcs' in data_types:
            # Get the final back-calculated data (for the Q factors).
            minimise_bc_data(model, sim_index=sim_index)

            # Calculate the RDC Q factors.
            if 'rdc' in data_types:
                rdc.q_factors(sim_index=sim_index, verbosity=verbosity)

            # Calculate the PCS Q factors.
            if 'pcs' in data_types:
                pcs.q_factors(sim_index=sim_index, verbosity=verbosity)
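
A detail of the constraint handling above worth making explicit: toggling constraints pushes the underlying optimiser name onto, or pops it off, the front of min_options, so that 'Method of Multipliers' wraps the real algorithm. The two branches are exact inverses:

# Sketch of the wrap/unwrap symmetry in the constraint handling above.
min_algor, min_options = 'newton', ()

# Turning constraints on (as for the 'population' model):
min_options = (min_algor,) + min_options
min_algor = 'Method of Multipliers'

# Turning constraints off again (as for the 'fixed' model):
min_algor, min_options = min_options[0], min_options[1:]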
Example #9
def minimise_r2eff(spins=None, spin_ids=None, min_algor=None, min_options=None, func_tol=None, grad_tol=None, max_iterations=None, constraints=False, scaling_matrix=None, verbosity=0, sim_index=None, lower=None, upper=None, inc=None):
    """Optimise the R2eff model by fitting the 2-parameter exponential curves.

    This mimics the R1 and R2 relax_fit analysis.


    @keyword spins:             The list of spins for the cluster.
    @type spins:                list of SpinContainer instances
    @keyword spin_ids:          The list of spin IDs for the cluster.
    @type spin_ids:             list of str
    @keyword min_algor:         The minimisation algorithm to use.
    @type min_algor:            str
    @keyword min_options:       An array of options to be used by the minimisation algorithm.
    @type min_options:          array of str
    @keyword func_tol:          The function tolerance which, when reached, terminates optimisation.  Setting this to None turns off the check.
    @type func_tol:             None or float
    @keyword grad_tol:          The gradient tolerance which, when reached, terminates optimisation.  Setting this to None turns off the check.
    @type grad_tol:             None or float
    @keyword max_iterations:    The maximum number of iterations for the algorithm.
    @type max_iterations:       int
    @keyword constraints:       If True, constraints are used during optimisation.
    @type constraints:          bool
    @keyword scaling_matrix:    The diagonal and square scaling matrix.
    @type scaling_matrix:       numpy rank-2, float64 array or None
    @keyword verbosity:         The amount of information to print.  The higher the value, the greater the verbosity.
    @type verbosity:            int
    @keyword sim_index:         The index of the simulation to optimise.  This should be None if normal optimisation is desired.
    @type sim_index:            None or int
    @keyword lower:             The model specific lower bounds of the grid search, the number of which must equal the number of parameters in the model.  This optional argument is only used when doing a grid search.
    @type lower:                list of numbers
    @keyword upper:             The model specific upper bounds of the grid search, the number of which must equal the number of parameters in the model.  This optional argument is only used when doing a grid search.
    @type upper:                list of numbers
    @keyword inc:               The model specific increments for each dimension of the space for the grid search.  The number of elements in the array must equal the number of parameters in the model.  This argument is only used when doing a grid search.
    @type inc:                  list of int
    """

    # Check that the C modules have been compiled.
    if not C_module_exp_fn:
        raise RelaxError("Relaxation curve fitting is not available.  Try compiling the C modules on your platform.")

    # Loop over the spins.
    for si in range(len(spins)):
        # Skip deselected spins.
        if not spins[si].select:
            continue

        # Loop over each spectrometer frequency and dispersion point.
        for exp_type, frq, offset, point in loop_exp_frq_offset_point():
            # The parameter key.
            param_key = return_param_key_from_data(exp_type=exp_type, frq=frq, offset=offset, point=point)

            # The initial parameter vector.
            param_vector = assemble_param_vector(spins=[spins[si]], key=param_key, sim_index=sim_index)

            # Diagonal scaling.
            if scaling_matrix is not None:
                param_vector = dot(inv(scaling_matrix), param_vector)

            # Linear constraints.
            A, b = None, None
            if constraints:
                A, b = linear_constraints(spins=[spins[si]], scaling_matrix=scaling_matrix)

            # Print out.
            if verbosity >= 1:
                # Individual spin section.
                top = 2
                if verbosity >= 2:
                    top += 2
                text = "Fitting to spin %s, frequency %s and dispersion point %s" % (spin_ids[si], frq, point)
                subsection(file=sys.stdout, text=text, prespace=top)

                # Grid search printout.
                if match('^[Gg]rid', min_algor):
                    result = 1
                    for x in inc:
                        result = mul(result, x)
                    print("Unconstrained grid search size: %s (constraints may decrease this size).\n" % result)

            # The peak intensities, errors and times.
            values = []
            errors = []
            times = []
            for time in loop_time(exp_type=exp_type, frq=frq, offset=offset, point=point):
                values.append(average_intensity(spin=spins[si], exp_type=exp_type, frq=frq, offset=offset, point=point, time=time, sim_index=sim_index))
                errors.append(average_intensity(spin=spins[si], exp_type=exp_type, frq=frq, offset=offset, point=point, time=time, error=True))
                times.append(time)

            # Raise an error if the number of time points is less than 3.
            if len(times) < 3:
                subsection(file=sys.stdout, text="Exponential curve fitting error for point:", prespace=2)
                point_info = "%s at %3.1f MHz, for offset=%3.3f ppm and dispersion point %-5.1f, with %i time points." % (exp_type, frq/1E6, offset, point, len(times))
                print(point_info)
                raise RelaxError("The data setup points to exponential curve fitting, but only %i time points were found, whereas 3 time points are the minimum.  If calculating R2eff values for fixed relaxation time period data, check that a reference intensity has been specified for each offset value."%(len(times)))

            # The scaling matrix in a diagonalised list form.
            scaling_list = []
            if scaling_matrix is None:
                for i in range(len(param_vector)):
                    scaling_list.append(1.0)
            else:
                for i in range(len(scaling_matrix)):
                    scaling_list.append(scaling_matrix[i, i])

            # Initialise the function to minimise.
            model = Relax_fit_opt(model='exp', num_params=len(param_vector), values=values, errors=errors, relax_times=times, scaling_matrix=scaling_list)

            # Grid search.
            if search('^[Gg]rid', min_algor):
                results = grid(func=model.func, args=(), num_incs=inc, lower=lower, upper=upper, A=A, b=b, verbosity=verbosity)

                # Unpack the results.
                param_vector, chi2, iter_count, warning = results
                f_count = iter_count
                g_count = 0.0
                h_count = 0.0

            # Minimisation.
            else:
                results = generic_minimise(func=model.func, dfunc=model.dfunc, d2func=model.d2func, args=(), x0=param_vector, min_algor=min_algor, min_options=min_options, func_tol=func_tol, grad_tol=grad_tol, maxiter=max_iterations, A=A, b=b, full_output=True, print_flag=verbosity)

                # Unpack the results.
                if results is None:
                    return
                param_vector, chi2, iter_count, f_count, g_count, h_count, warning = results

            # Scaling.
            if scaling_matrix is not None:
                param_vector = dot(scaling_matrix, param_vector)

            # Disassemble the parameter vector.
            disassemble_param_vector(param_vector=param_vector, spins=[spins[si]], key=param_key, sim_index=sim_index)

            # Monte Carlo minimisation statistics.
            if sim_index is not None:
                # Chi-squared statistic.
                spins[si].chi2_sim[sim_index] = chi2

                # Iterations.
                spins[si].iter_sim[sim_index] = iter_count

                # Function evaluations.
                spins[si].f_count_sim[sim_index] = f_count

                # Gradient evaluations.
                spins[si].g_count_sim[sim_index] = g_count

                # Hessian evaluations.
                spins[si].h_count_sim[sim_index] = h_count

                # Warning.
                spins[si].warning_sim[sim_index] = warning

            # Normal statistics.
            else:
                # Chi-squared statistic.
                spins[si].chi2 = chi2

                # Iterations.
                spins[si].iter = iter_count

                # Function evaluations.
                spins[si].f_count = f_count

                # Gradient evaluations.
                spins[si].g_count = g_count

                # Hessian evaluations.
                spins[si].h_count = h_count

                # Warning.
                spins[si].warning = warning
Example #10
def find_pivot(models=None, atom_id=None, init_pos=None, func_tol=1e-5, box_limit=200):
    """Superimpose a set of structural models.

    @keyword models:    The list of models to use.  If set to None, then all models will be used.
    @type models:       list of int or None
    @keyword atom_id:   The molecule, residue, and atom identifier string.  This matches the spin ID string format.
    @type atom_id:      str or None
    @keyword init_pos:  The starting pivot position for the pivot point optimisation.
    @type init_pos:     list of float or numpy rank-1, 3D array
    @keyword func_tol:  The function tolerance which, when reached, terminates optimisation.  Setting this to None turns off the check.
    @type func_tol:     None or float
    @keyword box_limit: The simplex optimisation used in this function is constrained within a box of +/- x Angstrom containing the pivot point, using the logarithmic barrier function.  This argument is the value of x.
    @type box_limit:    int
    """

    # Test if the current data pipe exists.
    pipes.test()

    # Initialise the starting position if needed.
    if init_pos is None:
        init_pos = zeros(3, float64)
    init_pos = array(init_pos)

    # Validate the models.
    cdp.structure.validate_models()

    # Create a list of all models.
    if models is None:
        models = []
        for model in cdp.structure.model_loop():
            models.append(model.num)

    # Assemble the atomic coordinates of all models.
    coord = []
    for model in models:
        coord.append([])
        for pos in cdp.structure.atom_loop(atom_id=atom_id, model_num=model, pos_flag=True):
            coord[-1].append(pos[0])
        coord[-1] = array(coord[-1])
    coord = array(coord)

    # Linear constraints for the pivot position (between -1000 and 1000 Angstrom).
    A = zeros((6, 3), float64)
    b = zeros(6, float64)
    for i in range(3):
        A[2*i, i] = 1
        A[2*i+1, i] = -1
        b[2*i] = -box_limit
        b[2*i+1] = -box_limit

    # The target function.
    finder = Pivot_finder(models, coord)
    results = generic_minimise(func=finder.func, x0=init_pos, min_algor='Log barrier', min_options=('simplex',), A=A, b=b, func_tol=func_tol, print_flag=1)

    # No result.
    if results is None:
        return

    # Store the data.
    cdp.structure.pivot = results

    # Print out.
    print("Motional pivot found at:  %s" % results)
Example #11
                if j % 100 == 0:
                    print("Simulation %i"%j)
                # Start minimisation.

                # Produce the errors.
                I_err = []
                for k, error in enumerate(errors):
                    I_error = gauss(values[k], error)
                    I_err.append(I_error)
                # Convert to numpy array.
                I_err = asarray(I_err)

                x0 = [r2eff, i0]
                setup(num_params=len(x0), num_times=len(times), values=I_err, sd=errors, relax_times=times, scaling_matrix=scaling_list)

                params_minfx_sim_j, chi2_minfx_sim_j, iter_count, f_count, g_count, h_count, warning = generic_minimise(func=func, dfunc=dfunc, d2func=d2func, args=(), x0=x0, min_algor=min_algor, min_options=min_options, full_output=True, print_flag=0)
                R_m_sim_j, I0_m_sim_j = params_minfx_sim_j
                R_m_sim_l.append(R_m_sim_j)
                I0_m_sim_l.append(I0_m_sim_j)

            my_dic[spin_id][param_key]['r2eff_array_boot'] = asarray(R_m_sim_l)
            my_dic[spin_id][param_key]['i0_array_boot'] = asarray(I0_m_sim_l)

            # Get stats on distribution.
            sigma_R_sim = std(asarray(R_m_sim_l), ddof=1)
            sigma_I0_sim = std(asarray(I0_m_sim_l), ddof=1)
            my_dic[spin_id][param_key]['r2eff_err_boot'] = sigma_R_sim
            my_dic[spin_id][param_key]['i0_err_boot'] = sigma_I0_sim


# Now do the relax Monte Carlo simulations.
Example #12
    def minimise(self, min_algor=None, min_options=None, func_tol=None, grad_tol=None, max_iterations=None, constraints=False, scaling_matrix=None, verbosity=0, sim_index=None, lower=None, upper=None, inc=None):
        """Relaxation curve fitting minimisation method.

        @keyword min_algor:         The minimisation algorithm to use.
        @type min_algor:            str
        @keyword min_options:       An array of options to be used by the minimisation algorithm.
        @type min_options:          array of str
        @keyword func_tol:          The function tolerance which, when reached, terminates optimisation.  Setting this to None turns off the check.
        @type func_tol:             None or float
        @keyword grad_tol:          The gradient tolerance which, when reached, terminates optimisation.  Setting this to None turns off the check.
        @type grad_tol:             None or float
        @keyword max_iterations:    The maximum number of iterations for the algorithm.
        @type max_iterations:       int
        @keyword constraints:       If True, constraints are used during optimisation.
        @type constraints:          bool
        @keyword scaling_matrix:    The per-model list of diagonal and square scaling matrices.
        @type scaling_matrix:       list of numpy rank-2, float64 array or list of None
        @keyword verbosity:         The amount of information to print.  The higher the value, the greater the verbosity.
        @type verbosity:            int
        @keyword sim_index:         The index of the simulation to optimise.  This should be None if normal optimisation is desired.
        @type sim_index:            None or int
        @keyword lower:             The per-model lower bounds of the grid search, the number of which must equal the number of parameters in the model.  This optional argument is only used when doing a grid search.
        @type lower:                list of lists of numbers
        @keyword upper:             The per-model upper bounds of the grid search, the number of which must equal the number of parameters in the model.  This optional argument is only used when doing a grid search.
        @type upper:                list of lists of numbers
        @keyword inc:               The per-model increments for each dimension of the space for the grid search.  The number of elements in the array must equal the number of parameters in the model.  This argument is only used when doing a grid search.
        @type inc:                  list of lists of int
        """

        # Checks.
        check_mol_res_spin_data()

        # Loop over the sequence.
        model_index = 0
        for spin, spin_id in self.model_loop():
            # Skip deselected spins.
            if not spin.select:
                continue

            # Skip spins which have no data.
            if not hasattr(spin, 'peak_intensity'):
                continue

            # Create the initial parameter vector.
            param_vector = assemble_param_vector(spin=spin)

            # Diagonal scaling.
            if scaling_matrix[model_index] is not None:
                param_vector = dot(inv(scaling_matrix[model_index]), param_vector)

            # Linear constraints.
            if constraints:
                A, b = linear_constraints(spin=spin, scaling_matrix=scaling_matrix[model_index])
            else:
                A, b = None, None

            # Print out.
            if verbosity >= 1:
                # Individual spin printout.
                if verbosity >= 2:
                    print("\n\n")

                string = "Fitting to spin " + repr(spin_id)
                print("\n\n" + string)
                print(len(string) * '~')


            # Initialise the function to minimise.
            ######################################

            # The peak intensities and times.
            values = []
            errors = []
            times = []
            for key in spin.peak_intensity:
                # The values.
                if sim_index is None:
                    values.append(spin.peak_intensity[key])
                else:
                    values.append(spin.peak_intensity_sim[sim_index][key])

                # The errors.
                errors.append(spin.peak_intensity_err[key])

                # The relaxation times.
                times.append(cdp.relax_times[key])

            # The scaling matrix in a diagonalised list form.
            scaling_list = []
            if scaling_matrix[model_index] is None:
                for i in range(len(param_vector)):
                    scaling_list.append(1.0)
            else:
                for i in range(len(scaling_matrix[model_index])):
                    scaling_list.append(scaling_matrix[model_index][i, i])

            # Set up the target function.
            model = Relax_fit_opt(model=spin.model, num_params=len(spin.params), values=values, errors=errors, relax_times=times, scaling_matrix=scaling_list)


            # Setup the minimisation algorithm when constraints are present.
            ################################################################

            if constraints and not match('^[Gg]rid', min_algor):
                algor = min_options[0]
            else:
                algor = min_algor


            # Levenberg-Marquardt minimisation.
            ###################################

            if match('[Ll][Mm]$', algor) or match('[Ll]evenburg-[Mm]arquardt$', algor):
                # Reconstruct the error data structure.
                lm_error = zeros(len(spin.relax_times), float64)
                index = 0
                for k in range(len(spin.relax_times)):
                    lm_error[index:index+len(relax_error[k])] = relax_error[k]
                    index = index + len(relax_error[k])

                min_options = min_options + (self.relax_fit.lm_dri, lm_error)


            # Minimisation.
            ###############

            # Grid search.
            if search('^[Gg]rid', min_algor):
                results = grid(func=model.func, args=(), num_incs=inc[model_index], lower=lower[model_index], upper=upper[model_index], A=A, b=b, verbosity=verbosity)

                # Unpack the results.
                param_vector, chi2, iter_count, warning = results
                f_count = iter_count
                g_count = 0.0
                h_count = 0.0

            # Minimisation.
            else:
                results = generic_minimise(func=model.func, dfunc=model.dfunc, d2func=model.d2func, args=(), x0=param_vector, min_algor=min_algor, min_options=min_options, func_tol=func_tol, grad_tol=grad_tol, maxiter=max_iterations, A=A, b=b, full_output=True, print_flag=verbosity)

                # Unpack the results.
                if results is None:
                    return
                param_vector, chi2, iter_count, f_count, g_count, h_count, warning = results

            # Scaling.
            if scaling_matrix[model_index] is not None:
                param_vector = dot(scaling_matrix[model_index], param_vector)

            # Disassemble the parameter vector.
            disassemble_param_vector(param_vector=param_vector, spin=spin, sim_index=sim_index)

            # Monte Carlo minimisation statistics.
            if sim_index is not None:
                # Chi-squared statistic.
                spin.chi2_sim[sim_index] = chi2

                # Iterations.
                spin.iter_sim[sim_index] = iter_count

                # Function evaluations.
                spin.f_count_sim[sim_index] = f_count

                # Gradient evaluations.
                spin.g_count_sim[sim_index] = g_count

                # Hessian evaluations.
                spin.h_count_sim[sim_index] = h_count

                # Warning.
                spin.warning_sim[sim_index] = warning


            # Normal statistics.
            else:
                # Chi-squared statistic.
                spin.chi2 = chi2

                # Iterations.
                spin.iter = iter_count

                # Function evaluations.
                spin.f_count = f_count

                # Gradient evaluations.
                spin.g_count = g_count

                # Hessian evaluations.
                spin.h_count = h_count

                # Warning.
                spin.warning = warning

            # Increment the model index.
            model_index += 1
Example #13
# Loop over the bootstrapping simulations.
SIMS = 200000
R_sim = []
I0_sim = []
for sim_index in range(SIMS):
    # Printout.
    if sim_index % 100 == 0:
        print("Simulation %i" % sim_index)

    # Randomise the data.
    I_boot = []
    for i in range(len(I)):
        I_boot.append(gauss(I[i], errors[i]))

    # Minimisation.
    xk, fk, k, f_count, g_count, h_count, warning = generic_minimise(func=func, x0=params, min_algor='simplex', print_flag=0, full_output=True)

    # Store the optimised parameters.
    R_sim.append(xk[0])
    I0_sim.append(xk[1])

# Errors.
R_err = std(array(R_sim))
I0_err = std(array(I0_sim))

# Printout.
print("\n\nParameter errors:")
print("sigma_R:   %25.20f" % R_err)
print("sigma_I0:  %25.20f" % I0_err)
Example #14
def minimise_r2eff(spins=None, spin_ids=None, min_algor=None, min_options=None, func_tol=None, grad_tol=None, max_iterations=None, constraints=False, scaling_matrix=None, verbosity=0, sim_index=None, lower=None, upper=None, inc=None):
    """Optimise the R2eff model by fitting the 2-parameter exponential curves.

    This mimics the R1 and R2 relax_fit analysis.


    @keyword spins:             The list of spins for the cluster.
    @type spins:                list of SpinContainer instances
    @keyword spin_ids:          The list of spin IDs for the cluster.
    @type spin_ids:             list of str
    @keyword min_algor:         The minimisation algorithm to use.
    @type min_algor:            str
    @keyword min_options:       An array of options to be used by the minimisation algorithm.
    @type min_options:          array of str
    @keyword func_tol:          The function tolerance which, when reached, terminates optimisation.  Setting this to None turns off the check.
    @type func_tol:             None or float
    @keyword grad_tol:          The gradient tolerance which, when reached, terminates optimisation.  Setting this to None turns off the check.
    @type grad_tol:             None or float
    @keyword max_iterations:    The maximum number of iterations for the algorithm.
    @type max_iterations:       int
    @keyword constraints:       If True, constraints are used during optimisation.
    @type constraints:          bool
    @keyword scaling_matrix:    The diagonal and square scaling matrix.
    @type scaling_matrix:       numpy rank-2, float64 array or None
    @keyword verbosity:         The amount of information to print.  The higher the value, the greater the verbosity.
    @type verbosity:            int
    @keyword sim_index:         The index of the simulation to optimise.  This should be None if normal optimisation is desired.
    @type sim_index:            None or int
    @keyword lower:             The model specific lower bounds of the grid search, the number of which must equal the number of parameters in the model.  This optional argument is only used when doing a grid search.
    @type lower:                list of numbers
    @keyword upper:             The model specific upper bounds of the grid search, the number of which must equal the number of parameters in the model.  This optional argument is only used when doing a grid search.
    @type upper:                list of numbers
    @keyword inc:               The model specific increments for each dimension of the space for the grid search.  The number of elements in the array must equal the number of parameters in the model.  This argument is only used when doing a grid search.
    @type inc:                  list of int
    """

    # Check that the C modules have been compiled.
    if not C_module_exp_fn:
        raise RelaxError("Relaxation curve fitting is not available.  Try compiling the C modules on your platform.")

    # Loop over the spins.
    for si in range(len(spins)):
        # Skip deselected spins.
        if not spins[si].select:
            continue

        # Loop over each spectrometer frequency and dispersion point.
        for exp_type, frq, offset, point in loop_exp_frq_offset_point():
            # The parameter key.
            param_key = return_param_key_from_data(exp_type=exp_type, frq=frq, offset=offset, point=point)

            # The initial parameter vector.
            param_vector = assemble_param_vector(spins=[spins[si]], key=param_key, sim_index=sim_index)

            # Diagonal scaling.
            if scaling_matrix is not None:
                param_vector = dot(inv(scaling_matrix), param_vector)

            # Linear constraints.
            A, b = None, None
            if constraints:
                A, b = linear_constraints(spins=[spins[si]], scaling_matrix=scaling_matrix)

            # Print out.
            if verbosity >= 1:
                # Individual spin section.
                top = 2
                if verbosity >= 2:
                    top += 2
                text = "Fitting to spin %s, frequency %s and dispersion point %s" % (spin_ids[si], frq, point)
                subsection(file=sys.stdout, text=text, prespace=top)

                # Grid search printout.
                if match('^[Gg]rid', min_algor):
                    result = 1
                    for x in inc:
                        result = mul(result, x)
                    print("Unconstrained grid search size: %s (constraints may decrease this size).\n" % result)

            # The peak intensities, errors and times.
            values = []
            errors = []
            times = []
            data_flag = True
            for time in loop_time(exp_type=exp_type, frq=frq, offset=offset, point=point):
                # Check the peak intensity keys.
                int_keys = find_intensity_keys(exp_type=exp_type, frq=frq, offset=offset, point=point, time=time)
                peak_intensities = spins[si].peak_intensity
                if sim_index is not None:
                    peak_intensities = spins[si].peak_intensity_sim
                for i in range(len(int_keys)):
                    if int_keys[i] not in peak_intensities:
                        if verbosity:
                            warn(RelaxWarning("The spin %s peak intensity key '%s' is not present, skipping the optimisation." % (spin_ids[si], int_keys[i])))
                        data_flag = False
                        break

                if data_flag:
                    values.append(average_intensity(spin=spins[si], exp_type=exp_type, frq=frq, offset=offset, point=point, time=time, sim_index=sim_index))
                    errors.append(average_intensity(spin=spins[si], exp_type=exp_type, frq=frq, offset=offset, point=point, time=time, error=True))
                    times.append(time)
            if not data_flag:
                continue

            # Raise an error if the number of time points is less than 3.
            if len(times) < 3:
                subsection(file=sys.stdout, text="Exponential curve fitting error for point:", prespace=2)
                point_info = "%s at %3.1f MHz, for offset=%3.3f ppm and dispersion point %-5.1f, with %i time points." % (exp_type, frq/1E6, offset, point, len(times))
                print(point_info)
                raise RelaxError("The data setup points to exponential curve fitting, but only %i time points were found, where a minimum of 3 is required.  If calculating R2eff values for fixed relaxation time period data, check that a reference intensity has been specified for each offset value." % (len(times)))

            # The scaling matrix in a diagonalised list form.
            scaling_list = []
            if scaling_matrix is None:
                for i in range(len(param_vector)):
                    scaling_list.append(1.0)
            else:
                for i in range(len(scaling_matrix)):
                    scaling_list.append(scaling_matrix[i, i])

            # Initialise the function to minimise.
            model = Relax_fit_opt(model='exp', num_params=len(param_vector), values=values, errors=errors, relax_times=times, scaling_matrix=scaling_list)

            # Grid search.
            if search('^[Gg]rid', min_algor):
                results = grid(func=model.func, args=(), num_incs=inc, lower=lower, upper=upper, A=A, b=b, verbosity=verbosity)

                # Unpack the results.
                param_vector, chi2, iter_count, warning = results
                f_count = iter_count
                g_count = 0
                h_count = 0

            # Minimisation.
            else:
                results = generic_minimise(func=model.func, dfunc=model.dfunc, d2func=model.d2func, args=(), x0=param_vector, min_algor=min_algor, min_options=min_options, func_tol=func_tol, grad_tol=grad_tol, maxiter=max_iterations, A=A, b=b, full_output=True, print_flag=verbosity)

                # Unpack the results.
                if results is None:
                    return
                param_vector, chi2, iter_count, f_count, g_count, h_count, warning = results

            # Scaling.
            if scaling_matrix is not None:
                param_vector = dot(scaling_matrix, param_vector)

            # Disassemble the parameter vector.
            disassemble_param_vector(param_vector=param_vector, spins=[spins[si]], key=param_key, sim_index=sim_index)

            # Monte Carlo minimisation statistics.
            if sim_index is not None:
                # Chi-squared statistic.
                spins[si].chi2_sim[sim_index] = chi2

                # Iterations.
                spins[si].iter_sim[sim_index] = iter_count

                # Function evaluations.
                spins[si].f_count_sim[sim_index] = f_count

                # Gradient evaluations.
                spins[si].g_count_sim[sim_index] = g_count

                # Hessian evaluations.
                spins[si].h_count_sim[sim_index] = h_count

                # Warning.
                spins[si].warning_sim[sim_index] = warning

            # Normal statistics.
            else:
                # Chi-squared statistic.
                spins[si].chi2 = chi2

                # Iterations.
                spins[si].iter = iter_count

                # Function evaluations.
                spins[si].f_count = f_count

                # Gradient evaluations.
                spins[si].g_count = g_count

                # Hessian evaluations.
                spins[si].h_count = h_count

                # Warning.
                spins[si].warning = warning
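
A side note on the diagonal scaling used throughout these examples: dividing the parameter vector by the scaling matrix before optimisation and multiplying back afterwards keeps parameters of very different magnitudes at a comparable size in the search space. A minimal sketch of the round-trip, with hypothetical values for a two-parameter [R, I0] fit:

# A minimal sketch of diagonal scaling, assuming a two-parameter
# exponential model with R ~ 10 s^-1 and I0 ~ 1e5 (hypothetical values).
from numpy import array, diag, dot
from numpy.linalg import inv

param_vector = array([12.5, 2.5e5])   # [R, I0] in original units.
scaling_matrix = diag([1.0, 1e5])     # Diagonal, square scaling matrix.

# Scale down before optimisation: both elements become order 1-10.
scaled = dot(inv(scaling_matrix), param_vector)
print(scaled)                          # [12.5, 2.5]

# ... optimise in the scaled space ...

# Scale back up afterwards to recover the original units.
param_vector = dot(scaling_matrix, scaled)
print(param_vector)                    # [12.5, 250000.0]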
Example no. 16
R_sim = []
I0_sim = []
for sim_index in range(SIMS):
    # Printout.
    if sim_index % 100 == 0:
        print("Simulation %i" % sim_index)

    # Randomise the data.
    I_boot = []
    for i in range(len(I)):
        I_boot.append(gauss(I[i], errors[i]))

    # Minimisation.
    xk, fk, k, f_count, g_count, h_count, warning = generic_minimise(
        func=func,
        x0=params,
        min_algor='simplex',
        print_flag=0,
        full_output=True)

    # Store the optimised parameters.
    R_sim.append(xk[0])
    I0_sim.append(xk[1])

# Errors.
R_err = std(array(R_sim))
I0_err = std(array(I0_sim))

# Printout.
print("\n\nParameter errors:")
print("sigma_R:   %25.20f" % R_err)
print("sigma_I0:  %25.20f" % I0_err)
Example no. 17
    def minimise(self, min_algor=None, min_options=None, func_tol=None, grad_tol=None, max_iterations=None, constraints=False, scaling_matrix=None, verbosity=0, sim_index=None, lower=None, upper=None, inc=None):
        """Minimisation function.

        @keyword min_algor:         The minimisation algorithm to use.
        @type min_algor:            str
        @keyword min_options:       An array of options to be used by the minimisation algorithm.
        @type min_options:          array of str
        @keyword func_tol:          The function tolerance which, when reached, terminates optimisation.  Setting this to None turns off the check.
        @type func_tol:             None or float
        @keyword grad_tol:          The gradient tolerance which, when reached, terminates optimisation.  Setting this to None turns off the check.
        @type grad_tol:             None or float
        @keyword max_iterations:    The maximum number of iterations for the algorithm.
        @type max_iterations:       int
        @keyword constraints:       If True, constraints are used during optimisation.
        @type constraints:          bool
        @keyword scaling_matrix:    The per-model list of diagonal and square scaling matrices.
        @type scaling_matrix:       list of numpy rank-2, float64 array or list of None
        @keyword verbosity:         A flag specifying the amount of information to print.  The higher the value, the greater the verbosity.
        @type verbosity:            int
        @keyword sim_index:         The index of the simulation to optimise.  This should be None if normal optimisation is desired.
        @type sim_index:            None or int
        @keyword lower:             The per-model lower bounds of the grid search, the number of which must equal the number of parameters in the model.  This optional argument is only used when doing a grid search.
        @type lower:                list of lists of numbers
        @keyword upper:             The per-model upper bounds of the grid search, the number of which must equal the number of parameters in the model.  This optional argument is only used when doing a grid search.
        @type upper:                list of lists of numbers
        @keyword inc:               The per-model increments for each dimension of the space for the grid search.  The number of elements in the array must equal the number of parameters in the model.  This argument is only used when doing a grid search.
        @type inc:                  list of lists of int
        """

        # Set up the target function for direct calculation.
        model, param_vector, data_types = target_fn_setup(sim_index=sim_index, scaling_matrix=scaling_matrix[0], verbosity=verbosity)

        # Nothing to do!
        if not len(param_vector):
            warn(RelaxWarning("The model has no parameters, minimisation cannot be performed."))
            return

        # Constraints cannot be used for the 'fixed' model.
        if constraints and cdp.model == 'fixed':
            if verbosity:
                warn(RelaxWarning("Turning constraints off.  These cannot be used for the 'fixed' model."))
            constraints = False

            # Pop out the Method of Multipliers algorithm.
            if min_algor == 'Method of Multipliers':
                min_algor = min_options[0]
                min_options = min_options[1:]

        # And constraints absolutely must be used for the 'population' model.
        if not constraints and cdp.model == 'population':
            warn(RelaxWarning("Turning constraints on.  These absolutely must be used for the 'population' model."))
            constraints = True

            # Add the Method of Multipliers algorithm.
            min_options = (min_algor,) + min_options
            min_algor = 'Method of Multipliers'

        # Disallow Newton optimisation and other Hessian optimisers for the paramagnetic centre position optimisation (the PCS Hessian is not yet implemented).
        if hasattr(cdp, 'paramag_centre_fixed') and not cdp.paramag_centre_fixed:
            if min_algor in ['newton']:
                raise RelaxError("For the paramagnetic centre position, as the Hessians are not yet implemented Newton optimisation cannot be performed.")

        # Linear constraints.
        A, b = None, None
        if constraints:
            A, b = linear_constraints(data_types=data_types, scaling_matrix=scaling_matrix[0])

        # Grid search.
        if search('^[Gg]rid', min_algor):
            # The search.
            results = grid(func=model.func, args=(), num_incs=inc[0], lower=lower[0], upper=upper[0], A=A, b=b, verbosity=verbosity)

            # Unpack the results.
            param_vector, func, iter_count, warning = results
            f_count = iter_count
            g_count = 0
            h_count = 0

        # Minimisation.
        else:
            results = generic_minimise(func=model.func, dfunc=model.dfunc, d2func=model.d2func, args=(), x0=param_vector, min_algor=min_algor, min_options=min_options, func_tol=func_tol, grad_tol=grad_tol, maxiter=max_iterations, A=A, b=b, full_output=1, print_flag=verbosity)

            # Unpack the results.
            if results is None:
                return
            param_vector, func, iter_count, f_count, g_count, h_count, warning = results

        # Catch infinite chi-squared values.
        if isInf(func):
            raise RelaxInfError('chi-squared')

        # Catch chi-squared values of NaN.
        if isNaN(func):
            raise RelaxNaNError('chi-squared')

        # Make a last function call to update the back-calculated RDC and PCS structures to the optimal values.
        chi2 = model.func(param_vector)

        # Scaling.
        if scaling_matrix[0] is not None:
            param_vector = dot(scaling_matrix[0], param_vector)

        # Disassemble the parameter vector.
        disassemble_param_vector(param_vector=param_vector, data_types=data_types, sim_index=sim_index)

        # Monte Carlo minimisation statistics.
        if sim_index is not None:
            # Chi-squared statistic.
            cdp.chi2_sim[sim_index] = func

            # Iterations.
            cdp.iter_sim[sim_index] = iter_count

            # Function evaluations.
            cdp.f_count_sim[sim_index] = f_count

            # Gradient evaluations.
            cdp.g_count_sim[sim_index] = g_count

            # Hessian evaluations.
            cdp.h_count_sim[sim_index] = h_count

            # Warning.
            cdp.warning_sim[sim_index] = warning

        # Normal statistics.
        else:
            # Chi-squared statistic.
            cdp.chi2 = func

            # Iterations.
            cdp.iter = iter_count

            # Function evaluations.
            cdp.f_count = f_count

            # Gradient evaluations.
            cdp.g_count = g_count

            # Hessian evaluations.
            cdp.h_count = h_count

            # Warning.
            cdp.warning = warning

        # Statistical analysis.
        if 'rdc' in data_types or 'pcs' in data_types:
            # Get the final back-calculated data (for the Q factor calculations).
            minimise_bc_data(model, sim_index=sim_index)

            # Calculate the RDC Q factors.
            if 'rdc' in data_types:
                rdc.q_factors(sim_index=sim_index, verbosity=verbosity)

            # Calculate the PCS Q factors.
            if 'pcs' in data_types:
                pcs.q_factors(sim_index=sim_index, verbosity=verbosity)
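
When constraints are enabled in this example, they reach minfx as a linear system, in relax's convention A.x >= b, with the Method of Multipliers wrapping the chosen unconstrained algorithm as shown above. A minimal sketch of hand-built constraint matrices bounding the first of two parameters to [0, 1] (illustrative only, not the matrices linear_constraints() would build):

# A minimal sketch of linear constraints in the A.x >= b form.
from numpy import array, dot

# Constrain the first of two parameters to 0 <= p0 <= 1:
#    p0 >= 0   ->  [ 1, 0] . x >= 0
#   -p0 >= -1  ->  [-1, 0] . x >= -1
A = array([[ 1.0, 0.0],
           [-1.0, 0.0]])
b = array([0.0, -1.0])

x = array([0.5, 42.0])
print((dot(A, x) >= b).all())   # True: x satisfies both constraints.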
Example no. 18
def minimise_minfx(E=None):
    """Estimate r2eff and errors by minimising with minfx.

    @keyword E:     The Exponential function class, which contains the data and functions.
    @type E:        class
    @return:        Packed list with the optimised parameters, the parameter errors estimated from the covariance matrix, chi2, iter_count, f_count, g_count, h_count, warning.
    @rtype:         list
    """

    # Check that the C modules have been compiled.
    if not C_module_exp_fn:
        raise RelaxError(
            "Relaxation curve fitting is not available.  Try compiling the C modules on your platform."
        )

    # Initial guess for minimisation. Solved by linear least squares.
    x0 = asarray(E.estimate_x0_exp(times=E.times, values=E.values))

    if E.c_code:
        # Minimise with C code.

        # Initialise the function to minimise.
        scaling_list = [1.0, 1.0]
        model = Relax_fit_opt(model='exp',
                              num_params=len(x0),
                              values=E.values,
                              errors=E.errors,
                              relax_times=E.times,
                              scaling_matrix=scaling_list)

        # Define function to minimise for minfx.
        t_func = model.func
        t_dfunc = model.dfunc
        t_d2func = model.d2func
        args = ()

    else:
        # Minimise with minfx.
        # Define function to minimise for minfx.
        t_func = E.func_exp_chi2
        t_dfunc = E.func_exp_chi2_grad
        t_d2func = None
        # All args to function. Params are packed out through function, then other parameters.
        args = (E.times, E.values, E.errors)

    # Minimise.
    results_minfx = generic_minimise(func=t_func,
                                     dfunc=t_dfunc,
                                     d2func=t_d2func,
                                     args=args,
                                     x0=x0,
                                     min_algor=E.min_algor,
                                     min_options=E.min_options,
                                     func_tol=E.func_tol,
                                     grad_tol=E.grad_tol,
                                     maxiter=E.max_iterations,
                                     A=E.A,
                                     b=E.b,
                                     full_output=True,
                                     print_flag=0)

    # Unpack the results.
    param_vector, chi2, iter_count, f_count, g_count, h_count, warning = results_minfx

    # Extract.
    r2eff, i0 = param_vector

    # Get the Jacobian.
    if E.c_code:
        if E.chi2_jacobian:
            # Use the chi2 Jacobian from C.
            jacobian_matrix_exp = transpose(
                asarray(model.jacobian_chi2(param_vector)))
            weights = ones(E.errors.shape)

        else:
            # Use the direct Jacobian from C.
            jacobian_matrix_exp = transpose(
                asarray(model.jacobian(param_vector)))
            weights = 1. / E.errors**2

    else:
        if E.chi2_jacobian:
            # Use the chi2 Jacobian from Python.
            jacobian_matrix_exp = E.func_exp_chi2_grad_array(
                params=param_vector,
                times=E.times,
                values=E.values,
                errors=E.errors)
            weights = ones(E.errors.shape)

        else:
            # Use the direct Jacobian from Python.
            jacobian_matrix_exp = E.func_exp_grad(params=param_vector,
                                                  times=E.times)
            weights = 1. / E.errors**2

    pcov = multifit_covar(J=jacobian_matrix_exp, weights=weights)

    # To compute one standard deviation errors on the parameters, take the square root of the diagonal covariance.
    param_vector_error = sqrt(diag(pcov))

    # Pack to list.
    results = [
        param_vector, param_vector_error, chi2, iter_count, f_count, g_count,
        h_count, warning
    ]

    # Return, including errors.
    return results
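
The parameter errors in this example come from the covariance matrix of the fit. Under the usual weighted least-squares assumptions this is pcov = (J^T.W.J)^-1, built from the Jacobian J and the weights W = 1/sigma^2, which is presumably what multifit_covar computes here; the one standard deviation errors are then the square roots of its diagonal. A sketch of that calculation with a hypothetical Jacobian:

# A sketch of covariance-based parameter errors, assuming the weighted
# least-squares form pcov = (J^T W J)^-1 (hypothetical J and weights).
from numpy import array, diag, dot, sqrt, transpose
from numpy.linalg import inv

J = array([[1.0, 0.5],     # One row per time point,
           [1.0, 1.5],     # one column per parameter.
           [1.0, 2.5]])
weights = array([4.0, 4.0, 4.0])   # 1/sigma^2 for each point.

pcov = inv(dot(transpose(J), dot(diag(weights), J)))

# One standard deviation errors on the parameters.
param_errors = sqrt(diag(pcov))
print(param_errors)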
Example no. 19
                # Printout.
                if j % 100 == 0:
                    print("Simulation %i" % j)

                # Produce the randomised intensities.
                I_err = []
                for k, error in enumerate(errors):
                    I_err.append(gauss(values[k], error))
                # Convert to numpy array.
                I_err = asarray(I_err)

                # Start the minimisation.
                x0 = [r2eff, i0]
                setup(num_params=len(x0), num_times=len(times), values=I_err, sd=errors, relax_times=times, scaling_matrix=scaling_list)

                params_minfx_sim_j, chi2_minfx_sim_j, iter_count, f_count, g_count, h_count, warning = generic_minimise(func=func, dfunc=dfunc, d2func=d2func, args=(), x0=x0, min_algor=min_algor, min_options=min_options, full_output=True, print_flag=0)
                R_m_sim_j, I0_m_sim_j = params_minfx_sim_j
                R_m_sim_l.append(R_m_sim_j)
                I0_m_sim_l.append(I0_m_sim_j)

            my_dic[spin_id][param_key]['r2eff_array_boot'] = asarray(R_m_sim_l)
            my_dic[spin_id][param_key]['i0_array_boot'] = asarray(I0_m_sim_l)

            # Get stats on distribution.
            sigma_R_sim = std(asarray(R_m_sim_l), ddof=1)
            sigma_I0_sim = std(asarray(I0_m_sim_l), ddof=1)
            my_dic[spin_id][param_key]['r2eff_err_boot'] = sigma_R_sim
            my_dic[spin_id][param_key]['i0_err_boot'] = sigma_I0_sim


# Now do the relax Monte Carlo simulations.
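
One detail worth noting: this fragment uses std(..., ddof=1), the sample standard deviation with Bessel's correction, while Example no. 16 above relies on numpy's default of ddof=0. For typical simulation counts the difference is negligible, as a quick check shows:

# ddof=0 (population) versus ddof=1 (sample) standard deviation.
from numpy import asarray, std

sims = asarray([10.1, 9.9, 10.3, 9.7, 10.0])
print(std(sims, ddof=0))   # Divides by N.
print(std(sims, ddof=1))   # Divides by N - 1, so slightly larger.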