Example #1
    def setUp(self):
        """Create a number of objects for the calculation and testing of the relaxation curve-fitting equations."""

        # The parameter scaling.
        self.scaling_list = [1.0, 1000.0]

        # The parameter values at the minimum.
        self.I0 = 1000.0
        self.R = 1.0
        self.params = [
            self.R / self.scaling_list[0], self.I0 / self.scaling_list[1]
        ]

        # The time points.
        relax_times = [0.0, 1.0, 2.0, 3.0, 4.0]

        # The intensities for the above I0 and R.
        I = [
            1000.0, 367.879441171, 135.335283237, 49.7870683679, 18.3156388887
        ]

        # The intensity errors.
        errors = [10.0, 10.0, 10.0, 10.0, 10.0]

        # Set up the C module.
        setup(num_params=2,
              num_times=len(relax_times),
              values=I,
              sd=errors,
              relax_times=relax_times,
              scaling_matrix=self.scaling_list)
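A minimal test method that this setUp() enables is sketched below. It assumes, as the later examples show, that the C module's func() takes the scaled parameter vector and returns the chi-squared value; the method name is illustrative only.

    def test_func_at_minimum(self):
        """Hypothetical check: the chi-squared value should be zero at the exact minimum."""

        # Evaluate the chi-squared value at the scaled parameters used to generate the intensities.
        chi2 = func(self.params)

        # The synthetic intensities are noise-free, so the chi-squared value is zero.
        self.assertAlmostEqual(chi2, 0.0)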
Example #2
def back_calc():
    """Back calculate the peak intensities.

    The simple two parameter exponential curve (Rx, I0) is assumed.
    """

    # Loop over the spins.
    for spin in spin_loop():
        # Skip deselected spins.
        if not spin.select:
            continue

        # The parameter vector.
        param_vector = array([spin.rx, spin.i0], float64)

        # Initialise the relaxation fit functions.
        setup(num_params=len(spin.params),
              num_times=len(cdp.relax_times),
              values=spin.ave_intensities,
              sd=cdp.sd,
              relax_times=cdp.relax_times,
              scaling_matrix=identity(2, float64))

        # Make a single function call.  This will cause back calculation and the data will be stored in the C module.
        func(param_vector)

        # Get the data and store it in the spin specific data structure.
        spin.fit_int = back_calc_I()
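The two-parameter model referred to in the docstring is I(t) = I0 * exp(-Rx * t), so the back-calculated values stored in spin.fit_int could be cross-checked against a pure Python equivalent (a sketch, assuming cdp.relax_times is a plain list as used above):

from math import exp

def back_calc_python(spin):
    """Pure Python equivalent of the exponential back calculation, for cross-checking only."""
    return [spin.i0 * exp(-spin.rx * t) for t in cdp.relax_times]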
Example #3
    def __init__(self,
                 model=None,
                 num_params=None,
                 values=None,
                 errors=None,
                 relax_times=None,
                 scaling_matrix=None):
        """Set up the target function class and alias the target functions.

        @keyword model:             The exponential curve type.  This can be 'exp' for the standard two parameter exponential curve, 'inv' for the inversion recovery experiment, and 'sat' for the saturation recovery experiment.
        @type model:                str
        @keyword num_params:        The number of parameters in the model.
        @type num_params:           int
        @keyword values:            The peak intensities.
        @type values:               list of float
        @keyword errors:            The peak intensity errors.
        @type errors:               list of float
        @keyword relax_times:       The list of relaxation times.
        @type relax_times:          list of float
        @keyword scaling_matrix:    The scaling matrix in a diagonalised list form.
        @type scaling_matrix:       list of float
        """

        # Store the args.
        self.model = model

        # Initialise the C code.
        setup(num_params=num_params,
              num_times=len(relax_times),
              values=values,
              sd=errors,
              relax_times=relax_times,
              scaling_matrix=scaling_matrix)

        # Alias the target functions.
        if model == 'exp':
            self.func = self.func_exp
            self.dfunc = self.dfunc_exp
            self.d2func = self.d2func_exp
        elif model == 'inv':
            self.func = self.func_inv
            self.dfunc = self.dfunc_inv
            self.d2func = self.d2func_inv
        elif model == 'sat':
            self.func = self.func_sat
            self.dfunc = self.dfunc_sat
            self.d2func = self.d2func_sat

        # Alias the Jacobian C functions.
        if model == 'exp':
            self.jacobian = jacobian_exp
            self.jacobian_chi2 = jacobian_chi2_exp
        elif model == 'inv':
            self.jacobian = jacobian_inv
            self.jacobian_chi2 = jacobian_chi2_inv
        elif model == 'sat':
            self.jacobian = jacobian_sat
            self.jacobian_chi2 = jacobian_chi2_sat
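Assuming this constructor belongs to the curve-fitting target function class (named Relax_fit_opt below purely for illustration), instantiation and evaluation of the aliased functions would look roughly like:

from numpy import array, float64

# Hypothetical usage; the class name and data values are illustrative only.
target_fn = Relax_fit_opt(model='exp', num_params=2, values=[1000.0, 367.879, 135.335], errors=[10.0, 10.0, 10.0], relax_times=[0.0, 1.0, 2.0], scaling_matrix=[1.0, 1.0])

# Evaluate the aliased target functions at Rx = 1.0, I0 = 1000.0.
params = array([1.0, 1000.0], float64)
chi2 = target_fn.func(params)    # Chi-squared value.
grad = target_fn.dfunc(params)   # Chi-squared gradient.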
Example #4
def verify(min_algor='simplex', constraints=None):
    """Minimise each of the data sets and collect the chi-squared values."""

    # Instantiate the profiling class which supplies the data sets.
    C = Profile()

    # Instantiate the exponential curve class holding the data and minimisation settings.
    E = Exp(verbosity=0)

    # List to store chi2.
    chi2_list = []

    for values, errors, times, struct, num_times in C.loop_data():
        # Initialise the function to minimise.
        E.setup_data(values=values, errors=errors, times=times)

        # Initial guess for minimisation. Solved by linear least squares.
        x0 = asarray(E.estimate_x0_exp())

        E.set_settings_minfx(min_algor=min_algor, constraints=constraints)

        # Alias the target function and its derivatives.
        func = func_wrapper
        dfunc = dfunc_wrapper
        d2func = d2func_wrapper

        # Initialise the C module with the data.
        scaling_list = [1.0, 1.0]
        setup(num_params=len(x0),
              num_times=len(E.times),
              values=E.values,
              sd=E.errors,
              relax_times=E.times,
              scaling_matrix=scaling_list)

        results = generic_minimise(func=func,
                                   dfunc=dfunc,
                                   d2func=d2func,
                                   args=(),
                                   x0=x0,
                                   min_algor=E.min_algor,
                                   min_options=E.min_options,
                                   func_tol=E.func_tol,
                                   grad_tol=E.grad_tol,
                                   maxiter=E.max_iterations,
                                   A=E.A,
                                   b=E.b,
                                   full_output=True,
                                   print_flag=E.verbosity)

        # Unpack
        param_vector, chi2, iter_count, f_count, g_count, h_count, warning = results

        # Add to list.
        chi2_list.append(chi2)

    return chi2_list
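The func_wrapper, dfunc_wrapper and d2func_wrapper names are not defined in this snippet. They are presumably thin pass-throughs to the C module functions, along these lines (a sketch, not the verified originals):

def func_wrapper(params):
    """Forward to the C module chi-squared function."""
    return func(params)

def dfunc_wrapper(params):
    """Forward to the C module chi-squared gradient function."""
    return dfunc(params)

def d2func_wrapper(params):
    """Forward to the C module chi-squared Hessian function."""
    return d2func(params)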
Example #5
    def __init__(self, model=None, num_params=None, values=None, errors=None, relax_times=None, scaling_matrix=None):
        """Set up the target function class and alias the target functions.

        @keyword model:             The exponential curve type.  This can be 'exp' for the standard two parameter exponential curve, 'inv' for the inversion recovery experiment, and 'sat' for the saturation recovery experiment.
        @type model:                str
        @keyword num_params:        The number of parameters in the model.
        @type num_params:           int
        @keyword values:            The peak intensities.
        @type values:               list of float
        @keyword errors:            The peak intensity errors.
        @type errors:               list of float
        @keyword relax_times:       The list of relaxation times.
        @type relax_times:          list of float
        @keyword scaling_matrix:    The scaling matrix in a diagonalised list form.
        @type scaling_matrix:       list of float
        """

        # Store the args.
        self.model = model

        # Initialise the C code.
        setup(num_params=num_params, num_times=len(relax_times), values=values, sd=errors, relax_times=relax_times, scaling_matrix=scaling_matrix)

        # Alias the target functions.
        if model == 'exp':
            self.func = self.func_exp
            self.dfunc = self.dfunc_exp
            self.d2func = self.d2func_exp
        elif model == 'inv':
            self.func = self.func_inv
            self.dfunc = self.dfunc_inv
            self.d2func = self.d2func_inv
        elif model == 'sat':
            self.func = self.func_sat
            self.dfunc = self.dfunc_sat
            self.d2func = self.d2func_sat

        # Alias the Jacobian C functions.
        if model == 'exp':
            self.jacobian = jacobian_exp
            self.jacobian_chi2 = jacobian_chi2_exp
        elif model == 'inv':
            self.jacobian = jacobian_inv
            self.jacobian_chi2 = jacobian_chi2_inv
        elif model == 'sat':
            self.jacobian = jacobian_sat
            self.jacobian_chi2 = jacobian_chi2_sat
Example #6
    def _back_calc(self, spin=None, relax_time_id=None):
        """Back-calculation of peak intensity for the given relaxation time.

        @keyword spin:              The spin container.
        @type spin:                 SpinContainer instance
        @keyword relax_time_id:     The ID string for the desired relaxation time.
        @type relax_time_id:        str
        @return:                    The peak intensity for the desired relaxation time.
        @rtype:                     float
        """

        # Create the initial parameter vector.
        param_vector = self._assemble_param_vector(spin=spin)

        # Create a scaling matrix.
        scaling_matrix = self._assemble_scaling_matrix(spin=spin, scaling=False)

        # The keys.
        keys = list(spin.intensities.keys())

        # The peak intensities and times.
        values = []
        errors = []
        times = []
        for key in keys:
            values.append(spin.intensities[key])
            errors.append(spin.intensity_err[key])
            times.append(cdp.relax_times[key])

        # The scaling matrix in a diagonalised list form.
        scaling_list = []
        for i in range(len(scaling_matrix)):
            scaling_list.append(scaling_matrix[i, i])

        # Initialise the relaxation fit functions.
        setup(num_params=len(spin.params), num_times=len(cdp.relax_times), values=values, sd=errors, relax_times=times, scaling_matrix=scaling_list)

        # Make a single function call.  This will cause back calculation and the data will be stored in the C module.
        self._func(param_vector)

        # Get the data back.
        results = back_calc_I()

        # Return the correct peak height.
        return results[keys.index(relax_time_id)]
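A caller inside the same class could drive this method once per time point ID, for example when assembling synthetic data (a hypothetical sketch; the variable names are illustrative):

        # Back calculate one intensity per relaxation time ID for the given spin.
        synthetic = {}
        for time_id in cdp.relax_times:
            synthetic[time_id] = self._back_calc(spin=spin, relax_time_id=time_id)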
Example #7
    def setUp(self):
        """Create a number of objects for the calculation and testing of the relaxation curve-fitting equations."""

        # The parameter scaling.
        self.scaling_list = [1.0, 1000.0]

        # The parameter values at the minimum.
        self.I0 = 1000.0
        self.R = 1.0
        self.params = [self.R/self.scaling_list[0], self.I0/self.scaling_list[1]]

        # The time points.
        relax_times = [0.0, 1.0, 2.0, 3.0, 4.0]

        # The intensities for the above I0 and R.
        I = [1000.0, 367.879441171, 135.335283237, 49.7870683679, 18.3156388887]

        # The intensity errors.
        errors = [10.0, 10.0, 10.0, 10.0, 10.0]

        # Set up the C module.
        setup(num_params=2, num_times=len(relax_times), values=I, sd=errors, relax_times=relax_times, scaling_matrix=self.scaling_list)
Example #8
def back_calc():
    """Back calculate the peak intensities.

    The simple two parameter exponential curve (Rx, I0) is assumed.
    """

    # Loop over the spins.
    for spin in spin_loop():
        # Skip deselected spins.
        if not spin.select:
            continue

        # The parameter vector.
        param_vector = array([spin.rx, spin.i0], float64)

        # Initialise the relaxation fit functions.
        setup(num_params=len(spin.params), num_times=len(cdp.relax_times), values=spin.ave_intensities, sd=cdp.sd, relax_times=cdp.relax_times, scaling_matrix=identity(2, float64))

        # Make a single function call.  This will cause back calculation and the data will be stored in the C module.
        func(param_vector)

        # Get the data and store it in the spin specific data structure.
        spin.fit_int = back_calc_I()
Example #9
            I0_m_sim_l = []
            for j in range(sim_boot):
                if j in range(0, 100000, 100):
                    print("Simulation %i"%j)
                # Start minimisation.

                # Draw randomised intensities from a Gaussian distribution centred on the measured values.
                I_err = []
                for k, error in enumerate(errors):
                    I_err.append(gauss(values[k], error))
                # Convert to numpy array.
                I_err = asarray(I_err)

                x0 = [r2eff, i0]
                setup(num_params=len(x0), num_times=len(times), values=I_err, sd=errors, relax_times=times, scaling_matrix=scaling_list)

                params_minfx_sim_j, chi2_minfx_sim_j, iter_count, f_count, g_count, h_count, warning = generic_minimise(func=func, dfunc=dfunc, d2func=d2func, args=(), x0=x0, min_algor=min_algor, min_options=min_options, full_output=True, print_flag=0)
                R_m_sim_j, I0_m_sim_j = params_minfx_sim_j
                R_m_sim_l.append(R_m_sim_j)
                I0_m_sim_l.append(I0_m_sim_j)

            my_dic[spin_id][param_key]['r2eff_array_boot'] = asarray(R_m_sim_l)
            my_dic[spin_id][param_key]['i0_array_boot'] = asarray(I0_m_sim_l)

            # Get stats on distribution.
            sigma_R_sim = std(asarray(R_m_sim_l), ddof=1)
            sigma_I0_sim = std(asarray(I0_m_sim_l), ddof=1)
            my_dic[spin_id][param_key]['r2eff_err_boot'] = sigma_R_sim
            my_dic[spin_id][param_key]['i0_err_boot'] = sigma_I0_sim
Example #10
            I0_m_sim_l = []
            for j in range(sim_boot):
                if j in range(0, 100000, 100):
                    print("Simulation %i"%j)
                # Start minimisation.

                # Draw randomised intensities from a Gaussian distribution centred on the measured values.
                I_err = []
                for k, error in enumerate(errors):
                    I_err.append(gauss(values[k], error))
                # Convert to numpy array.
                I_err = asarray(I_err)

                x0 = [r2eff, i0]
                setup(num_params=len(x0), num_times=len(times), values=I_err, sd=errors, relax_times=times, scaling_matrix=scaling_list)

                params_minfx_sim_j, chi2_minfx_sim_j, iter_count, f_count, g_count, h_count, warning = generic_minimise(func=func, dfunc=dfunc, d2func=d2func, args=(), x0=x0, min_algor=min_algor, min_options=min_options, full_output=True, print_flag=0)
                R_m_sim_j, I0_m_sim_j = params_minfx_sim_j
                R_m_sim_l.append(R_m_sim_j)
                I0_m_sim_l.append(I0_m_sim_j)

            my_dic[spin_id][param_key]['r2eff_array_boot'] = asarray(R_m_sim_l)
            my_dic[spin_id][param_key]['i0_array_boot'] = asarray(I0_m_sim_l)

            # Get stats on distribution.
            sigma_R_sim = std(asarray(R_m_sim_l), ddof=1)
            sigma_I0_sim = std(asarray(I0_m_sim_l), ddof=1)
            my_dic[spin_id][param_key]['r2eff_err_boot'] = sigma_R_sim
            my_dic[spin_id][param_key]['i0_err_boot'] = sigma_I0_sim
Example #11
    def minimise(self, min_algor=None, min_options=None, func_tol=None, grad_tol=None, max_iterations=None, constraints=False, scaling=True, verbosity=0, sim_index=None, lower=None, upper=None, inc=None):
        """Relaxation curve fitting minimisation method.

        @keyword min_algor:         The minimisation algorithm to use.
        @type min_algor:            str
        @keyword min_options:       An array of options to be used by the minimisation algorithm.
        @type min_options:          array of str
        @keyword func_tol:          The function tolerance which, when reached, terminates optimisation.  Setting this to None turns off the check.
        @type func_tol:             None or float
        @keyword grad_tol:          The gradient tolerance which, when reached, terminates optimisation.  Setting this to None turns off the check.
        @type grad_tol:             None or float
        @keyword max_iterations:    The maximum number of iterations for the algorithm.
        @type max_iterations:       int
        @keyword constraints:       If True, constraints are used during optimisation.
        @type constraints:          bool
        @keyword scaling:           If True, diagonal scaling is enabled during optimisation to allow the problem to be better conditioned.
        @type scaling:              bool
        @keyword verbosity:         The amount of information to print.  The higher the value, the greater the verbosity.
        @type verbosity:            int
        @keyword sim_index:         The index of the simulation to optimise.  This should be None if normal optimisation is desired.
        @type sim_index:            None or int
        @keyword lower:             The lower bounds of the grid search, the length of which must equal the number of parameters in the model.  This optional argument is only used when doing a grid search.
        @type lower:                array of numbers
        @keyword upper:             The upper bounds of the grid search, the length of which must equal the number of parameters in the model.  This optional argument is only used when doing a grid search.
        @type upper:                array of numbers
        @keyword inc:               The increments for each dimension of the space for the grid search.  The number of elements in the array must equal the number of parameters in the model.  This argument is only used when doing a grid search.
        @type inc:                  array of int
        """

        # Test if sequence data is loaded.
        if not exists_mol_res_spin_data():
            raise RelaxNoSequenceError

        # Loop over the sequence.
        for spin, mol_name, res_num, res_name in spin_loop(full_info=True):
            # Skip deselected spins.
            if not spin.select:
                continue

            # Skip spins which have no data.
            if not hasattr(spin, 'intensities'):
                continue

            # Create the initial parameter vector.
            param_vector = self._assemble_param_vector(spin=spin)

            # Diagonal scaling.
            scaling_matrix = self._assemble_scaling_matrix(spin=spin, scaling=scaling)
            if len(scaling_matrix):
                param_vector = dot(inv(scaling_matrix), param_vector)

            # Get the grid search minimisation options.
            if match('^[Gg]rid', min_algor):
                inc, lower_new, upper_new = self._grid_search_setup(spin=spin, param_vector=param_vector, lower=lower, upper=upper, inc=inc, scaling_matrix=scaling_matrix)

            # Linear constraints.
            if constraints:
                A, b = self._linear_constraints(spin=spin, scaling_matrix=scaling_matrix)
            else:
                A, b = None, None

            # Print out.
            if verbosity >= 1:
                # Get the spin id string.
                spin_id = generate_spin_id_unique(mol_name=mol_name, res_num=res_num, res_name=res_name, spin_num=spin.num, spin_name=spin.name)

                # Individual spin printout.
                if verbosity >= 2:
                    print("\n\n")

                string = "Fitting to spin " + repr(spin_id)
                print("\n\n" + string)
                print(len(string) * '~')


            # Initialise the function to minimise.
            ######################################

            # The keys.
            keys = list(spin.intensities.keys())

            # The peak intensities and times.
            values = []
            errors = []
            times = []
            for key in keys:
                # The values.
                if sim_index == None:
                    values.append(spin.intensities[key])
                else:
                    values.append(spin.sim_intensities[sim_index][key])

                # The errors.
                errors.append(spin.intensity_err[key])

                # The relaxation times.
                times.append(cdp.relax_times[key])

            # The scaling matrix in a diagonalised list form.
            scaling_list = []
            for i in range(len(scaling_matrix)):
                scaling_list.append(scaling_matrix[i, i])

            setup(num_params=len(spin.params), num_times=len(values), values=values, sd=errors, relax_times=times, scaling_matrix=scaling_list)


            # Set up the minimisation algorithm when constraints are present.
            ################################################################

            if constraints and not match('^[Gg]rid', min_algor):
                algor = min_options[0]
            else:
                algor = min_algor


            # Levenberg-Marquardt minimisation.
            ###################################

            if match('[Ll][Mm]$', algor) or match('[Ll]evenburg-[Mm]arquardt$', algor):
                # Reconstruct the error data structure.
                lm_error = zeros(len(spin.relax_times), float64)
                index = 0
                for k in range(len(spin.relax_times)):
                    lm_error[index:index+len(relax_error[k])] = relax_error[k]
                    index = index + len(relax_error[k])

                min_options = min_options + (self.relax_fit.lm_dri, lm_error)


            # Minimisation.
            ###############

            # Grid search.
            if search('^[Gg]rid', min_algor):
                results = grid(func=self._func, args=(), num_incs=inc, lower=lower_new, upper=upper_new, A=A, b=b, verbosity=verbosity)

                # Unpack the results.
                param_vector, chi2, iter_count, warning = results
                f_count = iter_count
                g_count = 0.0
                h_count = 0.0

            # Minimisation.
            else:
                results = generic_minimise(func=self._func, dfunc=self._dfunc, d2func=self._d2func, args=(), x0=param_vector, min_algor=min_algor, min_options=min_options, func_tol=func_tol, grad_tol=grad_tol, maxiter=max_iterations, A=A, b=b, full_output=True, print_flag=verbosity)

                # Unpack the results.
                if results == None:
                    return
                param_vector, chi2, iter_count, f_count, g_count, h_count, warning = results

            # Scaling.
            if scaling:
                param_vector = dot(scaling_matrix, param_vector)

            # Disassemble the parameter vector.
            self._disassemble_param_vector(param_vector=param_vector, spin=spin, sim_index=sim_index)

            # Monte Carlo minimisation statistics.
            if sim_index != None:
                # Chi-squared statistic.
                spin.chi2_sim[sim_index] = chi2

                # Iterations.
                spin.iter_sim[sim_index] = iter_count

                # Function evaluations.
                spin.f_count_sim[sim_index] = f_count

                # Gradient evaluations.
                spin.g_count_sim[sim_index] = g_count

                # Hessian evaluations.
                spin.h_count_sim[sim_index] = h_count

                # Warning.
                spin.warning_sim[sim_index] = warning


            # Normal statistics.
            else:
                # Chi-squared statistic.
                spin.chi2 = chi2

                # Iterations.
                spin.iter = iter_count

                # Function evaluations.
                spin.f_count = f_count

                # Gradient evaluations.
                spin.g_count = g_count

                # Hessian evaluations.
                spin.h_count = h_count

                # Warning.
                spin.warning = warning