def get_param_values(self, model_info=None, sim_index=None):
    """Return a vector of parameter values.

    @keyword model_info:    The spin container and the spin ID string from the _model_loop_spin() method.
    @type model_info:       SpinContainer instance, str
    @keyword sim_index:     The optional Monte Carlo simulation index.
    @type sim_index:        int
    @return:                The vector of parameter values.
    @rtype:                 list of float
    """

    # Unpack the data.
    spin, spin_id = model_info

    # Return the vector.
    return assemble_param_vector(spin=spin, sim_index=sim_index)
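
# Illustrative usage sketch - the api object and model_loop() iteration below are
# assumptions following the specific analysis API conventions, and the [rx, i0]
# ordering assumes the two-parameter exponential model:
#
#     for model_info in api.model_loop():
#         # The optimised parameter values, e.g. [rx, i0].
#         values = api.get_param_values(model_info=model_info)
#
#         # The values from the first Monte Carlo simulation.
#         sim_values = api.get_param_values(model_info=model_info, sim_index=0)
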
def back_calc(spin=None, relax_time_id=None):
    """Back-calculation of peak intensity for the given relaxation time.

    @keyword spin:              The spin container.
    @type spin:                 SpinContainer instance
    @keyword relax_time_id:     The ID string for the desired relaxation time.
    @type relax_time_id:        str
    @return:                    The peak intensity for the desired relaxation time.
    @rtype:                     float
    """

    # Create the initial parameter vector.
    param_vector = assemble_param_vector(spin=spin)

    # The keys.
    keys = list(spin.peak_intensity.keys())

    # The peak intensities and times.
    values = []
    errors = []
    times = []
    for key in keys:
        values.append(spin.peak_intensity[key])
        errors.append(spin.peak_intensity_err[key])
        times.append(cdp.relax_times[key])

    # A fake scaling matrix in a diagonalised list form.
    scaling_list = []
    for i in range(len(param_vector)):
        scaling_list.append(1.0)

    # Initialise the relaxation fit functions.
    model = Relax_fit_opt(model=spin.model, num_params=len(spin.params), values=values, errors=errors, relax_times=times, scaling_matrix=scaling_list)

    # Make a single function call.  This will cause back calculation and the data will be stored in the C module.
    model.func(param_vector)

    # Get the data back.
    results = model.back_calc_data()

    # Return the correct peak height.
    return results[keys.index(relax_time_id)]
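
# The helper below is a pure Python reference sketch of what the C module
# back-calculates for the default two-parameter exponential model,
# I(t) = i0 * exp(-rx * t).  It is hypothetical and purely illustrative - the real
# computation occurs inside the compiled Relax_fit_opt C code.
def _example_exp_back_calc(rx, i0, times):
    """Back-calculate peak intensities for the two-parameter exponential model (illustrative sketch).

    @param rx:      The relaxation rate.
    @type rx:       float
    @param i0:      The initial peak intensity.
    @type i0:       float
    @param times:   The relaxation delay times.
    @type times:    list of float
    @return:        The back-calculated peak intensities, one per time point.
    @rtype:         list of float
    """
    from math import exp

    # Evaluate the exponential decay at each time point.
    return [i0 * exp(-rx * t) for t in times]
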
def covariance_matrix(self, model_info=None, verbosity=1):
    """Return the Jacobian and weights required for parameter errors via the covariance matrix.

    @keyword model_info:    The spin container and the spin ID string from the _model_loop_spin() method.
    @type model_info:       SpinContainer instance, str
    @keyword verbosity:     The amount of information to print.  The higher the value, the greater the verbosity.
    @type verbosity:        int
    @return:                The Jacobian and weight matrices for the given model.
    @rtype:                 numpy rank-2 array, numpy rank-2 array
    """

    # Unpack the data.
    spin, spin_id = model_info

    # Check that the C modules have been compiled.
    if not C_module_exp_fn:
        raise RelaxError("Relaxation curve fitting is not available.  Try compiling the C modules on your platform.")

    # Raise an error if the spin has not been optimised.
    if not (hasattr(spin, 'rx') and hasattr(spin, 'i0')):
        raise RelaxError("Spin '%s' does not contain optimised 'rx' and 'i0' values.  Try executing: minimise.execute(min_algor='Newton', constraints=False)." % spin_id)

    # Raise a warning if the gradient count is 0, as this can indicate that no minimisation has been performed first.
    if hasattr(spin, 'g_count') and spin.g_count == 0.0:
        warn(RelaxWarning("Spin '%s' contains a gradient count of 0.0.  Is the rx parameter optimised?  Try executing: minimise.execute(min_algor='Newton', constraints=False)." % spin_id))

    # Print out.
    if verbosity >= 1:
        # Individual spin block section.
        top = 2
        if verbosity >= 2:
            top += 2
        subsection(file=sys.stdout, text="Estimating rx error for spin: %s" % spin_id, prespace=top)

    # The peak intensities, errors, and relaxation times.
    values = []
    errors = []
    times = []
    for key in spin.peak_intensity:
        values.append(spin.peak_intensity[key])
        errors.append(spin.peak_intensity_err[key])
        times.append(cdp.relax_times[key])

    # Convert to numpy arrays.
    values = asarray(values)
    errors = asarray(errors)
    times = asarray(times)

    # Create the parameter vector and scaling matrix (as a diagonalised list).
    param_vector = assemble_param_vector(spin=spin)
    scaling_list = []
    for i in range(len(spin.params)):
        scaling_list.append(1.0)

    # Initialise the data in the C code.
    model = Relax_fit_opt(model=spin.model, num_params=len(param_vector), values=values, errors=errors, relax_times=times, scaling_matrix=scaling_list)

    # Use the direct Jacobian from the function, with the weights set to the inverse variances of the peak intensity errors.
    jacobian_matrix_exp = transpose(asarray(model.jacobian(param_vector)))
    weights = 1. / errors**2

    # Return the matrices.
    return jacobian_matrix_exp, weights
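
# The helper below sketches how the matrices returned by covariance_matrix() are
# typically consumed downstream for parameter error estimation.  The multifit_covar()
# call is an assumption based on relax's lib.statistics module; the helper itself is
# hypothetical and purely illustrative.
def _example_param_errors(jacobian, weights):
    """Estimate parameter errors from the Jacobian and weights (illustrative sketch).

    @param jacobian:    The Jacobian matrix from covariance_matrix().
    @type jacobian:     numpy rank-2 array
    @param weights:     The weights, i.e. 1.0/sigma**2 of the peak intensity errors.
    @type weights:      numpy rank-1 array
    @return:            The parameter errors.
    @rtype:             numpy rank-1 array
    """
    from numpy import diagonal, sqrt

    from lib.statistics import multifit_covar

    # The weighted covariance matrix built from the Jacobian.
    covar = multifit_covar(J=jacobian, weights=weights)

    # The errors are the square roots of the diagonal elements.
    return sqrt(diagonal(covar))
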
def minimise(self, min_algor=None, min_options=None, func_tol=None, grad_tol=None, max_iterations=None, constraints=False, scaling_matrix=None, verbosity=0, sim_index=None, lower=None, upper=None, inc=None):
    """Relaxation curve fitting minimisation method.

    @keyword min_algor:         The minimisation algorithm to use.
    @type min_algor:            str
    @keyword min_options:       An array of options to be used by the minimisation algorithm.
    @type min_options:          array of str
    @keyword func_tol:          The function tolerance which, when reached, terminates optimisation.  Setting this to None turns off the check.
    @type func_tol:             None or float
    @keyword grad_tol:          The gradient tolerance which, when reached, terminates optimisation.  Setting this to None turns off the check.
    @type grad_tol:             None or float
    @keyword max_iterations:    The maximum number of iterations for the algorithm.
    @type max_iterations:       int
    @keyword constraints:       If True, constraints are used during optimisation.
    @type constraints:          bool
    @keyword scaling_matrix:    The per-model list of diagonal and square scaling matrices.
    @type scaling_matrix:       list of numpy rank-2, float64 array or list of None
    @keyword verbosity:         The amount of information to print.  The higher the value, the greater the verbosity.
    @type verbosity:            int
    @keyword sim_index:         The index of the simulation to optimise.  This should be None if normal optimisation is desired.
    @type sim_index:            None or int
    @keyword lower:             The per-model lower bounds of the grid search which must be of the same length as the number of parameters in the model.  This optional argument is only used when doing a grid search.
    @type lower:                list of lists of numbers
    @keyword upper:             The per-model upper bounds of the grid search which must be of the same length as the number of parameters in the model.  This optional argument is only used when doing a grid search.
    @type upper:                list of lists of numbers
    @keyword inc:               The per-model increments for each dimension of the space for the grid search.  The number of elements in the array must equal the number of parameters in the model.  This argument is only used when doing a grid search.
    @type inc:                  list of lists of int
    """

    # Checks.
    check_mol_res_spin_data()

    # Loop over the sequence.
    model_index = 0
    for spin, spin_id in self.model_loop():
        # Skip deselected spins.
        if not spin.select:
            continue

        # Skip spins which have no data.
        if not hasattr(spin, 'peak_intensity'):
            continue

        # Create the initial parameter vector.
        param_vector = assemble_param_vector(spin=spin)

        # Diagonal scaling.
        if scaling_matrix[model_index] is not None:
            param_vector = dot(inv(scaling_matrix[model_index]), param_vector)

        # Linear constraints.
        if constraints:
            A, b = linear_constraints(spin=spin, scaling_matrix=scaling_matrix[model_index])
        else:
            A, b = None, None

        # Print out.
        if verbosity >= 1:
            # Individual spin printout.
            if verbosity >= 2:
                print("\n\n")

            string = "Fitting to spin " + repr(spin_id)
            print("\n\n" + string)
            print(len(string) * '~')

        # Initialise the function to minimise.
        ######################################

        # The peak intensities and times.
        values = []
        errors = []
        times = []
        for key in spin.peak_intensity:
            # The values.
            if sim_index is None:
                values.append(spin.peak_intensity[key])
            else:
                values.append(spin.peak_intensity_sim[sim_index][key])

            # The errors.
            errors.append(spin.peak_intensity_err[key])

            # The relaxation times.
            times.append(cdp.relax_times[key])

        # The scaling matrix in a diagonalised list form.
        scaling_list = []
        if scaling_matrix[model_index] is None:
            for i in range(len(param_vector)):
                scaling_list.append(1.0)
        else:
            for i in range(len(scaling_matrix[model_index])):
                scaling_list.append(scaling_matrix[model_index][i, i])

        # Set up the target function.
        model = Relax_fit_opt(model=spin.model, num_params=len(spin.params), values=values, errors=errors, relax_times=times, scaling_matrix=scaling_list)

        # Set up the minimisation algorithm when constraints are present.
        ##################################################################

        if constraints and not match('^[Gg]rid', min_algor):
            algor = min_options[0]
        else:
            algor = min_algor

        # Levenberg-Marquardt minimisation.
        ###################################

        if match('^[Ll][Mm]$', algor) or match('^[Ll]evenberg-[Mm]arquardt$', algor):
            # Reconstruct the error data structure in the numpy form required by the algorithm (the errors were assembled into a flat list above).
            lm_error = asarray(errors, float64)

            # Append the derivative function and the errors to the minimisation options.
            min_options = min_options + (self.relax_fit.lm_dri, lm_error)

        # Minimisation.
        ###############

        # Grid search.
        if search('^[Gg]rid', min_algor):
            results = grid(func=model.func, args=(), num_incs=inc[model_index], lower=lower[model_index], upper=upper[model_index], A=A, b=b, verbosity=verbosity)

            # Unpack the results.
            param_vector, chi2, iter_count, warning = results
            f_count = iter_count
            g_count = 0.0
            h_count = 0.0

        # Minimisation.
        else:
            results = generic_minimise(func=model.func, dfunc=model.dfunc, d2func=model.d2func, args=(), x0=param_vector, min_algor=min_algor, min_options=min_options, func_tol=func_tol, grad_tol=grad_tol, maxiter=max_iterations, A=A, b=b, full_output=True, print_flag=verbosity)

            # Unpack the results.
            if results is None:
                return
            param_vector, chi2, iter_count, f_count, g_count, h_count, warning = results

        # Scaling.
        if scaling_matrix[model_index] is not None:
            param_vector = dot(scaling_matrix[model_index], param_vector)

        # Disassemble the parameter vector.
        disassemble_param_vector(param_vector=param_vector, spin=spin, sim_index=sim_index)

        # Monte Carlo minimisation statistics.
        if sim_index is not None:
            # Chi-squared statistic.
            spin.chi2_sim[sim_index] = chi2

            # Iterations.
            spin.iter_sim[sim_index] = iter_count

            # Function evaluations.
            spin.f_count_sim[sim_index] = f_count

            # Gradient evaluations.
            spin.g_count_sim[sim_index] = g_count

            # Hessian evaluations.
            spin.h_count_sim[sim_index] = h_count

            # Warning.
            spin.warning_sim[sim_index] = warning

        # Normal statistics.
        else:
            # Chi-squared statistic.
            spin.chi2 = chi2

            # Iterations.
            spin.iter = iter_count

            # Function evaluations.
            spin.f_count = f_count

            # Gradient evaluations.
            spin.g_count = g_count

            # Hessian evaluations.
            spin.h_count = h_count

            # Warning.
            spin.warning = warning

        # Increment the model index.
        model_index += 1
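
# For reference, the chi-squared target that model.func() evaluates in the C module is
# sketched below in pure Python for the two-parameter exponential model, assuming the
# [rx, i0] parameter ordering.  The helper is hypothetical and purely illustrative.
def _example_chi2(params, values, errors, times):
    """Chi-squared value for the two-parameter exponential model (illustrative sketch).

    @param params:  The parameter vector, assumed to be [rx, i0].
    @type params:   list of float
    @param values:  The measured peak intensities.
    @type values:   list of float
    @param errors:  The peak intensity errors.
    @type errors:   list of float
    @param times:   The relaxation delay times.
    @type times:    list of float
    @return:        The chi-squared value.
    @rtype:         float
    """
    from math import exp

    # Unpack the parameter vector.
    rx, i0 = params

    # Sum the squared, error-weighted residuals between measured and back-calculated intensities.
    return sum(((values[i] - i0 * exp(-rx * times[i])) / errors[i])**2 for i in range(len(times)))
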