def model_statistics(self, model_info=None, spin_id=None, global_stats=None):
    """Return the k, n, and chi2 model statistics.

    k - number of parameters.
    n - number of data points.
    chi2 - the chi-squared value.


    @keyword model_info:    The model information from model_loop().  This is unused.
    @type model_info:       None
    @keyword spin_id:       The spin identification string.  This is ignored in the N-state model.
    @type spin_id:          None or str
    @keyword global_stats:  A parameter which determines if global or local statistics are returned.  For the N-state model, this argument is ignored.
    @type global_stats:     None or bool
    @return:                The optimisation statistics, in tuple format, of the number of parameters (k), the number of data points (n), and the chi-squared value (chi2).
    @rtype:                 tuple of (int, int, float)
    """

    # Assemble the three statistics from the current data pipe.
    k = param_num()
    n = num_data_points()
    chi2 = cdp.chi2

    # Return them as a tuple.
    return k, n, chi2
def model_statistics(self, model_info=None, spin_id=None, global_stats=None):
    """Return the k, n, and chi2 model statistics.

    k - number of parameters.
    n - number of data points.
    chi2 - the chi-squared value.


    @keyword model_info:    The model information from model_loop().  This is unused.
    @type model_info:       None
    @keyword spin_id:       The spin identification string.  This is ignored in the N-state model.
    @type spin_id:          None or str
    @keyword global_stats:  A parameter which determines if global or local statistics are returned.  For the N-state model, this argument is ignored.
    @type global_stats:     None or bool
    @return:                The optimisation statistics, in tuple format, of the number of parameters (k), the number of data points (n), and the chi-squared value (chi2).
    @rtype:                 tuple of (int, int, float)
    """

    # Build and return the statistics tuple: parameter count, data point count, and the chi-squared value from the current data pipe.
    stats = (param_num(), num_data_points(), cdp.chi2)
    return stats
def grid_search(self, lower=None, upper=None, inc=None, scaling_matrix=None, constraints=False, verbosity=0, sim_index=None):
    """The grid search function.

    @param lower:               The lower bounds of the grid search which must be equal to the number of parameters in the model.
    @type lower:                list of lists of floats
    @param upper:               The upper bounds of the grid search which must be equal to the number of parameters in the model.
    @type upper:                list of lists of floats
    @param inc:                 The increments for each dimension of the space for the grid search.  The number of elements in the array must equal to the number of parameters in the model.
    @type inc:                  list of lists of int
    @keyword scaling_matrix:    The per-model list of diagonal and square scaling matrices.
    @type scaling_matrix:       list of numpy rank-2, float64 array or list of None
    @param constraints:         If True, constraints are applied during the grid search (eliminating parts of the grid).  If False, no constraints are used.
    @type constraints:          bool
    @param verbosity:           A flag specifying the amount of information to print.  The higher the value, the greater the verbosity.
    @type verbosity:            int
    @keyword sim_index:         The index of the simulation to optimise, if running Monte Carlo simulations.
    @type sim_index:            None or int
    """

    # Test if the N-state model has been set up.
    if not hasattr(cdp, 'model'):
        raise RelaxNoModelError('N-state')

    # Determine the data type.  (The unused 'n = param_num()' local has been removed.)
    data_types = base_data_types()

    # The number of tensors to optimise.
    tensor_num = align_tensor.num_tensors(skip_fixed=True)

    # Custom sub-grid search for when only tensors are optimised (as each tensor is independent, the number of points collapses from inc**(5*N) to N*inc**5).
    if cdp.model == 'fixed' and tensor_num > 1 and ('rdc' in data_types or 'pcs' in data_types) and not align_tensor.all_tensors_fixed() and hasattr(cdp, 'paramag_centre_fixed') and cdp.paramag_centre_fixed:
        # Print out.
        print("Optimising each alignment tensor separately.")

        # Store the alignment tensor fixed flags, then fix every tensor so only one at a time is active below.
        fixed_flags = []
        for i in range(len(cdp.align_ids)):
            # Get the tensor object.
            tensor = align_tensor.return_tensor(index=i, skip_fixed=False)

            # Store the flag.
            fixed_flags.append(tensor.fixed)

            # Fix the tensor.
            tensor.set('fixed', True)

        # Loop over each sub-grid.
        for i in range(len(cdp.align_ids)):
            # Skip the tensor if originally fixed.
            if fixed_flags[i]:
                continue

            # Get the tensor object.
            tensor = align_tensor.return_tensor(index=i, skip_fixed=False)

            # Unfix the current tensor.
            tensor.set('fixed', False)

            # Grid search parameter subsets (each tensor has 5 parameters).
            lower_sub = lower[0][i*5:i*5+5]
            upper_sub = upper[0][i*5:i*5+5]
            inc_sub = inc[0][i*5:i*5+5]

            # Minimisation of the sub-grid.
            self.minimise(min_algor='grid', lower=[lower_sub], upper=[upper_sub], inc=[inc_sub], scaling_matrix=[None], constraints=constraints, verbosity=verbosity, sim_index=sim_index)

            # Fix the tensor again.
            tensor.set('fixed', True)

        # Reset the state of the tensors.
        for i in range(len(cdp.align_ids)):
            # Get the tensor object.
            tensor = align_tensor.return_tensor(index=i, skip_fixed=False)

            # Restore the original fixed flag.
            tensor.set('fixed', fixed_flags[i])

    # All other minimisation.
    else:
        self.minimise(min_algor='grid', lower=lower, upper=upper, inc=inc, scaling_matrix=scaling_matrix, constraints=constraints, verbosity=verbosity, sim_index=sim_index)
def grid_search(self, lower=None, upper=None, inc=None, scaling_matrix=None, constraints=False, verbosity=0, sim_index=None):
    """The grid search function.

    @param lower:               The lower bounds of the grid search which must be equal to the number of parameters in the model.
    @type lower:                list of lists of floats
    @param upper:               The upper bounds of the grid search which must be equal to the number of parameters in the model.
    @type upper:                list of lists of floats
    @param inc:                 The increments for each dimension of the space for the grid search.  The number of elements in the array must equal to the number of parameters in the model.
    @type inc:                  list of lists of int
    @keyword scaling_matrix:    The per-model list of diagonal and square scaling matrices.
    @type scaling_matrix:       list of numpy rank-2, float64 array or list of None
    @param constraints:         If True, constraints are applied during the grid search (eliminating parts of the grid).  If False, no constraints are used.
    @type constraints:          bool
    @param verbosity:           A flag specifying the amount of information to print.  The higher the value, the greater the verbosity.
    @type verbosity:            int
    @keyword sim_index:         The index of the simulation to optimise, if running Monte Carlo simulations.
    @type sim_index:            None or int
    """

    # Test if the N-state model has been set up.
    if not hasattr(cdp, 'model'):
        raise RelaxNoModelError('N-state')

    # Determine the data type.  (The unused 'n = param_num()' local has been removed.)
    data_types = base_data_types()

    # The number of tensors to optimise.
    tensor_num = align_tensor.num_tensors(skip_fixed=True)

    # Custom sub-grid search for when only tensors are optimised (as each tensor is independent, the number of points collapses from inc**(5*N) to N*inc**5).
    if cdp.model == 'fixed' and tensor_num > 1 and ('rdc' in data_types or 'pcs' in data_types) and not align_tensor.all_tensors_fixed() and hasattr(cdp, 'paramag_centre_fixed') and cdp.paramag_centre_fixed:
        # Print out.
        print("Optimising each alignment tensor separately.")

        # Store the alignment tensor fixed flags, then fix every tensor so only one at a time is active below.
        fixed_flags = []
        for i in range(len(cdp.align_ids)):
            # Get the tensor object.
            tensor = align_tensor.return_tensor(index=i, skip_fixed=False)

            # Store the flag.
            fixed_flags.append(tensor.fixed)

            # Fix the tensor.
            tensor.set('fixed', True)

        # Loop over each sub-grid.
        for i in range(len(cdp.align_ids)):
            # Skip the tensor if originally fixed.
            if fixed_flags[i]:
                continue

            # Get the tensor object.
            tensor = align_tensor.return_tensor(index=i, skip_fixed=False)

            # Unfix the current tensor.
            tensor.set('fixed', False)

            # Grid search parameter subsets (each tensor has 5 parameters).
            lower_sub = lower[0][i*5:i*5+5]
            upper_sub = upper[0][i*5:i*5+5]
            inc_sub = inc[0][i*5:i*5+5]

            # Minimisation of the sub-grid.
            self.minimise(min_algor='grid', lower=[lower_sub], upper=[upper_sub], inc=[inc_sub], scaling_matrix=[None], constraints=constraints, verbosity=verbosity, sim_index=sim_index)

            # Fix the tensor again.
            tensor.set('fixed', True)

        # Reset the state of the tensors.
        for i in range(len(cdp.align_ids)):
            # Get the tensor object.
            tensor = align_tensor.return_tensor(index=i, skip_fixed=False)

            # Restore the original fixed flag.
            tensor.set('fixed', fixed_flags[i])

    # All other minimisation.
    else:
        self.minimise(min_algor='grid', lower=lower, upper=upper, inc=inc, scaling_matrix=scaling_matrix, constraints=constraints, verbosity=verbosity, sim_index=sim_index)