def minimise_setup_fixed_tensors():
    """Set up the data structures for the fixed alignment tensors.

    The fixed tensor parameters are packed as [Axx, Ayy, Axy, Axz, Ayz] per
    tensor into a single flat numpy array.

    @return:    The assembled data structures for the fixed alignment tensors, or None if no tensors are fixed.
    @rtype:     numpy rank-1 array or None
    """

    # The number of fixed tensors (total minus the free, optimised tensors).
    n = align_tensor.num_tensors(skip_fixed=False) - align_tensor.num_tensors(skip_fixed=True)

    # Nothing to do - return before allocating the parameter array.
    if n == 0:
        return None

    # The flat parameter array, 5 parameters per tensor.
    tensors = zeros(n * 5, float64)

    # Loop over the tensors.
    # NOTE(review): the loop filters on opt_uses_align_data() and the presence
    # of parameters, not on the tensor 'fixed' flag, while the array is sized
    # from the fixed-tensor count - confirm these two counts always agree,
    # otherwise the indexing below could overrun.
    index = 0
    for i in range(len(cdp.align_tensors)):
        # Skip non-optimised data.
        if not opt_uses_align_data(cdp.align_tensors[i].name):
            continue

        # No parameters have been set.
        if not hasattr(cdp.align_tensors[i], 'Axx'):
            continue

        # The real tensors.
        tensors[5*index + 0] = cdp.align_tensors[i].Axx
        tensors[5*index + 1] = cdp.align_tensors[i].Ayy
        tensors[5*index + 2] = cdp.align_tensors[i].Axy
        tensors[5*index + 3] = cdp.align_tensors[i].Axz
        tensors[5*index + 4] = cdp.align_tensors[i].Ayz

        # Increment the index.
        index += 1

    # Return the data structure.
    return tensors
def minimise_setup_fixed_tensors():
    """Set up the data structures for the fixed alignment tensors.

    @return:    The assembled data structures for the fixed alignment tensors.
    @rtype:     numpy rank-1 array.
    """

    # The number of fixed tensors: total count minus the free (optimised) count.
    total = align_tensor.num_tensors(skip_fixed=False)
    free = align_tensor.num_tensors(skip_fixed=True)
    fixed_count = total - free
    tensors = zeros(fixed_count*5, float64)

    # No fixed tensors, so nothing to assemble.
    if fixed_count == 0:
        return None

    # Pack the five parameters of each usable tensor into the flat array.
    slot = 0
    for tensor in cdp.align_tensors:
        # Only include tensors whose alignment data is used in optimisation.
        if not opt_uses_align_data(tensor.name):
            continue

        # Skip tensors for which no parameters have been set yet.
        if not hasattr(tensor, 'Axx'):
            continue

        # Copy Axx, Ayy, Axy, Axz, Ayz into this tensor's slot.
        offset = 5*slot
        for pos, param in enumerate(('Axx', 'Ayy', 'Axy', 'Axz', 'Ayz')):
            tensors[offset + pos] = getattr(tensor, param)

        # Advance to the next slot.
        slot += 1

    # Return the assembled array.
    return tensors
def sim_return_param(self, index, model_info=None):
    """Return the array of simulation parameter values.

    @param index:           The index of the parameter to return the array of values for.
    @type index:            int
    @keyword model_info:    The model information from model_loop().  This is unused.
    @type model_info:       None
    @return:                The array of simulation parameter values, or None if the index does not correspond to an alignment tensor parameter.
    @rtype:                 list of float or None
    """

    # Align parameters.
    names = ['Axx', 'Ayy', 'Axy', 'Axz', 'Ayz']

    # Alignment tensor parameters (5 per non-fixed tensor).
    if index < align_tensor.num_tensors(skip_fixed=True)*5:
        # The tensor and parameter index.  Floor division is required here:
        # under Python 3, '/' produces a float which breaks integer indexing.
        param_index = index % 5
        tensor_index = index // 5

        # Return the simulation parameter array.
        tensor = align_tensor.return_tensor(index=tensor_index, skip_fixed=True)
        return getattr(tensor, names[param_index]+'_sim')
def sim_return_param(self, index, model_info=None):
    """Return the array of simulation parameter values.

    @param index:           The index of the parameter to return the array of values for.
    @type index:            int
    @keyword model_info:    The model information from model_loop().  This is unused.
    @type model_info:       None
    @return:                The array of simulation parameter values, or None if the index does not correspond to an alignment tensor parameter.
    @rtype:                 list of float or None
    """

    # Align parameters.
    names = ['Axx', 'Ayy', 'Axy', 'Axz', 'Ayz']

    # Alignment tensor parameters (5 per non-fixed tensor).
    if index < align_tensor.num_tensors(skip_fixed=True) * 5:
        # The parameter index within the tensor, and the tensor index.
        # Floor division replaces '/' - true division returns a float under
        # Python 3, which is not a valid tensor index.
        param_index = index % 5
        tensor_index = index // 5

        # Return the simulation parameter array.
        tensor = align_tensor.return_tensor(index=tensor_index, skip_fixed=True)
        return getattr(tensor, names[param_index] + '_sim')
def grid_search(self, lower=None, upper=None, inc=None, scaling_matrix=None, constraints=False, verbosity=0, sim_index=None): """The grid search function. @param lower: The lower bounds of the grid search which must be equal to the number of parameters in the model. @type lower: list of lists of floats @param upper: The upper bounds of the grid search which must be equal to the number of parameters in the model. @type upper: list of lists of floats @param inc: The increments for each dimension of the space for the grid search. The number of elements in the array must equal to the number of parameters in the model. @type inc: list of lists of int @keyword scaling_matrix: The per-model list of diagonal and square scaling matrices. @type scaling_matrix: list of numpy rank-2, float64 array or list of None @param constraints: If True, constraints are applied during the grid search (elinating parts of the grid). If False, no constraints are used. @type constraints: bool @param verbosity: A flag specifying the amount of information to print. The higher the value, the greater the verbosity. @type verbosity: int """ # Test if the N-state model has been set up. if not hasattr(cdp, 'model'): raise RelaxNoModelError('N-state') # The number of parameters. n = param_num() # Determine the data type. data_types = base_data_types() # The number of tensors to optimise. tensor_num = align_tensor.num_tensors(skip_fixed=True) # Custom sub-grid search for when only tensors are optimised (as each tensor is independent, the number of points collapses from inc**(5*N) to N*inc**5). if cdp.model == 'fixed' and tensor_num > 1 and ('rdc' in data_types or 'pcs' in data_types) and not align_tensor.all_tensors_fixed() and hasattr(cdp, 'paramag_centre_fixed') and cdp.paramag_centre_fixed: # Print out. print("Optimising each alignment tensor separately.") # Store the alignment tensor fixed flags. fixed_flags = [] for i in range(len(cdp.align_ids)): # Get the tensor object. 
tensor = align_tensor.return_tensor(index=i, skip_fixed=False) # Store the flag. fixed_flags.append(tensor.fixed) # Fix the tensor. tensor.set('fixed', True) # Loop over each sub-grid. for i in range(len(cdp.align_ids)): # Skip the tensor if originally fixed. if fixed_flags[i]: continue # Get the tensor object. tensor = align_tensor.return_tensor(index=i, skip_fixed=False) # Unfix the current tensor. tensor.set('fixed', False) # Grid search parameter subsets. lower_sub = lower[0][i*5:i*5+5] upper_sub = upper[0][i*5:i*5+5] inc_sub = inc[0][i*5:i*5+5] # Minimisation of the sub-grid. self.minimise(min_algor='grid', lower=[lower_sub], upper=[upper_sub], inc=[inc_sub], scaling_matrix=[None], constraints=constraints, verbosity=verbosity, sim_index=sim_index) # Fix the tensor again. tensor.set('fixed', True) # Reset the state of the tensors. for i in range(len(cdp.align_ids)): # Get the tensor object. tensor = align_tensor.return_tensor(index=i, skip_fixed=False) # Fix the tensor. tensor.set('fixed', fixed_flags[i]) # All other minimisation. else: self.minimise(min_algor='grid', lower=lower, upper=upper, inc=inc, scaling_matrix=scaling_matrix, constraints=constraints, verbosity=verbosity, sim_index=sim_index)
def grid_search(self, lower=None, upper=None, inc=None, scaling_matrix=None, constraints=False, verbosity=0, sim_index=None): """The grid search function. @param lower: The lower bounds of the grid search which must be equal to the number of parameters in the model. @type lower: list of lists of floats @param upper: The upper bounds of the grid search which must be equal to the number of parameters in the model. @type upper: list of lists of floats @param inc: The increments for each dimension of the space for the grid search. The number of elements in the array must equal to the number of parameters in the model. @type inc: list of lists of int @keyword scaling_matrix: The per-model list of diagonal and square scaling matrices. @type scaling_matrix: list of numpy rank-2, float64 array or list of None @param constraints: If True, constraints are applied during the grid search (elinating parts of the grid). If False, no constraints are used. @type constraints: bool @param verbosity: A flag specifying the amount of information to print. The higher the value, the greater the verbosity. @type verbosity: int """ # Test if the N-state model has been set up. if not hasattr(cdp, 'model'): raise RelaxNoModelError('N-state') # The number of parameters. n = param_num() # Determine the data type. data_types = base_data_types() # The number of tensors to optimise. tensor_num = align_tensor.num_tensors(skip_fixed=True) # Custom sub-grid search for when only tensors are optimised (as each tensor is independent, the number of points collapses from inc**(5*N) to N*inc**5). if cdp.model == 'fixed' and tensor_num > 1 and ( 'rdc' in data_types or 'pcs' in data_types ) and not align_tensor.all_tensors_fixed() and hasattr( cdp, 'paramag_centre_fixed') and cdp.paramag_centre_fixed: # Print out. print("Optimising each alignment tensor separately.") # Store the alignment tensor fixed flags. fixed_flags = [] for i in range(len(cdp.align_ids)): # Get the tensor object. 
tensor = align_tensor.return_tensor(index=i, skip_fixed=False) # Store the flag. fixed_flags.append(tensor.fixed) # Fix the tensor. tensor.set('fixed', True) # Loop over each sub-grid. for i in range(len(cdp.align_ids)): # Skip the tensor if originally fixed. if fixed_flags[i]: continue # Get the tensor object. tensor = align_tensor.return_tensor(index=i, skip_fixed=False) # Unfix the current tensor. tensor.set('fixed', False) # Grid search parameter subsets. lower_sub = lower[0][i * 5:i * 5 + 5] upper_sub = upper[0][i * 5:i * 5 + 5] inc_sub = inc[0][i * 5:i * 5 + 5] # Minimisation of the sub-grid. self.minimise(min_algor='grid', lower=[lower_sub], upper=[upper_sub], inc=[inc_sub], scaling_matrix=[None], constraints=constraints, verbosity=verbosity, sim_index=sim_index) # Fix the tensor again. tensor.set('fixed', True) # Reset the state of the tensors. for i in range(len(cdp.align_ids)): # Get the tensor object. tensor = align_tensor.return_tensor(index=i, skip_fixed=False) # Fix the tensor. tensor.set('fixed', fixed_flags[i]) # All other minimisation. else: self.minimise(min_algor='grid', lower=lower, upper=upper, inc=inc, scaling_matrix=scaling_matrix, constraints=constraints, verbosity=verbosity, sim_index=sim_index)