def simulate(file="simulation.pdb.bz2", dir=None, step_size=2.0, snapshot=10, total=1000, model=1, force=True): """Pseudo-Brownian dynamics simulation of the frame order motions. @keyword file: The PDB file for storing the frame order pseudo-Brownian dynamics simulation. The compression is determined automatically by the file extensions '*.pdb', '*.pdb.gz', and '*.pdb.bz2'. @type file: str @keyword dir: The directory name to place the file into. @type dir: str or None @keyword step_size: The rotation will be of a random direction but with this fixed angle. The value is in degrees. @type step_size: float @keyword snapshot: The number of steps in the simulation when snapshots will be taken. @type snapshot: int @keyword total: The total number of snapshots to take before stopping the simulation. @type total: int @keyword model: Only one model from an analysed ensemble of structures can be used for the pseudo-Brownian simulation, as the simulation and corresponding PDB file consists of one model per simulation. @type model: int @keyword force: A flag which, if set to True, will overwrite the any pre-existing file. @type force: bool """ # Printout. print("Pseudo-Brownian dynamics simulation of the frame order motions.") # Checks. check_pipe() check_model() check_domain() check_parameters() check_pivot() # Skip the rigid model. if cdp.model == MODEL_RIGID: print("Skipping the rigid model.") return # Open the output file. file = open_write_file(file_name=file, dir=dir, force=force) # The parameter values. values = assemble_param_vector() params = {} i = 0 for name in cdp.params: params[name] = values[i] i += 1 # The structure. structure = deepcopy(cdp.structure) if structure.num_models() > 1: structure.collapse_ensemble(model_num=model) # The pivot points. num_states = 1 if cdp.model == MODEL_DOUBLE_ROTOR: num_states = 2 pivot = zeros((num_states, 3), float64) for i in range(num_states): pivot[i] = generate_pivot(order=i+1, pdb_limit=True) # Shift to the average position. average_position(structure=structure, models=[None]) # The motional eigenframe. frame = generate_axis_system() # Create the distribution. brownian(file=file, model=cdp.model, structure=structure, parameters=params, eigenframe=frame, pivot=pivot, atom_id=domain_moving(), step_size=step_size, snapshot=snapshot, total=total) # Close the file. file.close()
def distribute(file="distribution.pdb.bz2", dir=None, atom_id=None, total=1000, max_rotations=100000, model=1, force=True): """Create a uniform distribution of structures for the frame order motions. @keyword file: The PDB file for storing the frame order motional distribution. The compression is determined automatically by the file extensions '*.pdb', '*.pdb.gz', and '*.pdb.bz2'. @type file: str @keyword dir: The directory name to place the file into. @type dir: str or None @keyword atom_id: The atom identification string to allow the distribution to be a subset of all atoms. @type atom_id: None or str @keyword total: The total number of states/model/structures in the distribution. @type total: int @keyword max_rotations: The maximum number of rotations to generate the distribution from. This prevents an execution for an infinite amount of time when a frame order amplitude parameter is close to zero so that the subset of all rotations within the distribution is close to zero. @type max_rotations: int @keyword model: Only one model from an analysed ensemble of structures can be used for the distribution, as the corresponding PDB file consists of one model per state. @type model: int @keyword force: A flag which, if set to True, will overwrite the any pre-existing file. @type force: bool """ # Printout. print("Uniform distribution of structures representing the frame order motions.") # Check the total. if total > 9999: raise RelaxError("A maximum of 9999 models is allowed in the PDB format.") # Checks. check_pipe() check_model() check_domain() check_parameters() check_pivot() # Skip the rigid model. if cdp.model == MODEL_RIGID: print("Skipping the rigid model.") return # Open the output file. file = open_write_file(file_name=file, dir=dir, force=force) # The parameter values. values = assemble_param_vector() params = {} i = 0 for name in cdp.params: params[name] = values[i] i += 1 # The structure. structure = deepcopy(cdp.structure) if structure.num_models() > 1: structure.collapse_ensemble(model_num=model) # The pivot points. num_states = 1 if cdp.model == MODEL_DOUBLE_ROTOR: num_states = 2 pivot = zeros((num_states, 3), float64) for i in range(num_states): pivot[i] = generate_pivot(order=i+1, pdb_limit=True) # Shift to the average position. average_position(structure=structure, models=[None]) # The motional eigenframe. frame = generate_axis_system() # Only work with a subset. if atom_id: # The inverted selection. selection = structure.selection(atom_id=atom_id, inv=True) # Delete the data. structure.delete(selection=selection, verbosity=0) # Create the distribution. uniform_distribution(file=file, model=cdp.model, structure=structure, parameters=params, eigenframe=frame, pivot=pivot, atom_id=domain_moving(), total=total, max_rotations=max_rotations) # Close the file. file.close()
def distribute(file="distribution.pdb.bz2", dir=None, atom_id=None, total=1000, max_rotations=100000, model=1, force=True): """Create a uniform distribution of structures for the frame order motions. @keyword file: The PDB file for storing the frame order motional distribution. The compression is determined automatically by the file extensions '*.pdb', '*.pdb.gz', and '*.pdb.bz2'. @type file: str @keyword dir: The directory name to place the file into. @type dir: str or None @keyword atom_id: The atom identification string to allow the distribution to be a subset of all atoms. @type atom_id: None or str @keyword total: The total number of states/model/structures in the distribution. @type total: int @keyword max_rotations: The maximum number of rotations to generate the distribution from. This prevents an execution for an infinite amount of time when a frame order amplitude parameter is close to zero so that the subset of all rotations within the distribution is close to zero. @type max_rotations: int @keyword model: Only one model from an analysed ensemble of structures can be used for the distribution, as the corresponding PDB file consists of one model per state. @type model: int @keyword force: A flag which, if set to True, will overwrite the any pre-existing file. @type force: bool """ # Printout. print( "Uniform distribution of structures representing the frame order motions." ) # Check the total. if total > 9999: raise RelaxError( "A maximum of 9999 models is allowed in the PDB format.") # Checks. check_pipe() check_model() check_domain() check_parameters() check_pivot() # Skip the rigid model. if cdp.model == MODEL_RIGID: print("Skipping the rigid model.") return # Open the output file. file = open_write_file(file_name=file, dir=dir, force=force) # The parameter values. values = assemble_param_vector() params = {} i = 0 for name in cdp.params: params[name] = values[i] i += 1 # The structure. structure = deepcopy(cdp.structure) if structure.num_models() > 1: structure.collapse_ensemble(model_num=model) # The pivot points. num_states = 1 if cdp.model == MODEL_DOUBLE_ROTOR: num_states = 2 pivot = zeros((num_states, 3), float64) for i in range(num_states): pivot[i] = generate_pivot(order=i + 1, pdb_limit=True) # Shift to the average position. average_position(structure=structure, models=[None]) # The motional eigenframe. frame = generate_axis_system() # Only work with a subset. if atom_id: # The inverted selection. selection = structure.selection(atom_id=atom_id, inv=True) # Delete the data. structure.delete(selection=selection, verbosity=0) # Create the distribution. uniform_distribution(file=file, model=cdp.model, structure=structure, parameters=params, eigenframe=frame, pivot=pivot, atom_id=domain_moving(), total=total, max_rotations=max_rotations) # Close the file. file.close()
def simulate(file="simulation.pdb.bz2", dir=None, step_size=2.0, snapshot=10, total=1000, model=1, force=True): """Pseudo-Brownian dynamics simulation of the frame order motions. @keyword file: The PDB file for storing the frame order pseudo-Brownian dynamics simulation. The compression is determined automatically by the file extensions '*.pdb', '*.pdb.gz', and '*.pdb.bz2'. @type file: str @keyword dir: The directory name to place the file into. @type dir: str or None @keyword step_size: The rotation will be of a random direction but with this fixed angle. The value is in degrees. @type step_size: float @keyword snapshot: The number of steps in the simulation when snapshots will be taken. @type snapshot: int @keyword total: The total number of snapshots to take before stopping the simulation. @type total: int @keyword model: Only one model from an analysed ensemble of structures can be used for the pseudo-Brownian simulation, as the simulation and corresponding PDB file consists of one model per simulation. @type model: int @keyword force: A flag which, if set to True, will overwrite the any pre-existing file. @type force: bool """ # Printout. print("Pseudo-Brownian dynamics simulation of the frame order motions.") # Checks. check_pipe() check_model() check_domain() check_parameters() check_pivot() # Skip the rigid model. if cdp.model == MODEL_RIGID: print("Skipping the rigid model.") return # Open the output file. file = open_write_file(file_name=file, dir=dir, force=force) # The parameter values. values = assemble_param_vector() params = {} i = 0 for name in cdp.params: params[name] = values[i] i += 1 # The structure. structure = deepcopy(cdp.structure) if structure.num_models() > 1: structure.collapse_ensemble(model_num=model) # The pivot points. num_states = 1 if cdp.model == MODEL_DOUBLE_ROTOR: num_states = 2 pivot = zeros((num_states, 3), float64) for i in range(num_states): pivot[i] = generate_pivot(order=i + 1, pdb_limit=True) # Shift to the average position. average_position(structure=structure, models=[None]) # The motional eigenframe. frame = generate_axis_system() # Create the distribution. brownian(file=file, model=cdp.model, structure=structure, parameters=params, eigenframe=frame, pivot=pivot, atom_id=domain_moving(), step_size=step_size, snapshot=snapshot, total=total) # Close the file. file.close()
def decompose(root="decomposed", dir=None, atom_id=None, model=1, force=True): """Structural representation of the individual frame order motional components. @keyword root: The file root for the PDB files created. Each motional component will be represented by a different PDB file appended with '_mode1.pdb', '_mode2.pdb', '_mode3.pdb', etc. @type root: str @keyword dir: The directory name to place the file into. @type dir: str or None @keyword atom_id: The atom identification string to allow the decomposition to be applied to subset of all atoms. @type atom_id: None or str @keyword model: Only one model from an analysed ensemble of structures can be used for the decomposition, as the corresponding PDB file consists of one model per state. @type model: int @keyword force: A flag which, if set to True, will overwrite the any pre-existing file. @type force: bool """ # Printout. print( "PDB representation of the individual components of the frame order motions." ) # Checks. check_pipe() check_model() check_domain() check_parameters() check_pivot() # Skip any unsupported models. unsupported = [MODEL_RIGID, MODEL_DOUBLE_ROTOR] if cdp.model in unsupported: print("Skipping the unsupported '%s' model." % cdp.model) return # Initialise the angle vector (cone opening angle 1, cone opening angle 2, torsion angle). angles = zeros(3, float64) # Cone opening. if cdp.model in MODEL_LIST_ISO_CONE: angles[0] = angles[1] = cdp.cone_theta elif cdp.model in MODEL_LIST_PSEUDO_ELLIPSE: angles[0] = cdp.cone_theta_y angles[1] = cdp.cone_theta_x # Non-zero torsion angle. if cdp.model in MODEL_LIST_FREE_ROTORS: angles[2] = pi elif cdp.model in MODEL_LIST_RESTRICTED_TORSION: angles[2] = cdp.cone_sigma_max # The motional eigenframe. frame = generate_axis_system() # Mode ordering from largest to smallest. indices = argsort(angles) angles = angles[indices[::-1]] frame = transpose(transpose(frame)[indices[::-1]]) # The pivot point. pivot = generate_pivot(order=1, pdb_limit=True) # Loop over each mode. for i in range(3): # Skip modes with no motion. if angles[i] < 1e-7: continue # Open the output file. file_name = "%s_mode%i.pdb" % (root, i + 1) file = open_write_file(file_name=file_name, dir=dir, force=force) # The structure. structure = deepcopy(cdp.structure) if structure.num_models() > 1: structure.collapse_ensemble(model_num=model) # Shift to the average position. average_position(structure=structure, models=[None]) # Create the representation. mode_distribution(file=file, structure=structure, axis=frame[:, i], angle=angles[i], pivot=pivot, atom_id=domain_moving()) # Close the file. file.close()
def generate_pivot(order=1, sim_index=None, pipe_name=None, pdb_limit=False):
    """Create and return the given pivot.

    @keyword order:      The pivot number with 1 corresponding to the first pivot, 2 to the second, etc.
    @type order:         int
    @keyword sim_index:  The optional Monte Carlo simulation index.  If provided, the pivot for the given simulation will be returned instead.
    @type sim_index:     None or int
    @keyword pipe_name:  The data pipe to operate on.  This defaults to the current data pipe.
    @type pipe_name:     str
    @keyword pdb_limit:  A flag which if True will restrict the pivot coordinates to the PDB limits of [-999.999, 9999.999], less a 100 Angstrom buffer.
    @type pdb_limit:     bool
    @return:             The given pivot point.
    @rtype:              numpy rank-1, 3D float64 array
    """

    # Checks.
    check_pipe(pipe_name)
    check_pivot(pipe_name=pipe_name)
    check_model(pipe_name=pipe_name)

    # The data pipe.
    if pipe_name == None:
        pipe_name = pipes.cdp_name()

    # Get the data pipe.
    dp = pipes.get_pipe(pipe_name)

    # Initialise.
    pivot = None

    # The double rotor parameterisation.
    if dp.model in [MODEL_DOUBLE_ROTOR]:
        # The 2nd pivot point (the centre of the frame).
        if sim_index != None and hasattr(dp, 'pivot_x_sim'):
            pivot_2nd = array([dp.pivot_x_sim[sim_index], dp.pivot_y_sim[sim_index], dp.pivot_z_sim[sim_index]], float64)
        else:
            pivot_2nd = array([dp.pivot_x, dp.pivot_y, dp.pivot_z], float64)

        # Generate the first pivot.
        if order == 1:
            # The eigenframe.
            frame = zeros((3, 3), float64)
            if sim_index != None and hasattr(dp, 'pivot_disp_sim'):
                euler_to_R_zyz(dp.eigen_alpha_sim[sim_index], dp.eigen_beta_sim[sim_index], dp.eigen_gamma_sim[sim_index], frame)
                pivot_disp = dp.pivot_disp_sim[sim_index]
            else:
                euler_to_R_zyz(dp.eigen_alpha, dp.eigen_beta, dp.eigen_gamma, frame)
                pivot_disp = dp.pivot_disp

            # The 1st pivot.
            pivot = pivot_2nd + frame[:, 2] * pivot_disp

        # Alias the 2nd pivot.
        elif order == 2:
            pivot = pivot_2nd

    # All other models.
    elif order == 1:
        if sim_index != None and hasattr(dp, 'pivot_x_sim'):
            pivot = array([dp.pivot_x_sim[sim_index], dp.pivot_y_sim[sim_index], dp.pivot_z_sim[sim_index]], float64)
        else:
            pivot = array([dp.pivot_x, dp.pivot_y, dp.pivot_z], float64)

    # PDB limits.
    if pivot is not None and pdb_limit:
        # The original pivot, as text.
        orig_pivot = "[%.3f, %.3f, %.3f]" % (pivot[0], pivot[1], pivot[2])

        # Check each coordinate.
        out = False
        for i in range(3):
            if pivot[i] <= -900.0:
                pivot[i] = -900.0
                out = True
            elif pivot[i] > 9900.0:
                pivot[i] = 9900.0
                out = True

        # Failure.
        if out:
            new_pivot = "[%.3f, %.3f, %.3f]" % (pivot[0], pivot[1], pivot[2])
            warn(RelaxWarning("The pivot point %s is outside of the PDB coordinate limits of [-999.999, 9999.999], less a 100 Angstrom buffer, shifting to %s." % (orig_pivot, new_pivot)))

    # Return the pivot.
    return pivot
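
# A minimal standalone sketch (not used by this module) of the PDB limit
# clamping applied above:  each coordinate is restricted to [-900.0, 9900.0],
# i.e. the PDB limits of [-999.999, 9999.999] less a 100 Angstrom buffer.
# For example, _example_pdb_clamp([0.0, -1500.0, 12000.0]) returns
# ([0.0, -900.0, 9900.0], True).
def _example_pdb_clamp(pivot):
    # Clamp each of the three coordinates, recording whether any moved.
    clamped = False
    for i in range(3):
        if pivot[i] <= -900.0:
            pivot[i] = -900.0
            clamped = True
        elif pivot[i] > 9900.0:
            pivot[i] = 9900.0
            clamped = True
    return pivot, clamped
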
def grid_search(self, lower=None, upper=None, inc=None, scaling_matrix=None, constraints=False, verbosity=0, sim_index=None):
    """Perform a grid search.

    @keyword lower:           The per-model lower bounds of the grid search.  The number of elements must equal the number of parameters in the model.
    @type lower:              list of lists of numbers
    @keyword upper:           The per-model upper bounds of the grid search.  The number of elements must equal the number of parameters in the model.
    @type upper:              list of lists of numbers
    @keyword inc:             The per-model increments for each dimension of the space for the grid search.  The number of elements must equal the number of parameters in the model.
    @type inc:                list of lists of int
    @keyword scaling_matrix:  The per-model list of diagonal and square scaling matrices.
    @type scaling_matrix:     list of numpy rank-2, float64 array or list of None
    @keyword constraints:     If True, constraints are applied during the grid search (eliminating parts of the grid).  If False, no constraints are used.
    @type constraints:        bool
    @keyword verbosity:       A flag specifying the amount of information to print.  The higher the value, the greater the verbosity.
    @type verbosity:          int
    @keyword sim_index:       The Monte Carlo simulation index.
    @type sim_index:          None or int
    """

    # Test if the Frame Order model has been set up.
    if not hasattr(cdp, 'model'):
        raise RelaxNoModelError('Frame Order')

    # Test if the pivot has been set.
    check_pivot()

    # The number of parameters.
    n = param_num()

    # Alias the single model grid bounds and increments.
    lower = lower[0]
    upper = upper[0]
    inc = inc[0]

    # Initialise the grid increments structure.  This is a list of lists, where the first dimension corresponds to the model parameters and the second dimension holds the grid node positions.
    grid = []

    # Generate the grid.
    for i in range(n):
        # Fixed parameter.
        if inc[i] == None:
            grid.append(None)
            continue

        # Reset.
        dist_type = None
        end_point = True

        # Arccos grid from 0 to pi.
        if cdp.params[i] in ['ave_pos_beta', 'eigen_beta', 'axis_theta']:
            # Change the default increment numbers.
            if not isinstance(inc, list):
                inc[i] = int(inc[i] / 2) + 1

            # The distribution type and end point.
            dist_type = 'acos'
            end_point = False

        # Append the grid row.
        row = grid_row(inc[i], lower[i], upper[i], dist_type=dist_type, end_point=end_point)
        grid.append(row)

        # Remove an inc if the end point has been removed.
        if not end_point:
            inc[i] -= 1

    # Total number of points.
    total_pts = 1
    for i in range(n):
        # Fixed parameter.
        if grid[i] == None:
            continue

        total_pts = total_pts * len(grid[i])

    # Check the number.
    max_pts = 50e6
    if total_pts > max_pts:
        raise RelaxError("The total number of grid points '%s' exceeds the maximum of '%s'." % (total_pts, int(max_pts)))

    # Build the points array.
    pts = zeros((total_pts, n), float64)
    indices = zeros(n, int)
    for i in range(total_pts):
        # Loop over the dimensions.
        for j in range(n):
            # Fixed parameter.
            if grid[j] == None:
                # Get the current parameter value.
                pts[i, j] = getattr(cdp, cdp.params[j]) / scaling_matrix[0][j, j]

            # Add the point coordinate.
            else:
                pts[i, j] = grid[j][indices[j]] / scaling_matrix[0][j, j]

        # Increment the step positions.
        for j in range(n):
            if inc[j] != None and indices[j] < inc[j]-1:
                indices[j] += 1
                break    # Exit so that the other step numbers are not incremented.
            else:
                indices[j] = 0

    # Linear constraints.
    A, b = None, None
    if constraints:
        # Obtain the constraints.
        A, b = linear_constraints(scaling_matrix=scaling_matrix[0])

        # Constraint flag set but no constraints present.
        if A is None:
            if verbosity:
                warn(RelaxWarning("The '%s' model parameters are not constrained, turning the linear constraint algorithm off." % cdp.model))
            constraints = False

    # The numeric integration information.
    if not hasattr(cdp, 'quad_int'):
        cdp.quad_int = False
    sobol_max_points, sobol_oversample = None, None
    if hasattr(cdp, 'sobol_max_points'):
        sobol_max_points = cdp.sobol_max_points
        sobol_oversample = cdp.sobol_oversample

    # Set up the data structures for the target function.
    param_vector, full_tensors, full_in_ref_frame, rdcs, rdc_err, rdc_weight, rdc_vect, rdc_const, pcs, pcs_err, pcs_weight, atomic_pos, temp, frq, paramag_centre, com, ave_pos_pivot, pivot, pivot_opt = target_fn_data_setup(sim_index=sim_index, verbosity=verbosity)

    # Get the Processor box singleton (it contains the Processor instance) and alias the Processor.
    processor_box = Processor_box()
    processor = processor_box.processor

    # Set up for multi-processor execution.
    if processor.processor_size() > 1:
        # Printout.
        print("Parallelised grid search.")
        print("Randomising the grid points to equalise the time required for each grid subdivision.\n")

        # Randomise the points.
        shuffle(pts)

    # Loop over each grid subdivision, with all points violating constraints being eliminated.
    for subdivision in grid_split_array(divisions=processor.processor_size(), points=pts, A=A, b=b, verbosity=verbosity):
        # Set up the memo for storage on the master.
        memo = Frame_order_memo(sim_index=sim_index, scaling_matrix=scaling_matrix[0])

        # Set up the command object to send to the slave and execute.
        command = Frame_order_grid_command(points=subdivision, scaling_matrix=scaling_matrix[0], sim_index=sim_index, model=cdp.model, param_vector=param_vector, full_tensors=full_tensors, full_in_ref_frame=full_in_ref_frame, rdcs=rdcs, rdc_err=rdc_err, rdc_weight=rdc_weight, rdc_vect=rdc_vect, rdc_const=rdc_const, pcs=pcs, pcs_err=pcs_err, pcs_weight=pcs_weight, atomic_pos=atomic_pos, temp=temp, frq=frq, paramag_centre=paramag_centre, com=com, ave_pos_pivot=ave_pos_pivot, pivot=pivot, pivot_opt=pivot_opt, sobol_max_points=sobol_max_points, sobol_oversample=sobol_oversample, verbosity=verbosity, quad_int=cdp.quad_int)

        # Add the slave command and memo to the processor queue.
        processor.add_to_queue(command, memo)

    # Execute the queued elements.
    processor.run_queue()
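
# A self-contained sketch (illustrative only, not used by this module) of the
# grid point increment logic in grid_search() above:  the index vector acts
# as a mixed-radix odometer, advancing the first dimension and carrying into
# the next whenever a dimension wraps back to zero.  This simplified version
# drops the None handling used above for fixed parameters.  For example,
# _example_odometer([1, 2], [2, 3]) returns [0, 0] (a full wrap-around).
def _example_odometer(indices, sizes):
    for j in range(len(indices)):
        if indices[j] < sizes[j] - 1:
            indices[j] += 1
            break    # Exit so that the other positions are not incremented.
        else:
            indices[j] = 0
    return indices
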