def create_cell_manager(options):
    print 'creating cell manager', options

    # create a parallel cell manager.
    cell_manager = ParallelCellManager(arrays_to_bin=[],
                                       max_cell_scale=options.max_cell_scale,
                                       dimension=2,
                                       load_balancing=False,
                                       initialize=False)

    # enable load balancing
    cell_manager.load_balancer = LoadBalancer(
        parallel_cell_manager=cell_manager)
    cell_manager.load_balancer.skip_iteration = 1
    cell_manager.load_balancer.threshold_ratio = 10.

    for i, pa in enumerate(create_particles(options)):
        cell_manager.arrays_to_bin.append(pa)
        print 'parray %d:' % i, pa.get_number_of_particles()

    cell_manager.initialize()
    print 'num_particles', cell_manager.get_number_of_particles()

    return cell_manager
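A minimal sketch of driving this helper. The max_cell_scale value is illustrative, and a create_particles(options) function is assumed to be in scope, as the body above requires.

from argparse import Namespace

# illustrative options object; only max_cell_scale is read above
options = Namespace(max_cell_scale=2.0)
cell_manager = create_cell_manager(options)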
def create_solver(self):
    self.cm = cm = ParallelCellManager(self.pas, self.cell_size,
                                       self.cell_size)
    #print 'num_cells:', len(cm.cells_dict)

    # balancing will be done manually
    cm.load_balancing = False
    cm.dimension = self.dim

    self.lb = lb = self.cm.load_balancer = LoadBalancer(
        parallel_cell_manager=self.cm)
    lb.skip_iteration = 1
    lb.threshold_ratio = 10.
    lb.lb_max_iteration = 10
    lb.setup()
# logging setup
import logging
logger = logging.getLogger()
log_file_name = '/tmp/log_pysph_' + str(rank)
logging.basicConfig(level=logging.DEBUG, filename=log_file_name,
                    filemode='w')
logger.addHandler(logging.StreamHandler())

# local imports
from pysph.base.particle_array import ParticleArray
from pysph.parallel.parallel_cell import ParallelCellManager
from pysph.solver.basic_generators import LineGenerator
from pysph.base.cell import INT_INF
from pysph.base.point import *

pcm = ParallelCellManager(initialize=False, dimension=1)

parray = ParticleArray(name='parray')

if rank == 0:
    lg = LineGenerator(start_point=Point(0, 0, 0),
                       end_point=Point(1.0, 0, 0),
                       particle_spacing=0.01)
    x, y, z = lg.get_coords()

    parray.add_property({'name': 'x', 'data': x})
    parray.add_property({'name': 'y', 'data': y})
    parray.add_property({'name': 'z', 'data': z})
    parray.add_property({'name': 'h'})
    parray.align_particles()
    parray.h[:] = 0.01
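The scripts in this section use rank (and later comm and num_procs) without defining them. A minimal mpi4py preamble, assuming the usual setup that the snippets elide:

from mpi4py import MPI

comm = MPI.COMM_WORLD        # the global communicator
rank = comm.Get_rank()       # this process' id
num_procs = comm.Get_size()  # total number of processes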
import logging
logger = logging.getLogger()
log_file_name = 'parallel_cell_check.log.' + str(rank)
logging.basicConfig(level=logging.DEBUG, filename=log_file_name,
                    filemode='w')
logger.addHandler(logging.StreamHandler())

# local imports
from pysph.base.particle_array import ParticleArray
from pysph.parallel.parallel_cell import ParallelCellManager
from pysph.solver.basic_generators import LineGenerator
from pysph.base.cell import INT_INF
from pysph.base.point import *
from pysph.parallel.load_balancer import LoadBalancer

pcm = ParallelCellManager(initialize=False)

# create a line of particles along the x-axis, to be distributed
# among the processors
lg = LineGenerator(particle_spacing=0.5)
lg.start_point.x = 0.0
lg.end_point.x = 10.0
lg.start_point.y = lg.start_point.z = 0.0
lg.end_point.y = lg.end_point.z = 0.0

x, y, z = lg.get_coords()
num_particles = len(x)
logger.info('Num particles : %d' % (num_particles))
class Particles(object):
    """ A collection of particles and related data structures that
    define an SPH simulation.

    In pysph, particle properties are stored in a ParticleArray. The
    array may represent a particular type of particle (solid, fluid
    etc). Valid types are defined in base.particle_types.

    Indexing of the particles is performed by a CellManager and
    nearest neighbors are obtained via an instance of NNPSManager.

    Particles is a collection of these data structures to provide a
    single point access to

    (a) Hold all particle information
    (b) Update the indexing scheme when particles have moved.
    (c) Update remote particle properties in parallel runs.
    (d) Barrier synchronizations across processors

    Data Attributes:
    ----------------
    arrays -- a list of particle arrays in the simulation.

    cell_manager -- the CellManager for spatial indexing.

    nnps_manager -- the NNPSManager for neighbor queries.

    correction_manager -- a KernelCorrectionManager if kernel
    correction is used. Defaults to None

    misc_prop_update_functions -- A list of functions to evaluate
    properties at the beginning of a sub step.

    variable_h -- boolean indicating if variable smoothing lengths
    are considered. Defaults to False

    in_parallel -- boolean indicating if running in parallel.
    Defaults to False

    load_balancing -- boolean indicating if load balancing is
    required. Defaults to False.

    pid -- processor id if running in parallel

    Example:
    --------
    In [1]: import pysph.base.api as base
    In [2]: x = linspace(-pi,pi,101)
    In [3]: pa = base.get_particle_array(x=x)
    In [4]: particles = base.Particles(arrays=[pa], in_parallel=True,
                                       load_balancing=False,
                                       variable_h=True)

    Notes:
    ------
    An appropriate cell manager (CellManager/ParallelCellManager) is
    created with reference to the 'in_parallel' attribute.

    Similarly, an appropriate NNPSManager is created with reference
    to the 'variable_h' attribute.

    """
    def __init__(self, arrays=[], in_parallel=False, variable_h=False,
                 load_balancing=True, update_particles=True,
                 locator_type=SPHNeighborLocator, periodic_domain=None,
                 min_cell_size=-1):
        """ Constructor

        Parameters:
        -----------
        arrays -- list of particle arrays in the simulation
        in_parallel -- flag for parallel runs
        variable_h -- flag for variable smoothing lengths
        load_balancing -- flag for dynamic load balancing.
        periodic_domain -- the periodic domain for periodicity

        """
        # set the flags
        self.variable_h = variable_h
        self.in_parallel = in_parallel
        self.load_balancing = load_balancing
        self.locator_type = locator_type

        # Some sanity checks on the input arrays.
        assert len(arrays) > 0, "Particles must be given some arrays!"
        prec = arrays[0].cl_precision
        msg = "All arrays must have the same cl_precision"
        for arr in arrays[1:]:
            assert arr.cl_precision == prec, msg

        self.arrays = arrays
        self.kernel = None

        # create the cell manager
        if not in_parallel:
            self.cell_manager = CellManager(
                arrays_to_bin=arrays,
                min_cell_size=min_cell_size,
                periodic_domain=periodic_domain)
        else:
            self.cell_manager = ParallelCellManager(
                arrays_to_bin=arrays, load_balancing=load_balancing)

            self.pid = self.cell_manager.pid

        # create the nnps manager
        self.nnps_manager = NNPSManager(cell_manager=self.cell_manager,
                                        variable_h=variable_h,
                                        locator_type=self.locator_type)

        # set defaults
        self.correction_manager = None
        self.misc_prop_update_functions = []

        # call an update on the particles (i.e index)
        if update_particles:
            self.update()

    def update(self, cache_neighbors=False):
        """ Update the status of the Particles.

        Parameters:
        -----------
        cache_neighbors -- flag for caching kernel interactions

        Notes:
        ------
        This function must be called whenever particles have moved
        and the indexing structure is invalid.

        After a call to this function, particle neighbors will be
        accurately returned.

        Since particles move at the end of an integration
        step/sub-step, we may perform any other operation that would
        be required for the subsequent step/sub-step. Examples of
        these are summation density, equation of state, smoothing
        length updates, evaluation of velocity divergence/vorticity
        etc.

        All other properties may be updated by appending functions to
        the list 'misc_prop_update_functions'. These functions must
        implement an 'eval' method which takes no arguments. An
        example is the UpdateDivergence function in
        'sph.update_misc_props.py'

        """
        # update the cell structure
        err = self.nnps_manager.py_update()
        assert err != -1, 'NNPSManager update failed!'

        # update any other properties (rho, p, cs, div etc.)
        self.evaluate_misc_properties()

        # evaluate kernel correction terms
        if self.correction_manager:
            self.correction_manager.update()

    def evaluate_misc_properties(self):
        """ Evaluate properties from the list of functions. """
        for func in self.misc_prop_update_functions:
            func.eval()

    def add_misc_function(self, func, operation, kernel):
        """ Add a function to be performed when particles are updated

        Parameters:
        -----------
        func -- The function to perform. Defined in sph.update_functions
        operation -- the calc operation that is required for the function
        kernel -- the kernel used to setup the calcs.

        Example:
        --------
        The conduction coefficient required for the artificial heat
        requires the velocity divergence at a particle. This must be
        available at the start of every substep of an integration
        step.

        """
        calcs = operation.get_calcs(self, kernel)
        self.misc_prop_update_functions.append(func(calcs))

    def get_named_particle_array(self, name):
        """ Return the named particle array if it exists """
        has_array = False

        for array in self.arrays:
            if array.name == name:
                arr = array
                has_array = True

        if has_array:
            return arr
        else:
            print 'Array %s does not exist!' % (name)

    def update_remote_particle_properties(self, props=None):
        """ Perform a remote particle property update.

        This function needs to be called when the remote particles
        on one processor need to be updated on account of
        computations on another physical processor.

        """
        if self.in_parallel:
            self.cell_manager.update_remote_particle_properties(props=props)

    def barrier(self):
        """ Synchronize all processes """
        if self.in_parallel:
            self.cell_manager.barrier()

    @classmethod
    def get_neighbor_particle_locator(self, src, dst,
                                      locator_type=SPHNeighborLocator,
                                      variable_h=False, radius_scale=2.0):
        """ Return a neighbor locator from the NNPSManager """
        cell_manager = CellManager(arrays_to_bin=[src, dst])
        nnps_manager = NNPSManager(cell_manager, locator_type=locator_type,
                                   variable_h=variable_h)
        return nnps_manager.get_neighbor_particle_locator(
            src, dst, radius_scale)

    def get_cl_precision(self):
        """ Return the cl_precision used by the Particle Arrays.

        This property cannot be set; it is set at construction time
        for the Particle Arrays.

        This is simply a convenience function to query the
        cl_precision.

        """
        return self.arrays[0].cl_precision
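The update() docstring requires objects in misc_prop_update_functions to implement a no-argument eval(). A minimal sketch of such a hook, reusing pa and particles from the class docstring example; the class name and the sound-speed formula are illustrative, not the library's UpdateDivergence.

import numpy

class UpdateSoundSpeed(object):
    """ Illustrative hook: recompute cs = sqrt(gamma*p/rho) in place """
    def __init__(self, pa, gamma=1.4):
        self.pa = pa
        self.gamma = gamma

    def eval(self):
        # called by Particles.evaluate_misc_properties() on each update
        pa = self.pa
        pa.cs[:] = numpy.sqrt(self.gamma * pa.p / pa.rho)

# appended directly; add_misc_function wraps calc-based functions instead
particles.misc_prop_update_functions.append(UpdateSoundSpeed(pa))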
# logging setup
import logging
logger = logging.getLogger()
log_file_name = '/tmp/log_pysph_' + str(rank)
logging.basicConfig(level=logging.DEBUG, filename=log_file_name,
                    filemode='w')
logger.addHandler(logging.StreamHandler())

# local imports
from pysph.base.particle_array import ParticleArray
from pysph.parallel.parallel_cell import ParallelCellManager
from pysph.solver.basic_generators import LineGenerator
from pysph.base.cell import INT_INF
from pysph.base.point import *

pcm = ParallelCellManager(initialize=False)

# create one particle on each of processors 0, 1 and 2
if rank == 0:
    parray = ParticleArray()
    parray.add_property({'name': 'x', 'data': [0.4]})
    parray.add_property({'name': 'h', 'data': [0.1]})
elif rank == 1:
    parray = ParticleArray()
    parray.add_property({'name': 'x', 'data': [1.2]})
    parray.add_property({'name': 'h', 'data': [0.1]})
elif rank == 2:
    parray = ParticleArray()
    parray.add_property({'name': 'x', 'data': [2.0]})
    parray.add_property({'name': 'h', 'data': [0.1]})
def test():
    pcm = ParallelCellManager(initialize=False)

    # create a line of particles to be distributed among the processors
    lg = LineGenerator(particle_spacing=0.5)
    lg.start_point.x = 0.0
    lg.end_point.x = 10.0
    lg.start_point.y = lg.start_point.z = 0.0
    lg.end_point.y = lg.end_point.z = 0.0

    x, y, z = lg.get_coords()
    num_particles = len(x)
    logger.info('Num particles : %d' % (num_particles))

    parray = ParticleArray(name='p1',
                           x={'data': x}, y={'data': y}, z={'data': z},
                           h={'data': None, 'default': 0.5})

    # add some extra properties to the parray
    parray.add_property({'name': 'u'})
    parray.add_property({'name': 'v'})
    parray.add_property({'name': 'w'})
    parray.add_property({'name': 'rho'})
    parray.add_property({'name': 'p'})

    # add parray to the cell manager
    parray = LoadBalancer.distribute_particles(parray, num_procs, 1.0)[rank]
    pcm.add_array_to_bin(parray)

    np = pcm.arrays_to_bin[0].num_real_particles
    nptot = comm.bcast(comm.reduce(np))
    assert nptot == num_particles

    pcm.initialize()

    np = pcm.arrays_to_bin[0].num_real_particles
    nptot = comm.bcast(comm.reduce(np))
    assert nptot == num_particles

    pcm.set_jump_tolerance(INT_INF())

    logger.debug('%d: num_cells=%d' % (rank, len(pcm.cells_dict)))
    logger.debug('%d:' % rank +
                 ('\n%d ' % rank).join([str(c) for c in
                                        pcm.cells_dict.values()]))

    # on processor 0, move all particles from one of its cells to the
    # next cell
    if rank == 0:
        cell = pcm.cells_dict.get(
            list(pcm.proc_map.cell_map.values()[0])[0])
        logger.debug('Cell is %s' % (cell))
        indices = []
        cell.get_particle_ids(indices)
        indices = indices[0]
        logger.debug('Num particles in Cell is %d' % (indices.length))
        parr = cell.arrays_to_bin[0]
        x, y, z = parr.get('x', 'y', 'z', only_real_particles=False)
        logger.debug(str(len(x)) + str(x))
        logger.debug(str(indices.length) + str(indices.get_npy_array()))
        for i in range(indices.length):
            x[indices[i]] += cell.cell_size
        parr.set_dirty(True)

    pcm.update_status()
    logger.debug('Calling cell manager update')
    logger.debug('Is dirty %s' % (pcm.is_dirty))
    pcm.update()

    np = pcm.arrays_to_bin[0].num_real_particles
    nptot = comm.bcast(comm.reduce(np))
    assert nptot == num_particles

    #logger.debug('hierarchy :%s'%(pcm.hierarchy_list))
    logger.debug('cells : %s' % (pcm.cells_dict))
    logger.debug('num particles : %d' % (parray.get_number_of_particles()))
    logger.debug('real particles : %d' % (parray.num_real_particles))
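A guard to run the check; the launch command is an assumption, since the original snippet elides the driver.

if __name__ == '__main__':
    # e.g. run with: mpiexec -n 3 python parallel_cell_check.py
    test()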