Example #1
def get_distributed_particles(pa, comm, cell_size):
    # FIXME: this can be removed once the examples all use Application.
    from pysph.parallel.load_balancer import LoadBalancer
    rank = comm.Get_rank()
    num_procs = comm.Get_size()

    if rank == 0:
        lb = LoadBalancer.distribute_particles(pa, num_procs=num_procs,
                                               block_size=cell_size)
    else:
        lb = None

    particles = comm.scatter(lb, root=0)

    return particles
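
A minimal usage sketch for the helper above, assuming an mpi4py run; the ParticleArray import path, the 'fluid' array and the cell size value are illustrative assumptions, and the constructor style simply follows the other examples on this page.

import numpy
from mpi4py import MPI
from pysph.base.particle_array import ParticleArray  # import path assumed

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

pa = None
if rank == 0:
    # Only the root rank needs the full particle array; the helper
    # scatters the distributed pieces to every rank.
    x = numpy.linspace(0.0, 1.0, 101)
    pa = ParticleArray(name='fluid', x={'data': x},
                       h={'data': None, 'default': 0.02})

local_pa = get_distributed_particles(pa, comm, cell_size=0.02)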
Example #2
File: utils.py  Project: mpcsdspa/pysph
def get_distributed_particles(pa, comm, cell_size):
    # FIXME: this can be removed once the examples all use Application.
    from pysph.parallel.load_balancer import LoadBalancer
    rank = comm.Get_rank()
    num_procs = comm.Get_size()

    if rank == 0:
        lb = LoadBalancer.distribute_particles(pa,
                                               num_procs=num_procs,
                                               block_size=cell_size)
    else:
        lb = None

    particles = comm.scatter(lb, root=0)

    return particles
Example #3
    def create_particles(self, variable_h, callable, min_cell_size=-1,
                         *args, **kw):        
        """ Create particles given a callable and any arguments to it.
        This will also automatically distribute the particles among
        processors if this is a parallel run.  Returns the `Particles`
        instance that is created.
        """

        num_procs = self.num_procs
        rank = self.rank
        data = None
        if rank == 0:
            # Only master creates the particles.
            pa = callable(*args, **kw)

            if num_procs > 1:
                # Use the offline load-balancer to distribute the data
                # initially. Negative cell size forces automatic computation. 
                data = LoadBalancer.distribute_particles(pa, 
                                                         num_procs=num_procs, 
                                                         block_size=-1)
        if num_procs > 1:
            # Now scatter the distributed data.
            pa = self.comm.scatter(data, root=0)

        self.particle_array = pa

        in_parallel = num_procs > 1
        if isinstance(pa, (ParticleArray,)):
            pa = [pa]

        no_load_balance = self.options.no_load_balance
        if no_load_balance:
            self.load_balance = False
        else:
            self.load_balance = True

        self.particles = Particles(arrays=pa, variable_h=variable_h,
                                   in_parallel=in_parallel,
                                   load_balancing=self.load_balance,
                                   update_particles=True,
                                   min_cell_size=min_cell_size)

        return self.particles
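
A hypothetical call sketch for create_particles(); `solver` stands in for whatever object provides the method and make_line is an assumed factory, so the names and numbers are illustrative only. Extra keyword arguments (here dx) are forwarded to the callable on the master processor.

import numpy
from pysph.base.particle_array import ParticleArray  # import path assumed

def make_line(dx=0.1):
    # Runs only on rank 0; the result is scattered if this is a parallel run.
    x = numpy.arange(0.0, 1.0 + dx, dx)
    return ParticleArray(name='fluid', x={'data': x},
                         h={'data': None, 'default': 2 * dx})

particles = solver.create_particles(False, make_line, dx=0.1)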
Example #4
parray = ParticleArray(name='p1',
                       x={'data':x},
                       y={'data':y},
                       z={'data':z},
                       h={'data':None, 'default':0.5})


# add parray to the cell manager
parray.add_property({'name':'u'})
parray.add_property({'name':'v'})
parray.add_property({'name':'w'})
parray.add_property({'name':'rho'})
parray.add_property({'name':'p'})

parray = LoadBalancer.distribute_particles(parray, num_procs, 1.0)[rank]
pcm.add_array_to_bin(parray)

np = pcm.arrays_to_bin[0].num_real_particles
nptot = comm.bcast(comm.reduce(np))
assert nptot == num_particles

pcm.initialize()

np = pcm.arrays_to_bin[0].num_real_particles
nptot = comm.bcast(comm.reduce(np))
assert nptot == num_particles

pcm.set_jump_tolerance(INT_INF())

logger.debug('%d: num_cells=%d'%(rank,len(pcm.cells_dict)))
Example #5
def test():
    pcm = ParallelCellManager(initialize=False)

    # create a line of particles and distribute them between proc 0 and proc 1

    lg = LineGenerator(particle_spacing=0.5)

    lg.start_point.x = 0.0
    lg.end_point.x = 10.0
    lg.start_point.y = lg.start_point.z = 0.0
    lg.end_point.y = lg.end_point.z = 0.0

    x, y, z = lg.get_coords()
    num_particles = len(x)

    logger.info('Num particles : %d'%(len(x)))

    parray = ParticleArray(name='p1',
                       x={'data':x},
                       y={'data':y},
                       z={'data':z},
                       h={'data':None, 'default':0.5})


    # add parray to the cell manager
    parray.add_property({'name':'u'})
    parray.add_property({'name':'v'})
    parray.add_property({'name':'w'})
    parray.add_property({'name':'rho'})
    parray.add_property({'name':'p'})
    
    parray = LoadBalancer.distribute_particles(parray, num_procs, 1.0)[rank]
    pcm.add_array_to_bin(parray)
    
    np = pcm.arrays_to_bin[0].num_real_particles
    nptot = comm.bcast(comm.reduce(np))
    assert nptot == num_particles

    pcm.initialize()

    np = pcm.arrays_to_bin[0].num_real_particles
    nptot = comm.bcast(comm.reduce(np))
    assert nptot == num_particles
    
    pcm.set_jump_tolerance(INT_INF())

    logger.debug('%d: num_cells=%d'%(rank,len(pcm.cells_dict)))
    logger.debug('%d:'%rank + ('\n%d '%rank).join([str(c) for c in pcm.cells_dict.values()]))

    # on processor 0, move all particles from one of its cells to the next cell
    if rank == 0:
        cell = pcm.cells_dict.get(list(pcm.proc_map.cell_map.values()[0])[0])
        logger.debug('Cell is %s'%(cell))
        indices = []
        cell.get_particle_ids(indices)
        indices = indices[0]
        logger.debug('Num particles in Cell is %d'%(indices.length))
        parr = cell.arrays_to_bin[0]
        x, y, z = parr.get('x', 'y', 'z', only_real_particles=False)
        logger.debug(str(len(x)) + str(x))
        logger.debug(str(indices.length) + str(indices.get_npy_array()))
        for i in range(indices.length):
            x[indices[i]] += cell.cell_size

        parr.set_dirty(True)

        pcm.update_status()
        logger.debug('Calling cell manager update')
        logger.debug('Is dirty %s'%(pcm.is_dirty))
        pcm.update()

        np = pcm.arrays_to_bin[0].num_real_particles
        nptot = comm.bcast(comm.reduce(np))
        assert nptot == num_particles

        #logger.debug('hierarchy :%s'%(pcm.hierarchy_list))
        logger.debug('cells : %s'%(pcm.cells_dict))
        logger.debug('num particles : %d'%(parray.get_number_of_particles()))
        logger.debug('real particles : %d'%(parray.num_real_particles))
Example #6
    def _create_particles(self, variable_h, callable, min_cell_size=-1,
                         *args, **kw):
        """ Create particles given a callable and any arguments to it.
        This will also automatically distribute the particles among
        processors if this is a parallel run.  Returns the `Particles`
        instance that is created.
        """

        num_procs = self.num_procs
        rank = self.rank
        data = None
        if rank == 0:
            # Only master creates the particles.
            pa = callable(*args, **kw)
            distr_func = self._distr_func
            if num_procs > 1:
                # Use the offline load-balancer to distribute the data
                # initially. Negative cell size forces automatic computation. 
                data = LoadBalancer.distribute_particles(pa, 
                                                         num_procs=num_procs, 
                                                         block_size=-1, 
                                                         distr_func=distr_func)
        if num_procs > 1:
            # Now scatter the distributed data.
            pa = self.comm.scatter(data, root=0)

        self.particle_array = pa

        in_parallel = num_procs > 1
        if isinstance(pa, (ParticleArray,)):
            pa = [pa]

        no_load_balance = self.options.no_load_balance
        if no_load_balance:
            self.load_balance = False
        else:
            self.load_balance = True

        if self.options.with_cl:

            cl_locator_type = kw.get('cl_locator_type', None)
            domain_manager_type = kw.get('domain_manager_type', None)

            if cl_locator_type and domain_manager_type:

                self.particles = CLParticles(
                    arrays=pa, cl_locator_type=cl_locator_type,
                    domain_manager_type=domain_manager_type)

            else:
                self.particles = CLParticles(arrays=pa)
                
        else:

            locator_type = kw.get('locator_type', None)

            if locator_type:
                if locator_type not in [LocatorType.NSquareNeighborLocator,
                                        LocatorType.SPHNeighborLocator]:

                    msg = "locator type %d not understood"%(locator_type)
                    raise RuntimeError(msg)

            else:
                locator_type = LocatorType.SPHNeighborLocator

            self.particles = Particles(arrays=pa, variable_h=variable_h,
                                       in_parallel=in_parallel,
                                       load_balancing=self.load_balance,
                                       update_particles=True,
                                       min_cell_size=min_cell_size,
                                       locator_type=locator_type)

        return self.particles
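
Note that kw does double duty in this variant: it is forwarded to the callable on the master processor and is also searched for locator_type (or cl_locator_type/domain_manager_type when OpenCL is requested). A hypothetical sketch follows; `app` and make_line are placeholders, and make_line accepts **ignored so the locator_type keyword passed through kw does not break it.

def make_line(dx=0.1, **ignored):
    import numpy
    from pysph.base.particle_array import ParticleArray  # import path assumed
    x = numpy.arange(0.0, 1.0 + dx, dx)
    return ParticleArray(name='fluid', x={'data': x},
                         h={'data': None, 'default': 2 * dx})

particles = app._create_particles(False, make_line, dx=0.1,
                                  locator_type=LocatorType.SPHNeighborLocator)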
Example #7
parray = ParticleArray(name='p1',
                       x={'data': x},
                       y={'data': y},
                       z={'data': z},
                       h={
                           'data': None,
                           'default': 0.5
                       })

# add parray to the cell manager
parray.add_property({'name': 'u'})
parray.add_property({'name': 'v'})
parray.add_property({'name': 'w'})
parray.add_property({'name': 'rho'})
parray.add_property({'name': 'p'})

parray = LoadBalancer.distribute_particles(parray, num_procs, 1.0)[rank]
pcm.add_array_to_bin(parray)

np = pcm.arrays_to_bin[0].num_real_particles
nptot = comm.bcast(comm.reduce(np))
assert nptot == num_particles

pcm.initialize()

np = pcm.arrays_to_bin[0].num_real_particles
nptot = comm.bcast(comm.reduce(np))
assert nptot == num_particles

pcm.set_jump_tolerance(INT_INF())

logger.debug('%d: num_cells=%d' % (rank, len(pcm.cells_dict)))